digital literacy for everyone


[lit]

[generate-title]

[lit]

[fig]
#### license: creative commons cc0 1.0 (public domain)
#### http://creativecommons.org/publicdomain/zero/1.0/

proginf = "wikidump 0.1 jul 2019 mn"

# dump xml for each wiki page on techrights (no edit hist)

# create each actual file
function dumppage url
    urltext = arrcurl url
    title = ""
    forin each urltext
        findtitle = instr each "<title>"
        iftrue = findtitle
            title = each ltrim rtrim
            split title "<title>"
            join title ""
            split title "</title>"
            join title ""
            break
            fig
        next

    iftrue title
        outfilename = "techrights_" plus title plus ".xml"
        split outfilename "/"
        join outfilename ":slash:"
        open "w"
        forin each urltext
            now = each fprint outfilename
            next
        now = outfilename close
        fig
    fig

# get list of pages
allpages = arrcurl "http://techrights.org/wiki/index.php/Special:AllPages"
allpageslen = allpages len
longest = 0
longestindex = 0
for each 1 allpageslen 1
    eachlen = allpages mid each 1 len
    ifmore eachlen longest
        longest = eachlen
        longestindex = each
        fig
    next

# process list of pages and call dumppage for each
quot = 34 chr
pages = allpages mid longestindex 1
split pages quot
forin each pages
    iswiki = instr each "/wiki/index.php/"
    ifequal each "/wiki/index.php/Special:AllPages"
        ignoreit
    else
        iftrue iswiki
            now = "http://techrights.org" plus each
            split now "http://techrights.org/wiki/index.php/"
            join now "http://techrights.org/wiki/index.php/Special:Export/"
            dumppage now
            fig
        fig
    next

# create tgz archive
pos = 0
python
    if figosname != "nt": pos = 1
    fig

iftrue pos
    tm = time
    split tm ":"
    join tm "."
    dt = date
    split dt "/"
    join dt "-"
    tgzname = "techrightswiki_" plus dt plus "_" plus tm plus ".tar.gz"
    now = "tar -cvzf " plus tgzname plus " techrights_*" shell
    fig
[fig]
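if you dont run fig, here is a rough python 3 sketch of the same idea: pull the list of pages from special:allpages, fetch each page through special:export, save the xml, then bundle everything into a dated tar.gz. only the urls and the :slash: / techrights_ naming come from the script above; everything else is an unofficial translation for illustration, not part of wikidump. it uses a regex over the whole listing instead of the fig script's longest-line trick, and python's tarfile module instead of shelling out to tar.

[fig]
# unofficial python 3 sketch of the wikidump flow above -- a translation
# for illustration, not the fig program; uses only the standard library
import glob
import re
import tarfile
import time
import urllib.request

base = "http://techrights.org"
export = base + "/wiki/index.php/Special:Export/"

def dumppage(url):
    # fetch the special:export xml for one wiki page
    text = urllib.request.urlopen(url).read().decode("utf-8", "replace")
    found = re.search(r"<title>(.*?)</title>", text)
    if not found:
        return
    # "/" cannot appear in a filename, so escape it the way the fig script does
    title = found.group(1).replace("/", ":slash:")
    with open("techrights_" + title + ".xml", "w", encoding="utf-8") as xml:
        xml.write(text)

# get the list of pages
listing = urllib.request.urlopen(base + "/wiki/index.php/Special:AllPages")
listing = listing.read().decode("utf-8", "replace")

# process the list of pages and call dumppage for each;
# a regex stands in for the fig script's longest-line-plus-quote-split trick
for href in re.findall(r'"(/wiki/index\.php/[^"]+)"', listing):
    if href == "/wiki/index.php/Special:AllPages":
        continue
    dumppage(export + href[len("/wiki/index.php/"):])

# create the tgz archive, with tarfile doing the job of "tar -cvzf"
stamp = time.strftime("%Y-%m-%d_%H.%M.%S")
with tarfile.open("techrightswiki_" + stamp + ".tar.gz", "w:gz") as tgz:
    for name in glob.glob("techrights_*.xml"):
        tgz.add(name)
[fig]

like the fig original, this makes no attempt to rate-limit its requests or to handle network errors; for a wiki of any real size you would want to add a short sleep between fetches.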
back to main fig page: [lit]https://codeinfig.neocities.org/fig/[lit] home: [lit]https://codeinfig.neocities.org[lit]