Lines of samesite.py from check-in 7b27f1db02 that are changed by the sequence of edits moving toward check-in 53dcfdb8f7:
#!/usr/bin/env python3.1

import datetime, http.cookiejar, optparse, os, sys, shelve, re, urllib.error, urllib.request

parser = optparse.OptionParser()
parser.add_option('-v', '--verbose', action = 'store_true', dest = 'verbose', help = 'turns on verbose status notifications', metavar = 'bool', default = False)
parser.add_option('-d', '--dir', action = 'store', dest = 'dir', help = 'specify directory where the files should be stored', metavar = 'string', default = None)
parser.add_option('-r', '--root', action = 'store', dest = 'root', help = 'specify a site from which data should be mirrored', metavar = 'string', default = None)
parser.add_option('-l', '--log', action = 'store', dest = 'log', help = 'specify a log file to process', metavar = 'string', default = None)
(options, args) = parser.parse_args()
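
# example invocation (the directory, root and log paths here are illustrative,
# not part of the check-in):
#   samesite.py -d /var/mirror -r http://example.org -l /var/log/httpd/access.log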

if not options.dir:
    print('Directory not specified')
    exit(1)

if not options.root:
    print('Server not specified')
    exit(1)

if not options.log:
    print('Log file not specified')
    exit(1)

if not os.access(options.log, os.R_OK):
    print('Log file unreadable')
    exit(1)

# this is the file index - everything is stored in this file
index = shelve.open(options.dir + '/.index')
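# desc_fields are the headers worth persisting in the index; ignore_fields are
# dropped silently; any other header triggers the 'Undefined header' notice below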
desc_fields = ('Content-Length', 'ETag', 'Pragma', 'Last-Modified')
ignore_fields = ('Accept-Ranges', 'Age', 'Cache-Control', 'Connection', 'Content-Type', 'Date', 'Expires', 'Server', 'Via', 'X-Cache', 'X-Cache-Lookup')

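# files are streamed in block_size-byte chunks; one progress dot is printed per chunk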
block_size = 32768

while True:
    unchecked_files = set()
    checked_files = 0

    # reading the log and storing found urls for processing
    # check file mtime XXX
    with open(options.log, 'r') as log_file:
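        # matches Apache-style access-log lines; group(2) is the request path,
        # with the query string split off into group(3)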
        log_line = re.compile(r'^[^ ]+ - - \[.*] "(GET|HEAD) (.*?)(\?.*)? HTTP/1.1" (\d+) \d+ "(.*)" "(.*)"$')
        for line in log_file:
            this_line = log_line.match(line.strip())
            if this_line:
                unchecked_files.add(this_line.group(2))

    for url in unchecked_files:

        # creating an empty placeholder in the index
        if url not in index:
            index[url] = {}
        reload = False

        # creating the file name from the url
        file_name = options.dir + re.compile('%20').sub(' ', url)
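        # (only %20 is unescaped here; any other percent-escapes stay verbatim in the file name)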

        # forcibly checking the file if no file is present
        if not os.access(file_name, os.R_OK):
            reload = True

        # forcibly checking the file if the file size doesn't match the index data
        elif 'Content-Length' in index[url] and os.stat(file_name).st_size != int(index[url]['Content-Length']):
            print('File size is', os.stat(file_name).st_size, 'and stored file size is', index[url]['Content-Length'])
            reload = True

        # forcibly checking the file if the index holds a Pragma: no-cache header
        if 'Pragma' in index[url] and index[url]['Pragma'] == 'no-cache':
            reload = True

        # skipping the file if there's no need to recheck it and it was checked less than 4 hours ago
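        # (a negative timedelta normalizes to days == -1, so .days < 0 holds exactly
        # when the stored __time__ is more recent than now minus 4 hours)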
        if not reload and '__time__' in index[url] and (datetime.datetime.now() - datetime.timedelta(hours = 4) - index[url]['__time__']).days < 0:
            continue
        print('Checking file:', url)
        try:
            with urllib.request.urlopen(options.root + url) as source:
                new_headers = {}
                headers = source.info()
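                # source.info() exposes the response headers as an email.message-style mapping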

                # stripping unneeded headers (XXX make this inplace?)
                for header in headers:
                    if header in desc_fields:
                        if header == 'Pragma' and headers[header] != 'no-cache':
                            print('Pragma:', headers[header])
                        new_headers[header] = headers[header]
                    elif header not in ignore_fields:
                        print('Undefined header "', header, '": ', headers[header], sep='')

                # comparing headers with data found in the index
                # if any header has changed (except Pragma) the file is downloaded in full
                # same if we get more or fewer headers
                old_keys = set(index[url].keys())
                old_keys.discard('__time__')
                old_keys.discard('Pragma')
                more_keys = set(new_headers.keys()) - old_keys
                more_keys.discard('Pragma')
                less_keys = old_keys - set(new_headers.keys())
                if len(more_keys) > 0:
                    print('More headers appear:', more_keys)
                    reload = True
                elif len(less_keys) > 0:
                    print('Fewer headers appear:', less_keys)
                    reload = True
                else:
                    for key in index[url].keys():
                        if key not in ('__time__', 'Pragma') and index[url][key] != new_headers[key]:
                            print('Header "', key, '" changed from [', index[url][key], '] to [', new_headers[key], ']', sep='')
                            reload = True

                # downloading the file
                if reload:
                    if 'Content-Length' in headers:
                        print('Downloading', headers['Content-Length'], 'bytes [', end='')
                    else:
                        print('Downloading [', end='')
                    sys.stdout.flush()

                    # the file is created at a temporary location and moved in place only when the download completes
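                    # (os.renames below also creates any missing intermediate directories of file_name)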
                    temp_file = open(options.dir + '/.tmp', 'wb')
                    buffer = source.read(block_size)
                    blocks = 0
                    megs = 0
                    while len(buffer) > 0:
                        temp_file.write(buffer)
                        print('.', end='')
                        sys.stdout.flush()
                        buffer = source.read(block_size)
                        blocks += 1
                        if blocks > 1024*1024 // block_size:
                            blocks = blocks - 1024*1024 // block_size
                            megs += 1
                            print('{}Mb'.format(megs), end='')
                    temp_file.close()
                    print(']')
                    os.renames(options.dir + '/.tmp', file_name)

                checked_files += 1

                # storing a new time mark and the new headers
                new_headers['__time__'] = datetime.datetime.now()
                index[url] = new_headers
                index.sync()
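                # sync() flushes the shelve to disk so progress survives an interrupted run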

        except urllib.error.HTTPError as error:
            # in case of an error we don't actually need to do anything:
            # if the download stalls or fails the file is never moved into its place
            print(error)

    print('[', len(unchecked_files), '/', checked_files, ']')

    # if no files were checked on this pass there is nothing left to do - stopping;
    # otherwise restarting the sequence
    if checked_files == 0:
        break
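
# the shelve index is never closed explicitly; it is left to be closed implicitly
# when the interpreter exits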