Lines of
samesite.py
from check-in 80f8e3804a
that are changed by the sequence of edits moving toward
check-in d0071bdbc7:
1: #!/usr/bin/env python3.1
2:
3: import datetime, http.cookiejar, optparse, os, sys, shelve, re, urllib.request
4:
5: from spacemap import SpaceMap
6:
7: parser = optparse.OptionParser()
8: parser.add_option('-v', '--verbose', action = 'store_true', dest = 'verbose', help = 'turns on verbose status notifications', metavar = 'bool', default = False)
9: parser.add_option('-d', '--dir', action = 'store', dest = 'dir', help = 'specify directory where the files should be stored', metavar = 'string', default = None)
10: parser.add_option('-r', '--root', action = 'store', dest = 'root', help = 'specify a site from which data should be mirrored', metavar = 'string', default = None)
11: parser.add_option('-l', '--log', action = 'store', dest = 'log', help = 'specify a log file to process', metavar = 'string', default = None)
12: parser.add_option('-e', '--skip-etag', action = 'store_true', dest = 'noetag', help = 'do not process etags', metavar = 'bool', default = False)
13: parser.add_option('-p', '--port', action = 'store', dest = 'port', help = 'listen on this port for incoming connections', metavar = 'integer', default = None)
14: parser.add_option('-n', '--no-update', action = 'store_true', dest = 'noupdate', help = 'do not update already downloaded files', metavar = 'bool', default = False)
15: (options, args) = parser.parse_args()
16:
17: assert options.dir, 'Directory not specified'
18: assert options.root, 'Server not specified'
19: assert options.log or options.port, 'Log file or port not specified'
20: assert options.port or os.access(options.log, os.R_OK), 'Log file unreadable'
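# Example invocations (hypothetical paths and host, for illustration only):
#   ./samesite.py --dir /var/cache/mirror --root http://files.example.org --port 8080
#   ./samesite.py --dir /var/cache/mirror --root http://files.example.org --log /var/log/nginx/access.log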
21:
22: optionsDirWithSep = re.compile('^(.*?)/?$').match(options.dir)
23: if optionsDirWithSep:
24: options.dir = optionsDirWithSep.group(1)
25:
26: # this is the file index - everything is stored in this file
27: # _parts - space map of the parts of the file we do not possess yet
28: # _time - last time the file was checked
29: # everything else is just the headers
30: index = shelve.open(options.dir + os.sep + '.index')
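# an index record might look like this (illustrative values; header values are
# stored as the strings received from upstream):
#   index['/pub/file.iso'] = {
#       '_parts': SpaceMap(),        # empty map: nothing missing, file complete
#       '_time': datetime.datetime(2010, 8, 20, 12, 0),
#       'Content-Length': '1048576',
#       'Last-Modified': 'Fri, 20 Aug 2010 10:00:00 GMT',
#   }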
31: desc_fields = ('Content-Length', 'Pragma', 'Last-Modified')
32: ignore_fields = ('Accept-Ranges', 'Age', 'Cache-Control', 'Connection', 'Content-Type', 'Date', 'Expires', 'Server', 'Via', 'X-Cache', 'X-Cache-Lookup', 'X-Powered-By')
33:
34: if not options.noetag:
35: desc_fields += ('ETag',)
36: else:
37: ignore_fields += ('ETag',)
38:
39: block_size = 4096
40:
41: temp_file_name = options.dir + os.sep + '.tmp'
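# download staging: data is first written under a temporary name and only
# renamed into its final place once the transfer completes (see both branches below)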
42:
43: '''
44: # kqueue would be a good fit here, but that comes later
45: class Connection:
46: __slots__ = frozenset(('__address', '__input', '__socket', '__status', 'error', 'method', 'url', 'http_version'))
47:
48: def __init__(self, socket, address):
49: self.__address = address
50: self.__input = b''
51: self.__socket = socket
52: self.__status = 0
53:
54: def read(self, kev):
55: buffer = self.__socket.recv(kev.data)
56: exhausted = False
57: if len(buffer) == 0:
58: eof = True # XXX eof is recorded here but not yet acted upon
59: else:
60: self.__input += buffer
61: while not exhausted:
62: if self.__status == -1:
63: exhausted = True
64: elif self.__status == 0:
65: endstring = self.__input.find(b'\n')
66: if endstring > 0:
67: print('Processing request line.')
68: line = self.__input[:endstring].decode('ascii')
69: self.__input = self.__input[endstring + 1:]
70: isRequest = re.compile(r'(GET) ([^ ]+) HTTP/(1\.0)').match(line)
71: if not isRequest:
72: self.error = 'Not a HTTP connection.'
73: self.__status = -1
74: else:
75: self.method = isRequest.group(1)
76: self.url = isRequest.group(2)
77: self.http_version = isRequest.group(3)
78: self.__status = 1
79: else:
80: exhausted = True
81: elif self.__status == 1:
82: endstring = self.__input.find(b'\n')
83: if endstring > 0:
84: print('Processing header line.' + repr(self.__input))
85: line = self.__input[:endstring].decode('ascii')
86: self.__input = self.__input[endstring + 1:]
87: isHeader = re.compile('([^:]*): +(.*)').match(line)
88: if not isHeader:
89: self.error = 'Bad header.'
90: return False
91: # process header here
92: elif endstring == 0:
93: self.__status = 2
94: else:
95: exhausted = True
96:
97: def write(self, kev):
98: pass
99:
100: if options.port:
101: import select, socket
102:
103: sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
104: try:
105: sock.bind(('127.0.0.1', int(options.port)))
106: sock.listen(-1)
107:
108: kq = select.kqueue()
109: assert kq.fileno() != -1, "Fatal error: can't initialise kqueue."
110:
111: kq.control([select.kevent(sock, select.KQ_FILTER_READ, select.KQ_EV_ADD)], 0)
112: timeout = None
113:
114: connections = {sock.fileno(): None}
115:
116: while True:
117: kevs = kq.control(None, 1, timeout)
118:
119: for kev in kevs:
120: if type(connections[kev.ident]) == Connection:
121: print(kev.ident, kev.data, kev.filter, kev.flags)
122: assert kev.data != 0, 'No data available.'
123: if kev.filter == select.KQ_FILTER_READ:
124: connections[kev.ident].read(kev)
125: elif kev.filter == select.KQ_FILTER_WRITE:
126: connections[kev.ident].write(kev)
127: else:
128: assert kev.filter in (select.KQ_FILTER_READ, select.KQ_FILTER_WRITE), 'Do we support other filters?'
129: else:
130: (conn, addr) = sock.accept()
131: print('Connection from ' + repr(addr))
132: kq.control([select.kevent(conn, select.KQ_FILTER_READ, select.KQ_EV_ADD)], 0)
133: connections[conn.fileno()] = Connection(conn, addr)
134:
135: if kev.flags >> 15 == 1:
136: kq.control([select.kevent(kev.ident, select.KQ_FILTER_READ, select.KQ_EV_DELETE)], 0)
137: kq.control([select.kevent(kev.ident, select.KQ_FILTER_WRITE, select.KQ_EV_DELETE)], 0)
138: del(connections[kev.ident])
139: finally:
140: sock.close()
141: '''
142:
143: if options.port:
144: import http.server
145:
146: class MyRequestHandler(http.server.BaseHTTPRequestHandler):
147: def __process(self):
148: # reload means file needs to be reloaded to serve request
149: reload = False
150: # recheck means the file needs to be checked; this also means that if the file has been modified we can serve the older copy
151: recheck = False
152: # file_stat means file definitely exists
153: file_stat = None
154: # requested_ranges holds data about any range requested
155: requested_ranges = None
156: # record holds data from the index locally; it should be written back upon successful completion
157: record = None
158: info = 'Checking file: ' + self.path
159:
160: proxy_ignored = ('Accept', 'Accept-Encoding',
161: 'Cache-Control', 'Connection',
162: 'Host',
163: 'User-Agent',
164: 'Via',
165: 'X-Forwarded-For',
166: )
167:
168: print('Command:', self.command)
169:
170: for header in self.headers:
171: if header in proxy_ignored:
172: pass
173: elif header in ('Range',):
174: isRange = re.compile(r'bytes=(\d+)-(\d+)').match(self.headers[header])
175: if isRange:
176: requested_ranges = SpaceMap({int(isRange.group(1)): int(isRange.group(2)) + 1})
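# e.g. a 'Range: bytes=0-499' header yields SpaceMap({0: 500}): HTTP ranges
# are inclusive at both ends, while the map's upper bounds are exclusive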
177: else:
178: return
179: else:
180: print('Unknown header - ', header, ': ', self.headers[header], sep='')
181: return
182: print(header, self.headers[header])
80f8e3804a 2010-08-20 183: print(self.path)
184:
185: # creating empty placeholder in index
186: # if there's no space map and there's no file in the real directory - we have no file
187: # if there's an empty space map - the file is complete
188: # the space map generally covers every bit of the file we don't currently possess
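# worked example (offsets map to exclusive end offsets): for a 100-byte file
# of which bytes 0-49 are already stored, _parts == SpaceMap({50: 100});
# a brand-new file has _parts == SpaceMap({0: 100}); a complete one, SpaceMap()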
80f8e3804a 2010-08-20 189: if self.path not in index:
190: info += '\nThis one is new.'
191: reload = True
192: record = {'_parts': None}
193: else:
80f8e3804a 2010-08-20 194: record = index[self.path]
80f8e3804a 2010-08-20 195: if '_parts' in record:
196: print(record['_parts'])
80f8e3804a 2010-08-20 197: if record['_parts'] == {0: -1}:
80f8e3804a 2010-08-20 198: record['_parts'] = None
199:
80f8e3804a 2010-08-20 200: # creating file name from self.path
80f8e3804a 2010-08-20 201: file_name = options.dir + os.sep + self.path.replace('%20', ' ')
202: # partial file or unfinished download
80f8e3804a 2010-08-20 203: temp_name = options.dir + os.sep + '.parts' + self.path.replace('%20', ' ')
204:
205: # forcibly checking file if no file present
206: if os.access(file_name, os.R_OK):
207: file_stat = os.stat(file_name)
208: elif '_parts' in record and os.access(temp_name, os.R_OK):
209: file_stat = os.stat(temp_name)
210: elif not reload:
211: info += '\nFile not found or inaccessible.'
212: reload = True
213:
214: # forcibly checking file if file size doesn't match with index data
215: if not reload:
216: if '_parts' in record and record['_parts'] == SpaceMap():
217: if 'Content-Length' in record and file_stat and file_stat.st_size != int(record['Content-Length']):
218: info += '\nFile size is {} and stored file size is {}.'.format(file_stat.st_size, record['Content-Length'])
219: reload = True
220:
221: # forcibly checking file if index holds Pragma header
222: if not reload and 'Pragma' in record and record['Pragma'] == 'no-cache':
223: info += '\nPragma on: recheck imminent.'
224: recheck = True
225:
226: # rechecking the file if the last check was 4 or more hours ago
227: if not recheck and not reload and '_time' in record and (datetime.datetime.now() - datetime.timedelta(hours = 4) - record['_time']).days >= 0:
228: recheck = True
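# note: timedelta.days floors toward negative infinity, so .days >= 0 holds
# exactly when now - 4h - _time is non-negative, i.e. _time is 4+ hours old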
229:
230: print(info)
231: if reload or recheck:
232:
233: try:
80f8e3804a 2010-08-20 234: request = options.root + self.path
235: if requested_ranges is not None:
236: if '_parts' in record and record['_parts'] is not None:
237: needed = record['_parts'] & requested_ranges
238: else:
239: needed = requested_ranges
240: ranges = ()
241: print('Not stored ranges:', record['_parts'])
242: print('Requested ranges:', requested_ranges)
243: print('Needed ranges:', needed)
244: needed.rewind()
245: while True:
246: range = needed.pop()
247: if range[0] is None:
248: break
249: ranges += '{}-{}'.format(range[0], range[1] - 1),
250: print('Requesting ranges:', ranges)
251: request = urllib.request.Request(request, headers = {'Range': 'bytes=' + ','.join(ranges)})
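# e.g. if the client asked for bytes=0-99 and bytes 50-99 are already stored,
# needed == SpaceMap({0: 50}) and the upstream request carries
# 'Range: bytes=0-49' (assuming & yields the intersection of the two maps)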
252:
253: with urllib.request.urlopen(request) as source:
254: new_record = {}
255: new_record['_parts'] = record['_parts']
256: headers = source.info()
257:
258: # stripping unneeded headers (XXX make this inplace?)
259: for header in headers:
260: if header in desc_fields:
261: #if header == 'Pragma' and headers[header] != 'no-cache':
262: print(header, headers[header])
263: if header == 'Content-Length':
264: if 'Content-Range' not in headers:
265: new_record[header] = headers[header]
266: else:
267: new_record[header] = headers[header]
268: elif header == 'Content-Range':
269: range = re.compile(r'^bytes (\d+)-(\d+)/(\d+)$').match(headers[header])
270: if range:
271: new_record['Content-Length'] = range.group(3)
272: else:
273: assert False, 'Content-Range unrecognized.'
274: elif header not in ignore_fields:
275: print('Undefined header "', header, '": ', headers[header], sep='')
276:
277: if new_record['_parts'] is None:
278: new_record['_parts'] = SpaceMap({0: int(new_record['Content-Length'])})
279: print(new_record)
280:
281: # comparing headers with data found in index
282: # if any header has changed (except Pragma) file is fully downloaded
283: # same if we get more or less headers
284: old_keys = set(record.keys())
285: old_keys.discard('_time')
286: old_keys.discard('Pragma')
287: more_keys = set(new_record.keys()) - old_keys
288: more_keys.discard('Pragma')
289: less_keys = old_keys - set(new_record.keys())
290: if len(more_keys) > 0:
291: if len(old_keys) != 0:
292: print('More headers appear:', more_keys)
293: reload = True
294: elif len(less_keys) > 0:
295: print('Fewer headers appear:', less_keys)
296: else:
297: for key in record.keys():
298: if key[0] != '_' and key != 'Pragma' and record[key] != new_record[key]:
299: print('Header "', key, '" changed from [', record[key], '] to [', new_record[key], ']', sep='')
300: reload = True
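# e.g. stored {'Content-Length': '100', 'ETag': 'a'} vs fetched
# {'Content-Length': '100', 'ETag': 'b'}: the ETag mismatch forces a reload;
# an upstream header we never saw before would instead show up in more_keys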
301:
302: if reload:
303: print('Reloading.')
304: if os.access(temp_name, os.R_OK):
305: os.unlink(temp_name)
306: if os.access(file_name, os.R_OK):
307: os.unlink(file_name)
308:
309: # downloading file or segment
310: if 'Content-Length' in new_record:
311: if requested_ranges is None:
312: requested_ranges = new_record['_parts']
313: else:
314: if len(requested_ranges) > 1:
315: print("Multipart requests currently not supported.")
316: assert False, 'Skip this one for now.'
317: else:
318: assert False, 'No Content-Length or Content-Range header.'
319:
320: if reload:
321: new_record['_time'] = datetime.datetime.now()
322: if self.command not in ('HEAD',):
323: # file is created at temporary location and moved in place only when download completes
324: if not os.access(temp_name, os.R_OK):
325: # reuse the script-wide scratch name defined above
326: with open(temp_file_name, 'w+b') as some_file:
327: pass
328: os.renames(temp_file_name, temp_name)
329: temp_file = open(temp_name, 'r+b')
330: requested_ranges.rewind()
331: while True:
332: (start, end) = requested_ranges.pop()
333: if start is None:
334: break
335: stream_last = start
336: old_record = dict(new_record) # snapshot a copy, not a reference, so the index lags the writes safely
337: if end - start < block_size:
338: req_block_size = end - start
339: else:
340: req_block_size = block_size
341: buffer = source.read(req_block_size)
342: print(buffer)
343: length = len(buffer)
344: while length > 0 and stream_last < end:
345: stream_pos = stream_last + length
346: assert not stream_pos > end, 'Received more data than requested: pos:{} start:{} end:{}.'.format(stream_pos, start, end)
347: print('Writing', length, 'bytes to temp file at position', stream_last)
348: temp_file.seek(stream_last)
349: temp_file.write(buffer)
350: new_record['_parts'] = new_record['_parts'] - SpaceMap({stream_last: stream_pos})
351: print(new_record)
80f8e3804a 2010-08-20 352: index[self.path] = old_record
353: index.sync()
354: old_record = dict(new_record)
355: stream_last = stream_pos
356: if end - stream_last < block_size:
357: req_block_size = end - stream_last
358: buffer = source.read(req_block_size)
359: print(buffer)
360: length = len(buffer)
361: print(new_record)
80f8e3804a 2010-08-20 362: index[self.path] = new_record
363: index.sync()
364: temp_file.close()
365:
366: # moving downloaded data to real file
367: if new_record['_parts'] == SpaceMap():
80f8e3804a 2010-08-20 368: if not isinstance(request, str):
80f8e3804a 2010-08-20 369: # just moving
80f8e3804a 2010-08-20 370: # drop old dirs XXX
80f8e3804a 2010-08-20 371: print('Moving temporary file to new destination.')
80f8e3804a 2010-08-20 372: os.renames(temp_name, file_name)
373:
374: except urllib.error.HTTPError as error:
375: # in case of error we don't need to do anything, actually:
376: # if the download stalls or fails, the file is simply never moved into its final location
377: print(error)
378:
379: if self.command == 'HEAD':
380: self.send_response(200)
80f8e3804a 2010-08-20 381: if 'Content-Length' in index[self.path]:
80f8e3804a 2010-08-20 382: self.send_header('Content-Length', index[self.path]['Content-Length'])
383: self.send_header('Accept-Ranges', 'bytes')
384: self.send_header('Content-Type', 'application/octet-stream')
80f8e3804a 2010-08-20 385: if 'Last-Modified' in index[self.path]:
80f8e3804a 2010-08-20 386: self.send_header('Last-Modified', index[self.path]['Last-Modified'])
387: self.end_headers()
388: else:
80f8e3804a 2010-08-20 389: if index[self.path]['_parts'] != SpaceMap():
390: file_name = temp_name
391:
392: with open(file_name, 'rb') as real_file:
393: file_stat = os.stat(file_name)
394: self.send_response(200)
80f8e3804a 2010-08-20 395: if 'Last-Modified' in index[self.path]: self.send_header('Last-Modified', index[self.path]['Last-Modified'])
396: if requested_ranges is not None:
397: ranges = ()
398: requested_ranges.rewind()
399: while True:
400: pair = requested_ranges.pop()
401: if pair[0] is None:
402: break
403: ranges += '{}-{}'.format(pair[0], pair[1] - 1),
80f8e3804a 2010-08-20 404: self.send_header('Content-Range', 'bytes ' + ','.join(ranges) + '/' + index[self.path]['Content-Length'])
405: else:
406: self.send_header('Content-Length', str(file_stat.st_size))
407: requested_ranges = SpaceMap({0: file_stat.st_size})
408: self.send_header('Content-Type', 'application/octet-stream')
409: self.end_headers()
410: if self.command in ('GET',):
411: requested_ranges.rewind()
412: (start, end) = requested_ranges.pop()
413: print('Seeking file to position', start)
414: real_file.seek(start)
415: if block_size > end - start:
416: req_block_size = end - start
417: else:
418: req_block_size = block_size
419: print('block_size is', req_block_size)
420: buffer = real_file.read(req_block_size)
421: length = len(buffer)
422: while length > 0:
423: self.wfile.write(buffer)
424: start += len(buffer)
425: if req_block_size > end - start:
426: req_block_size = end - start
427: if req_block_size == 0:
428: break
429: print('block_size is', req_block_size)
430: buffer = real_file.read(req_block_size)
431: length = len(buffer)
432:
433: def do_HEAD(self):
434: return self.__process()
435: def do_GET(self):
436: return self.__process()
437:
438: server = http.server.HTTPServer(('127.0.0.1', int(options.port)), MyRequestHandler)
439: server.serve_forever()
440:
441: else:
442: while True:
443: unchecked_files = set()
444: checked_files = 0
445:
446: # reading log and storing found urls for processing
447: # check file mtime XXX
448: with open(options.log, 'r') as log_file:
449: log_line = re.compile(r'^[^ ]+ - - \[.*] "(GET|HEAD) (.*?)(\?.*)? HTTP/1.1" (\d+) \d+ "(.*)" "(.*)"$')
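# a matching access-log line looks like this (illustrative):
# 1.2.3.4 - - [20/Aug/2010:12:00:00 +0400] "GET /pub/file.iso HTTP/1.1" 200 1234 "-" "Wget/1.12"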
450: for line in log_file:
451: this_line = log_line.match(line.strip())
452: if this_line:
453: unchecked_files.add(this_line.group(2))
454:
455: for url in unchecked_files:
456: reload = False
457: recheck = False
458: info = 'Checking file: ' + url
459:
460: # creating empty placeholder in index
461: if url not in index:
462: info += '\nThis one is new.'
463: index[url] = {}
464: reload = True
465:
466: # creating file name from url
467: file_name = options.dir + url.replace('%20', ' ')
468:
469: # forcibly checking file if no file present
470: if not reload and not os.access(file_name, os.R_OK):
471: info += '\nFile not found or inaccessible.'
472: reload = True
473:
474: # forcibly checking file if file size doesn't match with index data
475: elif not reload and 'Content-Length' in index[url] and os.stat(file_name).st_size != int(index[url]['Content-Length']):
476: info += '\nFile size is {} and stored file size is {}.'.format(os.stat(file_name).st_size, index[url]['Content-Length'])
477: reload = True
478:
479: # forcibly checking file if index holds Pragma header
480: if not reload and 'Pragma' in index[url] and index[url]['Pragma'] == 'no-cache':
481: info += '\nPragma on: recheck imminent.'
482: recheck = True
483:
484: # skipping file processing if there's no need to recheck it and we checked it within the last 4 hours
485: if not recheck and not reload and (options.noupdate or ('_time' in index[url] and (datetime.datetime.now() - datetime.timedelta(hours = 4) - index[url]['_time']).days < 0)):
486: if options.verbose:
487: print(info)
488: continue
489: else:
490: print(info)
491:
492: try:
493: with urllib.request.urlopen(options.root + url) as source:
494: new_headers = {}
495: headers = source.info()
496:
497: # stripping unneeded headers (XXX make this inplace?)
498: for header in headers:
499: if header in desc_fields:
500: if header == 'Pragma' and headers[header] != 'no-cache':
501: print('Pragma:', headers[header])
502: new_headers[header] = headers[header]
503: elif header not in ignore_fields:
504: print('Undefined header "', header, '": ', headers[header], sep='')
505:
506: # comparing headers with data found in index
507: # if any header has changed (except Pragma) file is fully downloaded
508: # same if we get more or less headers
509: old_keys = set(index[url].keys())
510: old_keys.discard('_time')
511: old_keys.discard('Pragma')
512: more_keys = set(new_headers.keys()) - old_keys
513: more_keys.discard('Pragma')
514: less_keys = old_keys - set(new_headers.keys())
515: if len(more_keys) > 0:
516: if len(old_keys) != 0:
517: print('More headers appear:', more_keys)
518: reload = True
519: elif len(less_keys) > 0:
520: print('Fewer headers appear:', less_keys)
521: else:
522: for key in index[url].keys():
523: if key[0] != '_' and key != 'Pragma' and index[url][key] != new_headers[key]:
524: print('Header "', key, '" changed from [', index[url][key], '] to [', new_headers[key], ']', sep='')
525: reload = True
526:
527: # downloading file
528: if reload:
529: if 'Content-Length' in headers:
530: print('Downloading', headers['Content-Length'], 'bytes [', end='')
531: else:
532: print('Downloading [', end='')
533: sys.stdout.flush()
534:
535: # file is created at temporary location and moved in place only when download completes
536: temp_file = open(temp_file_name, 'wb')
537: buffer = source.read(block_size)
538: megablocks = 0
539: blocks = 0
540: megs = 0
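# progress math: with block_size = 4096, 102400 // block_size = 25 blocks,
# so each '.' marks 100 KiB read and every tenth tick prints a running 'Mb' count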
541: while len(buffer) > 0:
542: temp_file.write(buffer)
543: buffer = source.read(block_size)
544: blocks += 1
545: if blocks > 102400 // block_size:
546: megablocks += 1
547: if megablocks > 10:
548: megablocks = megablocks - 10
549: megs += 1
550: print('{}Mb'.format(megs), end='')
551: else:
552: print('.', end='')
553: blocks = blocks - 102400 // block_size
554: sys.stdout.flush()
555: temp_file.close()
556: print(']')
557: os.renames(temp_file_name, file_name)
558:
559: checked_files += 1
560:
561: # storing new time mark and storing new headers
562: new_headers['_time'] = datetime.datetime.now()
563: index[url] = new_headers
564: index.sync()
565:
566: except urllib.error.HTTPError as error:
567: # in case of error we don't need to do anything, actually:
568: # if the download stalls or fails, the file is simply never moved into its final location
569: print(error)
570:
571: if options.verbose:
572: print('[', len(unchecked_files), '/', checked_files, ']')
573:
574: # if any files were checked this pass, run the whole sequence again; otherwise stop
575: if checked_files == 0:
576: break