Lines of
samesite.py
from check-in fb10031536
that are changed by the sequence of edits moving toward
check-in e7b837a681:
1: #!/usr/bin/env python3.1
2:
fb10031536 2010-08-21 3: import datetime, http.cookiejar, optparse, os, sys, shelve, re, urllib.request
fb10031536 2010-08-21 4:
fb10031536 2010-08-21 5: from spacemap import SpaceMap
fb10031536 2010-08-21 6:
fb10031536 2010-08-21 7: parser = optparse.OptionParser()
fb10031536 2010-08-21 8: parser.add_option('-v', '--verbose', action = 'store_true', dest = 'verbose', help = 'turns on verbose status notifications', metavar = 'bool', default = False)
fb10031536 2010-08-21 9: parser.add_option('-d', '--dir', action = 'store', dest = 'dir', help = 'specify directory where the files should be stored', metavar = 'string', default = None)
fb10031536 2010-08-21 10: parser.add_option('-r', '--root', action = 'store', dest = 'root', help = 'specify a site from which data should be mirrored', metavar = 'string', default = None)
fb10031536 2010-08-21 11: parser.add_option('-l', '--log', action = 'store', dest = 'log', help = 'specify a log file to process', metavar = 'string', default = None)
fb10031536 2010-08-21 12: parser.add_option('-e', '--skip-etag', action = 'store_true', dest = 'noetag', help = 'do not process etags', metavar = 'bool', default = False)
fb10031536 2010-08-21 13: parser.add_option('-p', '--port', action = 'store', dest = 'port', help = 'listen on this port for incoming connections', metavar = 'integer', default = None)
fb10031536 2010-08-21 14: parser.add_option('-n', '--no-update', action = 'store_true', dest = 'noupdate', help = 'do not update already downloaded files', metavar = 'bool', default = False)
fb10031536 2010-08-21 15: (options, args) = parser.parse_args()
fb10031536 2010-08-21 16:
fb10031536 2010-08-21 17: assert options.dir, 'Directory not specified'
fb10031536 2010-08-21 18: assert options.root, 'Server not specified'
fb10031536 2010-08-21 19: assert options.log or options.port, 'Log file or port not specified'
fb10031536 2010-08-21 20: assert options.port or os.access(options.log, os.R_OK), 'Log file unreadable'
fb10031536 2010-08-21 21:
fb10031536 2010-08-21 22: optionsDirWithSep = re.compile('^(.*?)/?$').match(options.dir)
fb10031536 2010-08-21 23: if optionsDirWithSep:
fb10031536 2010-08-21 24: options.dir = optionsDirWithSep.group(1)
fb10031536 2010-08-21 25:
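The lazy group with an optional trailing slash normalizes options.dir; a quick illustration with a made-up path:

    import re
    print(re.compile('^(.*?)/?$').match('/srv/mirror/').group(1))   # /srv/mirror
    print(re.compile('^(.*?)/?$').match('/srv/mirror').group(1))    # /srv/mirror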
fb10031536 2010-08-21 26: # this is file index - everything is stored in this file
fb10031536 2010-08-21 27: # _parts - list of stored parts of file
fb10031536 2010-08-21 28: # _time - last time the file was checked
fb10031536 2010-08-21 29: # everything else is just the headers
fb10031536 2010-08-21 30: index = shelve.open(options.dir + os.sep + '.index')
fb10031536 2010-08-21 31: desc_fields = ('Content-Length', 'Pragma', 'Last-Modified')
fb10031536 2010-08-21 32: ignore_fields = ('Accept-Ranges', 'Age', 'Cache-Control', 'Connection', 'Content-Type', 'Date', 'Expires', 'Server', 'Via', 'X-Cache', 'X-Cache-Lookup', 'X-Powered-By')
fb10031536 2010-08-21 33:
fb10031536 2010-08-21 34: if not options.noetag:
fb10031536 2010-08-21 35: desc_fields += 'ETag',
fb10031536 2010-08-21 36: else:
fb10031536 2010-08-21 37: ignore_fields += 'ETag',
38:
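To make the comment above concrete: a minimal, hypothetical sketch of one index record as this script keeps it. The shelf path and header values are invented; only the _parts/_time conventions come from the code.

    import datetime, os, shelve, tempfile

    work_dir = tempfile.mkdtemp()                      # stand-in for options.dir
    index = shelve.open(work_dir + os.sep + '.index')
    index['/pub/file.iso'] = {
        '_parts': None,                                # None - nothing cached yet; empty map - file complete
        '_time': datetime.datetime.now(),              # when the file was last checked
        'Content-Length': 1048576,                     # plus any retained desc_fields headers
        'Last-Modified': 'Sat, 21 Aug 2010 00:00:00 GMT',
    }
    index.sync()
    print(index['/pub/file.iso']['_parts'])            # None
    index.close()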
39: block_size = 4096
fb10031536 2010-08-21 40:
fb10031536 2010-08-21 41: temp_file_name = options.dir + os.sep + '.tmp'
42:
43: '''
44: # later, kqueue would be good but later
45: class Connection:
46: __slots__ = frozenset(('__address', '__input', '__socket', '__status', 'error', 'method', 'url', 'http_version'))
47:
48: def __init__(self, socket, address):
49: self.__address = address
50: self.__input = b''
51: self.__socket = socket
52: self.__status = 0
53:
54: def read(self, kev):
55: buffer = self.__socket.recv(kev.data)
56: exhausted = False
57: if len(buffer) == 0:
58: eof = True
59: else:
60: self.__input += buffer
61: while not exhausted:
62: if self.__status == -1:
63: exhausted = True
64: elif self.__status == 0:
65: endstring = self.__input.find(b'\n')
66: if endstring > 0:
67: print('Processing request line.')
68: line = self.__input[:endstring].decode('ascii')
69: self.__input = self.__input[endstring + 1:]
70: isRequest = re.compile('(GET) ([^ ]+) HTTP/(1\.0)').match(line)
71: if not isRequest:
72: self.error = 'Not a HTTP connection.'
73: self.__status = -1
74: else:
75: self.method = isRequest.group(1)
76: self.url = isRequest.group(2)
77: self.http_version = isRequest.group(3)
78: self.__status = 1
79: else:
80: exhausted = True
81: elif self.__status == 1:
82: endstring = self.__input.find(b'\n')
83: if endstring > 0:
84: print('Processing header line.' + repr(self.__input))
85: line = self.__input[:endstring].decode('ascii')
86: self.__input = self.__input[endstring + 1:]
87: isHeader = re.compile('([^:]*): +(.*)').match(line)
88: if not isHeader:
89: self.error = 'Bad header.'
90: return(False)
91: # process header here
92: elif endstring == 0:
93: self.__status = 2
94: else:
95: exhausted = True
96:
97: def write(self, kev):
98: pass
99:
100: if options.port:
101: import select, socket
102:
103: sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
104: try:
105: sock.bind(('127.0.0.1', int(options.port)))
106: sock.listen(-1)
107:
108: kq = select.kqueue()
109: assert kq.fileno() != -1, "Fatal error: can't initialise kqueue."
110:
111: kq.control([select.kevent(sock, select.KQ_FILTER_READ, select.KQ_EV_ADD)], 0)
112: timeout = None
113:
114: connections = {sock.fileno(): None}
115:
116: while True:
117: kevs = kq.control(None, 1, timeout)
118:
119: for kev in kevs:
120: if type(connections[kev.ident]) == Connection:
121: print(kev.ident, kev.data, kev.filter, kev.flags)
122: assert kev.data != 0, 'No data available.'
123: if kev.filter == select.KQ_FILTER_READ:
124: connections[kev.ident].read(kev)
125: elif kev.filter == select.KQ_FILTER_WRITE:
126: connections[kev.ident].write(kev)
127: else:
128: assert kev.filter in (select.KQ_FILTER_READ, select.KQ_FILTER_WRITE), 'Do we support other filters?'
129: else:
130: (conn, addr) = sock.accept()
131: print('Connection from ' + repr(addr))
132: kq.control([select.kevent(conn, select.KQ_FILTER_READ, select.KQ_EV_ADD)], 0)
133: connections[conn.fileno()] = Connection(conn, addr)
134:
135: if kev.flags >> 15 == 1:
136: kq.control([select.kevent(kev.ident, select.KQ_FILTER_READ, select.KQ_EV_DELETE)], 0)
137: kq.control([select.kevent(kev.ident, select.KQ_FILTER_WRITE, select.KQ_EV_DELETE)], 0)
138: del(connections[kev.ident])
139: finally:
140: sock.close()
141: '''
142:
fb10031536 2010-08-21 143: if options.port:
144: import http.server
145:
146: class MyRequestHandler(http.server.BaseHTTPRequestHandler):
147: def __process(self):
148: # reload means file needs to be reloaded to serve request
149: reload = False
150: 			# recheck means the file needs to be checked; if it has been modified we can still serve the older copy
151: recheck = False
152: # file_stat means file definitely exists
153: file_stat = None
154: # requested_ranges holds data about any range requested
155: requested_ranges = None
156: 			# record holds this file's index data locally; it is written back upon successful completion
157: record = None
158: info = 'Checking file: ' + self.path
159:
160: 			myPath = re.compile(r'^(.*?)(\?.*)$').match(self.path)
161: if myPath:
162: my_path = myPath.group(1)
163: else:
164: my_path = self.path
165:
166: proxy_ignored = ('Accept', 'Accept-Encoding',
167: 'Cache-Control', 'Connection',
168: 'Host',
169: 'If-Modified-Since', 'If-Unmodified-Since',
170: 'User-Agent',
171: 'Via',
172: 'X-Forwarded-For',
173: )
174:
175: print('===============[ {} request ]==='.format(self.command))
176:
177: for header in self.headers:
178: if header in proxy_ignored:
179: pass
180: 				elif header in ('Range',):
181: 					isRange = re.compile(r'bytes=(\d+)-(\d+)').match(self.headers[header])
182: if isRange:
fb10031536 2010-08-21 183: requested_ranges = SpaceMap({int(isRange.group(1)): int(isRange.group(2)) + 1})
184: else:
185: return()
186: else:
187: print('Unknown header - ', header, ': ', self.headers[header], sep='')
188: return()
189: print(header, self.headers[header])
190:
191: # creating empty placeholder in index
192: 			# if there's no space map and no file in the real directory - we have no file
193: 			# if there's an empty space map - the file is complete
194: 			# the space map covers every byte of the file we don't currently possess
195: if not my_path in index:
196: info += '\nThis one is new.'
197: reload = True
fb10031536 2010-08-21 198: record = {'_parts': None}
199: else:
200: record = index[my_path]
fb10031536 2010-08-21 201: 				if '_parts' in record:
fb10031536 2010-08-21 202: 					if record['_parts'] == {0: -1}:
fb10031536 2010-08-21 203: 						record['_parts'] = None	# with shelve, assigning via index[my_path][...] would only mutate a throwaway copy
204:
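spacemap.SpaceMap is the project's own interval type and its API isn't shown here, but the bookkeeping the comments above describe can be sketched with plain byte sets (all numbers invented):

    missing = set(range(0, 100))     # SpaceMap({0: 100}): every byte still missing
    missing -= set(range(0, 40))     # after a downloaded chunk: _parts - SpaceMap({0: 40})
    wanted = set(range(30, 60))      # Range: bytes=30-59 -> SpaceMap({30: 60})
    needed = missing & wanted        # record['_parts'] & requested_ranges
    print(min(needed), max(needed))  # 40 59: only these bytes must still be fetched
    print(len(missing) == 0)         # True only once the map is empty, i.e. the file is complete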
205: # creating file name from my_path
fb10031536 2010-08-21 206: file_name = options.dir + os.sep + re.compile('%20').sub(' ', my_path)
207: # partial file or unfinished download
fb10031536 2010-08-21 208: temp_name = options.dir + os.sep + '.parts' + re.compile('%20').sub(' ', my_path)
209:
210: # forcibly checking file if no file present
211: if os.access(file_name, os.R_OK):
212: file_stat = os.stat(file_name)
213: elif '_parts' in record and os.access(temp_name, os.R_OK):
214: file_stat = os.stat(temp_name)
215: elif not reload:
216: info += '\nFile not found or inaccessible.'
fb10031536 2010-08-21 217: record = {'_parts': None}
218: reload = True
219:
220: # forcibly checking file if file size doesn't match with index data
221: if not reload:
fb10031536 2010-08-21 222: if '_parts' in record and record['_parts'] == SpaceMap():
223: if 'Content-Length' in record and file_stat and file_stat.st_size != int(record['Content-Length']):
224: info += '\nFile size is {} and stored file size is {}.'.format(file_stat.st_size, record['Content-Length'])
225: reload = True
226:
227: # forcibly checking file if index holds Pragma header
228: if not reload and 'Pragma' in record and record['Pragma'] == 'no-cache':
229: 				info += '\nPragma on: recheck imminent.'
230: recheck = True
231:
232: 			# if the file was checked less than 4 hours ago a recheck is enough - an older copy can be served while revalidating
233: if not recheck and not reload and '_time' in record and (datetime.datetime.now() - datetime.timedelta(hours = 4) - record['_time']).days < 0:
234: recheck = True
235:
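The test above leans on timedelta sign handling: a negative difference has days == -1, so '.days < 0' is true exactly when _time is newer than four hours ago. For instance:

    import datetime
    checked = datetime.datetime.now() - datetime.timedelta(hours = 1)   # checked an hour ago
    print((datetime.datetime.now() - datetime.timedelta(hours = 4) - checked).days < 0)   # True: fresh
    checked = datetime.datetime.now() - datetime.timedelta(hours = 5)   # checked five hours ago
    print((datetime.datetime.now() - datetime.timedelta(hours = 4) - checked).days < 0)   # False: stale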
236: print(info)
237: if reload or recheck:
238:
239: try:
fb10031536 2010-08-21 240: request = options.root + my_path
241: needed = None
242: if requested_ranges != None:
243: if '_parts' in record and record['_parts'] != None:
244: needed = record['_parts'] & requested_ranges
245: else:
246: needed = requested_ranges
247: ranges = ()
248: print('Missing ranges: {}, requested ranges: {}, needed ranges: {}.'.format(record['_parts'], requested_ranges, needed))
249: if len(needed) > 0:
250: needed.rewind()
251: while True:
252: range = needed.pop()
253: if range[0] == None:
254: break
255: ranges += '{}-{}'.format(range[0], range[1] - 1),
256: request = urllib.request.Request(request, headers = {'Range': 'bytes=' + ','.join(ranges)})
257:
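The joined ranges end up as a single Range header on the upstream request. A sketch with invented gaps (end-inclusive, hence the '- 1' above; note the script refuses multipart responses further down):

    import urllib.request
    ranges = ('0-4095', '8192-12287')          # hypothetical missing spans
    req = urllib.request.Request('http://example.com/pub/file.iso',
            headers = {'Range': 'bytes=' + ','.join(ranges)})
    print(req.get_header('Range'))             # bytes=0-4095,8192-12287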
258: with urllib.request.urlopen(request) as source:
259: new_record = {}
260: new_record['_parts'] = record['_parts']
261: headers = source.info()
262:
263: # stripping unneeded headers (XXX make this inplace?)
264: for header in headers:
265: if header in desc_fields:
266: #if header == 'Pragma' and headers[header] != 'no-cache':
267: if header == 'Content-Length':
268: if 'Content-Range' not in headers:
269: new_record[header] = int(headers[header])
270: else:
271: new_record[header] = headers[header]
272: elif header == 'Content-Range':
273: 							range = re.compile(r'^bytes (\d+)-(\d+)/(\d+)$').match(headers[header])
274: if range:
275: new_record['Content-Length'] = int(range.group(3))
276: else:
277: assert False, 'Content-Range unrecognized.'
278: elif not header in ignore_fields:
279: print('Undefined header "', header, '": ', headers[header], sep='')
280:
281: # comparing headers with data found in index
282: 					# if any header except Pragma has changed, the file is re-downloaded in full
283: 					# likewise if headers appeared or disappeared
284: old_keys = set(record.keys())
285: old_keys.discard('_time')
286: old_keys.discard('Pragma')
287: more_keys = set(new_record.keys()) - old_keys
288: more_keys.discard('Pragma')
289: less_keys = old_keys - set(new_record.keys())
290: if len(more_keys) > 0:
291: if not len(old_keys) == 0:
292: print('More headers appear:', more_keys)
293: reload = True
294: elif len(less_keys) > 0:
295: 						print('Headers disappeared:', less_keys)
296: else:
297: for key in record.keys():
298: if key[0] != '_' and key != 'Pragma' and not record[key] == new_record[key]:
299: print('Header "', key, '" changed from [', record[key], '] to [', new_record[key], ']', sep='')
300: print(type(record[key]), type(new_record[key]))
301: reload = True
302:
303: if reload:
304: print('Reloading.')
305: if os.access(temp_name, os.R_OK):
306: os.unlink(temp_name)
307: if os.access(file_name, os.R_OK):
308: os.unlink(file_name)
309: if new_record['_parts'] == None or reload:
fb10031536 2010-08-21 310: new_record['_parts'] = SpaceMap({0: int(new_record['Content-Length'])})
311: print(new_record)
312:
313: # downloading file or segment
314: if 'Content-Length' in new_record:
315: if needed == None:
316: needed = new_record['_parts']
317: else:
318: if len(needed) > 1:
319: print("Multipart requests currently not supported.")
320: assert False, 'Skip this one for now.'
321: else:
322: assert False, 'No Content-Length or Content-Range header.'
323:
324: new_record['_time'] = datetime.datetime.now()
325: 					if self.command not in ('HEAD',):
326: # file is created at temporary location and moved in place only when download completes
327: if not os.access(temp_name, os.R_OK):
fb10031536 2010-08-21 328: 							empty_name = temp_file_name
329: with open(empty_name, 'w+b') as some_file:
330: pass
331: os.renames(empty_name, temp_name)
332: temp_file = open(temp_name, 'r+b')
333: needed.rewind()
334: while True:
335: (start, end) = needed.pop()
336: if start == None:
337: break
338: stream_last = start
339: 						old_record = dict(new_record)	# copy, not alias: the index write below must lag one step behind the temp file
340: if end - start < block_size:
341: req_block_size = end - start
342: else:
343: req_block_size = block_size
344: buffer = source.read(req_block_size)
345: length = len(buffer)
346: while length > 0 and stream_last < end:
347: stream_pos = stream_last + length
348: 								assert stream_pos <= end, 'Received more data than requested: pos:{} start:{} end:{}.'.format(stream_pos, start, end)
349: temp_file.seek(stream_last)
350: temp_file.write(buffer)
fb10031536 2010-08-21 351: new_record['_parts'] = new_record['_parts'] - SpaceMap({stream_last: stream_pos})
352: index[my_path] = old_record
353: index.sync()
354: 							old_record = dict(new_record)
355: stream_last = stream_pos
356: if end - stream_last < block_size:
357: req_block_size = end - stream_last
358: buffer = source.read(req_block_size)
359: length = len(buffer)
360: # moving downloaded data to real file
361: temp_file.close()
fb10031536 2010-08-21 362: if new_record['_parts'] == SpaceMap():
fb10031536 2010-08-21 363: # just moving
fb10031536 2010-08-21 364: # drop old dirs XXX
fb10031536 2010-08-21 365: print('Moving temporary file to new destination.')
fb10031536 2010-08-21 366: os.renames(temp_name, file_name)
367:
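The create-empty-then-renames dance above, in isolation (paths invented). os.renames builds the destination's directories and prunes the now-empty source directories, which is why the '.parts' tree vanishes once a file completes:

    import os, tempfile
    base = tempfile.mkdtemp()
    temp_name = base + os.sep + '.parts' + os.sep + 'pub' + os.sep + 'file.bin'
    file_name = base + os.sep + 'pub' + os.sep + 'file.bin'
    os.makedirs(os.path.dirname(temp_name))
    with open(temp_name, 'w+b') as temp_file:
        temp_file.write(b'payload')                          # partial data lands here first
    os.renames(temp_name, file_name)                         # moved in place only when complete
    print(os.access(file_name, os.R_OK))                     # True
    print(os.access(os.path.dirname(temp_name), os.R_OK))    # False: empty .parts dirs pruned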
368: print(new_record)
369: index[my_path] = new_record
370: index.sync()
371:
372: except urllib.error.HTTPError as error:
373: 				# in case of error we don't actually need to do anything:
374: 				# if the download stalls or fails, the file is never moved to its final location
375: print(error)
376:
377: if self.command == 'HEAD':
378: self.send_response(200)
379: if 'Content-Length' in index[my_path]:
380: self.send_header('Content-Length', index[my_path]['Content-Length'])
381: self.send_header('Accept-Ranges', 'bytes')
382: self.send_header('Content-Type', 'application/octet-stream')
383: if 'Last-Modified' in index[my_path]:
384: self.send_header('Last-Modified', index[my_path]['Last-Modified'])
385: self.end_headers()
386: else:
fb10031536 2010-08-21 387: if index[my_path]['_parts'] != SpaceMap():
388: file_name = temp_name
389:
390: with open(file_name, 'rb') as real_file:
391: file_stat = os.stat(file_name)
392: if 'Range' in self.headers:
393: self.send_response(206)
394: ranges = ()
395: requested_ranges.rewind()
396: while True:
397: pair = requested_ranges.pop()
398: if pair[0] == None:
399: break
400: ranges += '{}-{}'.format(pair[0], str(pair[1] - 1)),
401: self.send_header('Content-Range', 'bytes {}/{}'.format(','.join(ranges), index[my_path]['Content-Length']))
402: else:
403: self.send_response(200)
404: self.send_header('Content-Length', str(file_stat.st_size))
fb10031536 2010-08-21 405: requested_ranges = SpaceMap({0: file_stat.st_size})
406: self.send_header('Last-Modified', index[my_path]['Last-Modified'])
407: self.send_header('Content-Type', 'application/octet-stream')
408: self.end_headers()
409: 					if self.command in ('GET',):
410: if len(requested_ranges) > 0:
411: requested_ranges.rewind()
412: (start, end) = requested_ranges.pop()
413: else:
414: start = 0
415: end = index[my_path]['Content-Length']
416: real_file.seek(start)
417: if block_size > end - start:
418: req_block_size = end - start
419: else:
420: req_block_size = block_size
421: buffer = real_file.read(req_block_size)
422: length = len(buffer)
423: while length > 0:
424: self.wfile.write(buffer)
425: start += len(buffer)
426: if req_block_size > end - start:
427: req_block_size = end - start
428: if req_block_size == 0:
429: break
430: buffer = real_file.read(req_block_size)
431: length = len(buffer)
432:
433: def do_HEAD(self):
434: return self.__process()
435: def do_GET(self):
436: return self.__process()
437:
fb10031536 2010-08-21 438: server = http.server.HTTPServer(('127.0.0.1', int(options.port)), MyRequestHandler)
439: server.serve_forever()
440:
441: else:
442: while True:
443: unchecked_files = set()
444: checked_files = 0
445:
446: # reading log and storing found urls for processing
447: # check file mtime XXX
448: with open(options.log, 'r') as log_file:
449: 			log_line = re.compile(r'^[^ ]+ - - \[.*] "(GET|HEAD) (.*?)(\?.*)? HTTP/1.1" (\d+) \d+ "(.*)" "(.*)"$')
450: for line in log_file:
451: this_line = log_line.match(line.strip())
452: if this_line:
453: unchecked_files.add(this_line.group(2))
454:
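The pattern targets combined-format access log lines; a made-up example of what it captures:

    import re
    log_line = re.compile(r'^[^ ]+ - - \[.*] "(GET|HEAD) (.*?)(\?.*)? HTTP/1.1" (\d+) \d+ "(.*)" "(.*)"$')
    sample = '127.0.0.1 - - [21/Aug/2010:12:00:00 +0000] "GET /pub/file.iso?x=1 HTTP/1.1" 200 4096 "-" "wget"'
    print(log_line.match(sample).group(2))   # /pub/file.iso - the query string is split off into group 3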
455: for url in unchecked_files:
456: reload = False
457: recheck = False
458: info = 'Checking file: ' + url
459:
460: # creating empty placeholder in index
461: if not url in index:
462: info += '\nThis one is new.'
463: index[url] = {}
464: reload = True
465:
466: # creating file name from url
467: file_name = options.dir + re.compile('%20').sub(' ', url)
468:
469: # forcibly checking file if no file present
470: if not reload and not os.access(file_name, os.R_OK):
471: info += '\nFile not found or inaccessible.'
472: reload = True
473:
474: # forcibly checking file if file size doesn't match with index data
475: elif not reload and 'Content-Length' in index[url] and os.stat(file_name).st_size != int(index[url]['Content-Length']):
476: 				info += '\nFile size is {} and stored file size is {}.'.format(os.stat(file_name).st_size, index[url]['Content-Length'])
477: reload = True
478:
479: 			# forcibly checking file if index holds Pragma header
480: 			if not reload and 'Pragma' in index[url] and index[url]['Pragma'] == 'no-cache':
481: 				info += '\nPragma on: recheck imminent.'
482: recheck = True
483:
484: 			# skipping file processing if there's no need to recheck it and it was checked less than 4 hours ago (or updates are disabled)
485: if not recheck and not reload and (options.noupdate or ('_time' in index[url] and (datetime.datetime.now() - datetime.timedelta(hours = 4) - index[url]['_time']).days < 0)):
486: if options.verbose:
487: print(info)
488: continue
489: else:
490: print(info)
491:
492: try:
493: with urllib.request.urlopen(options.root + url) as source:
494: new_headers = {}
495: headers = source.info()
496:
497: # stripping unneeded headers (XXX make this inplace?)
498: for header in headers:
499: if header in desc_fields:
500: if header == 'Pragma' and headers[header] != 'no-cache':
501: print('Pragma:', headers[header])
502: new_headers[header] = headers[header]
503: elif not header in ignore_fields:
504: print('Undefined header "', header, '": ', headers[header], sep='')
505:
506: # comparing headers with data found in index
507: 				# if any header except Pragma has changed, the file is re-downloaded in full
508: 				# likewise if headers appeared or disappeared
509: old_keys = set(index[url].keys())
510: old_keys.discard('_time')
511: old_keys.discard('Pragma')
512: more_keys = set(new_headers.keys()) - old_keys
513: more_keys.discard('Pragma')
514: less_keys = old_keys - set(new_headers.keys())
515: if len(more_keys) > 0:
516: if not len(old_keys) == 0:
517: print('More headers appear:', more_keys)
518: reload = True
519: elif len(less_keys) > 0:
520: 					print('Headers disappeared:', less_keys)
521: else:
522: for key in index[url].keys():
523: if key[0] != '_' and key != 'Pragma' and not index[url][key] == new_headers[key]:
524: print('Header "', key, '" changed from [', index[url][key], '] to [', new_headers[key], ']', sep='')
525: reload = True
526:
527: # downloading file
528: if reload:
529: if 'Content-Length' in headers:
530: print('Downloading', headers['Content-Length'], 'bytes [', end='')
531: else:
532: print('Downloading [', end='')
533: sys.stdout.flush()
534:
535: # file is created at temporary location and moved in place only when download completes
536: 					temp_file = open(temp_file_name, 'wb')
537: buffer = source.read(block_size)
538: megablocks = 0
539: blocks = 0
540: megs = 0
541: while len(buffer) > 0:
542: temp_file.write(buffer)
543: buffer = source.read(block_size)
544: blocks += 1
545: if blocks > 102400/block_size:
546: megablocks += 1
547: if megablocks > 10:
548: megablocks = megablocks - 10
549: megs += 1
550: print('{}Mb'.format(megs), end='')
551: else:
552: print('.', end='')
553: blocks = blocks - 102400/block_size
554: sys.stdout.flush()
555: temp_file.close()
556: print(']')
557: 					os.renames(temp_file_name, file_name)
558:
559: checked_files += 1
560:
561: # storing new time mark and storing new headers
562: new_headers['_time'] = datetime.datetime.now()
563: index[url] = new_headers
564: index.sync()
565:
566: except urllib.error.HTTPError as error:
567: 				# in case of error we don't actually need to do anything:
568: 				# if the download stalls or fails, the file is never moved to its final location
569: print(error)
570:
571: if options.verbose:
572: print('[', len(unchecked_files), '/', checked_files, ']')
573:
574: # checking if there were any files downloaded, if yes - restarting sequence
575: if checked_files == 0:
576: break