columns: rem (code removed by the commit) - add (code added) - context (surrounding code)
rem:
    if self.pointer == i:
        clr.append('selected')
add:
    y = i + 1
    clr = deque(base_clr)
context:
    def draw(self):
        base_clr = deque()
        base_clr.append('in_taskview')
        lst = self.get_list()

rem:
    descr = obj.get_description()
    self.addstr(y, 0, descr, self.wid)
    self.color_at(y, 0, self.wid, clr)
add:
    if self.pointer == i:
        clr.append('selected')
context: same draw() context as above

rem:
    else:
        if self.hei > 1:
            self.addstr(1, 0, "No task in the queue.")
            self.color_at(1, 0, self.wid, base_clr, 'error')
add:
    descr = obj.get_description()
    self.addstr(y, 0, descr, self.wid)
    self.color_at(y, 0, self.wid, clr)
context: same draw() context as above

rem:
    self.color_reset()
add:
    else:
        if self.hei > 1:
            self.addstr(1, 0, "No task in the queue.")
            self.color_at(1, 0, self.wid, base_clr, 'error')
    self.color_reset()
context: same draw() context as above
rem:
    alias(e=quit)
add:
    alias(e=edit)
context:
    def alias(**kw):
        for key, value in kw.items():
            by_name[key] = value
add:
    @depends_on(app_editor, Applications.app_self)
context:
    def app_editor(self, c):
        try:
            default_editor = os.environ['EDITOR']
        except KeyError:
            pass
        else:
            parts = default_editor.split()
            exe_name = os.path.basename(parts[0])
            if exe_name in get_executables():
                return tuple(parts) + tuple(c)
rem:
    sys.stdout.write("\033]2;" + hostname +
                     self.fm.env.pwd.path + "\007")
add:
    sys.stdout.write("\033]2;" + hostname + cwd + "\007")
context:
    def draw(self):
        """Erase the window, then draw all objects in the container"""
        self.win.touchwin()
        DisplayableContainer.draw(self)
        if self.settings.update_title:
            hostname = str(socket.gethostname())
            try:
                cwd = self.fm.env.pwd.path
            except:
                cwd = ' - ranger'
            sys.stdout.write("\033]2;" + hostname +
                             self.fm.env.pwd.path + "\007")
        self.win.refresh()
add:
    import os
context:
    def load_content_if_outdated(self, *a, **k):
        """ Load the contents of the directory if it's outdated or not done yet

rem:
    cached_mtime = self.stat.st_mtime
add:
    cached_mtime = self.load_content_mtime
context: same load_content_if_outdated() context as above
rem:
    return True
add:
    return False
context:
    def has_preview(self):
        if not self.fm.settings.preview_files:
            return False
        if self.is_socket or self.is_fifo or self.is_device:
            return False
        if not self.accessible:
            return False
        if self.image or self.container:
            return True
        if PREVIEW_WHITELIST.search(self.basename):
            return True
        if PREVIEW_BLACKLIST.search(self.basename):
            return False
        if self.path == '/dev/core' or self.path == '/proc/kcore':
            return False
        if self.is_binary():
            return False
        return True

add:
    if self.fm.settings.preview_script:
        return True
context:
    def has_preview(self):
        if self.fm.settings.preview_script:
            return True
        if not self.fm.settings.preview_files:
            return False
        if self.is_socket or self.is_fifo or self.is_device:
            return False
        if not self.accessible:
            return False
        if self.image or self.container:
            return False
        if PREVIEW_WHITELIST.search(self.basename):
            return True
        if PREVIEW_BLACKLIST.search(self.basename):
            return False
        if self.path == '/dev/core' or self.path == '/proc/kcore':
            return False
        if self.is_binary():
            return False
        return True
rem:
    self.notify("%s complete" % path)
context:
    def on_after(signal):
        self.notify("%s complete" % path)
        exit = signal.process.poll()
        content = signal.loader.stdout_buffer
        content += signal.process.stdout.read()
        if exit == 0:
            data[(width, height)] = content
        elif exit == 3:
            data[(-1, height)] = content
        elif exit == 4:
            data[(width, -1)] = content
        elif exit == 5:
            data[(-1, -1)] = content
        else:
            data[(-1, -1)] = None  # XXX
        if self.env.cf.path == path:
            self.ui.browser.pager.need_redraw = True
            self.ui.browser.need_redraw = True
        data['loading'] = False
rem:
    result = self.settings.collapse_preview and self.preview and \
             not self.columns[-1].has_preview() and self.stretch_ratios
    if result:
        return True
    if self.columns[-1].target:
        target = self.columns[-1].target
add:
    if not self.settings.collapse_preview or not self.preview \
            or not self.stretch_ratios:
        return False
    result = not self.columns[-1].has_preview()
    target = self.columns[-1].target
    if not result and target and target.is_file:
context:
    def _collapse(self):
        # Should the last column be cut off? (Because there is no preview)
        result = self.settings.collapse_preview and self.preview and \
                 not self.columns[-1].has_preview() and self.stretch_ratios
        if result:
            return True
        if self.columns[-1].target:
            target = self.columns[-1].target
            try:
                result = not self.fm.previews[target.realpath]['foundpreview']
            except:
                return self.old_collapse
        self.old_collapse = result
        return result
rem:
    for child in reversed(rows):
        if not child.has_preview():
            right_end = child.x - 1
        else:
            break
    if right_end < left_start:
        right_end = self.wid - 1
add:
    if not self.pager.visible:
        for child in reversed(rows):
            if not child.has_preview():
                right_end = child.x - 1
            else:
                break
        if right_end < left_start:
            right_end = self.wid - 1
context:
    def _draw_borders(self):
        win = self.win
        self.color('in_browser', 'border')
rem:
    max(1, self.wid - left))
add:
    max(1, self.wid - left - pad))
context:
    def resize(self, y, x, hei, wid):
        """Resize all the columns according to the given ratio"""
        DisplayableContainer.resize(self, y, x, hei, wid)
        borders = self.settings.draw_borders
        pad = 1 if borders else 0
        left = pad
rem:
    if self.end_hook(process):
add:
    if self.end_hook:
context:
    def generate(self):
        self.process = process = Popen(self.args,
                                       stdout=open(os.devnull, 'w'),
                                       stderr=PIPE)
        if self.begin_hook:
            self.begin_hook(process)
        while process.poll() is None:
            rd, _, __ = select.select([process.stderr], [], [], 0.05)
            if rd:
                self.notify(process.stderr.readline(), bad=True)
            sleep(0.02)
            yield
        if self.end_hook(process):
            self.end_hook(process)
rem:
    and cls.allow_abbrev]
add:
    and cls.allow_abbrev \
    or cmd == name]
context:
    def get_command(name, abbrev=True):
        if abbrev:
            lst = [cls for cmd, cls in by_name.items()
                   if cmd.startswith(name) and cls.allow_abbrev]
            if len(lst) == 0:
                raise KeyError
            if len(lst) == 1:
                return lst[0]
            raise ValueError("Ambiguous command")
        else:
            try:
                return by_name[name]
            except KeyError:
                return None
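A self-contained sketch of the lookup rule this hunk introduces: an exact name match wins even when the name is also an ambiguous prefix of other commands. The registry and the `delete`/`delay` command classes are invented for illustration.

    # Hypothetical command registry; the real one is populated elsewhere.
    by_name = {}

    class Command(object):
        allow_abbrev = True

    class delete(Command):
        pass

    class delay(Command):
        pass

    for cls in (delete, delay):
        by_name[cls.__name__] = cls

    def get_command(name, abbrev=True):
        if abbrev:
            # `and` binds tighter than `or`: exact matches are kept
            # even for commands that forbid abbreviation.
            lst = [cls for cmd, cls in by_name.items()
                   if cmd.startswith(name) and cls.allow_abbrev
                   or cmd == name]
            if not lst:
                raise KeyError(name)
            if len(lst) == 1:
                return lst[0]
            raise ValueError("Ambiguous command")
        return by_name.get(name)

    print(get_command("dela"))   # delay: unique prefix
    print(get_command("delay"))  # delay: exact match
    # get_command("del") raises ValueError: both commands share the prefix.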
rem:
    i = absolute
add:
    if absolute < 0:
        i = absolute + len(self.lines)
    else:
        i = absolute
context:
    def move(self, relative=0, absolute=None, pages=False):
        i = self.scroll_begin
        if isinstance(absolute, int):
            i = absolute
rem:
    if chr(key) in digits and ANYKEY not in tree:
add:
    if key in digitlist and ANYKEY not in tree:
context:
    def _do_eval_quantifier(self, key):
        if self.eval_command:
            tree = self.tree_pointer
        else:
            tree = self.dir_tree_pointer
        if chr(key) in digits and ANYKEY not in tree:
            attr = self.eval_command and 'quant' or 'direction_quant'
            if getattr(self, attr) is None:
                setattr(self, attr, 0)
            setattr(self, attr, getattr(self, attr) * 10 + key - 48)
        else:
            self.eval_quantifier = False
            return None
        return True

rem:
    chr(key) in digits or self.direction_keys._tree[key]
add:
    key in digitlist or self.direction_keys._tree[key]
context:
    def _do_eval_command(self, key):
        assert isinstance(self.tree_pointer, dict), self.tree_pointer
        try:
            self.tree_pointer = self.tree_pointer[key]
        except TypeError:
            self.failure = True
            return None
        except KeyError:
            try:
                chr(key) in digits or self.direction_keys._tree[key]
                self.tree_pointer = self.tree_pointer[DIRKEY]
            except KeyError:
                try:
                    self.tree_pointer = self.tree_pointer[ANYKEY]
                except KeyError:
                    self.failure = True
                    return None
                else:
                    self.matches.append(key)
                    assert isinstance(self.tree_pointer, (Binding, dict))
                    self._try_to_finish()
            else:
                assert isinstance(self.tree_pointer, (Binding, dict))
                self.eval_command = False
                self.eval_quantifier = True
                self.dir_tree_pointer = self.direction_keys._tree
        else:
            if isinstance(self.tree_pointer, dict):
                try:
                    self.command = self.tree_pointer[PASSIVE_ACTION]
                except (KeyError, TypeError):
                    self.command = None
            self._try_to_finish()
rem:
    kw['stdout'] = sys.stderr
add:
    kw['stdout'] = sys.stdout
context:
    def run(self):
        """ Run the application in the way specified by the options.
log("freeing {0}".format(self))
def __del__(self): log("freeing {0}".format(self)) for handler in self.handlers: self.settings.signal_unbind(handler)
rem:
    for key, val in special_keys.items():
add:
    for key, val in tuple(special_keys.items()):
context:
    def parse_keybinding(obj):
        """
        Translate a keybinding to a sequence of integers

        Example:
        lol<CR>   => (ord('l'), ord('o'), ord('l'), ord('\n'))
                  => (108, 111, 108, 10)
        x<A-Left> => (120, (27, curses.KEY_LEFT))
        """
        assert isinstance(obj, (tuple, int, str))
        if isinstance(obj, tuple):
            for char in obj:
                yield char
        elif isinstance(obj, int):
            yield obj
        elif isinstance(obj, str):
            in_brackets = False
            bracket_content = None
            for char in obj:
                if in_brackets:
                    if char == '>':
                        in_brackets = False
                        string = ''.join(bracket_content).lower()
                        try:
                            keys = special_keys[string]
                            for key in keys:
                                yield key
                        except KeyError:
                            yield ord('<')
                            for c in bracket_content:
                                yield ord(c)
                            yield ord('>')
                        except TypeError:
                            yield keys  # it was no tuple, just an int
                    else:
                        bracket_content.append(char)
                else:
                    if char == '<':
                        in_brackets = True
                        bracket_content = []
                    else:
                        yield ord(char)
            if in_brackets:
                yield ord('<')
                for c in bracket_content:
                    yield ord(c)
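A small usage sketch for the generator above, assuming it is in scope together with a toy `special_keys` table (the real module defines many more entries; per the TypeError branch, values may be plain ints as well as tuples):

    # Hypothetical minimal special_keys table.
    special_keys = {'cr': 10}

    print(tuple(parse_keybinding('lol<CR>')))
    # (108, 111, 108, 10)
    print(tuple(parse_keybinding('a<Nope>')))
    # unknown tag falls back to literal characters:
    # (97, 60, 78, 111, 112, 101, 62)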
rem:
    default_editor = os.environ['EDITOR']
    parts = default_editor.split()
    exe_name = os.path.basename(parts[0])
add:
    try:
        default_editor = os.environ['EDITOR']
    except KeyError:
        pass
    else:
        parts = default_editor.split()
        exe_name = os.path.basename(parts[0])
        if exe_name in self.fm.executables:
            return tuple(parts) + tuple(c)
context:
    def app_editor(self, c):
        default_editor = os.environ['EDITOR']
        parts = default_editor.split()
        exe_name = os.path.basename(parts[0])

rem:
    if exe_name in self.fm.executables:
        return tuple(parts) + tuple(c)
    else:
        return self.either(c, 'vim', 'emacs', 'nano')
add:
    return self.either(c, 'vim', 'emacs', 'nano')
context: same app_editor() context as above
rem:
    pos = self.wid - 3
add:
    pos = self.wid - 1
context:
    def click(self, event):
        """Handle a MouseEvent"""
        direction = event.mouse_wheel_direction()
        if direction:
            self.fm.tab_move(direction)
            self.need_redraw = True
            return True
rem:
    def open_console(self, mode=':', string='', prompt=None):
add:
    def open_console(self, mode=cmode.COMMAND, string='', prompt=None):
context:
    def open_console(self, mode=':', string='', prompt=None):
        """Open the console if the current UI supports that"""
        if hasattr(self.ui, 'open_console'):
            self.ui.open_console(mode, string, prompt=prompt)
rem:
    map('zd', fm.toggle_boolean_option('directories_first'))
add:
    map('zd', fm.toggle_boolean_option('sort_directories_first'))
context:
    def move(arg):
        arg.wdg.move(narg=arg.n, **arg.direction)

rem:
    map('<home>', wdg.move(right=0, absolute=True))
    map('<end>', wdg.move(right=-1, absolute=True))
add:
    map('<home>', '<C-A>', wdg.move(right=0, absolute=True))
    map('<end>', '<C-E>', wdg.move(right=-1, absolute=True))
context:
    def ctrl_c(arg):
        try:
            item = arg.fm.loader.queue[0]
        except:
            arg.fm.notify("Type Q or :quit<Enter> to exit Ranger")
        else:
            arg.fm.notify("Aborting: " + item.get_description())
            arg.fm.loader.remove(index=0)

rem:
    map('<backspace>', wdg.delete(-1))
    map('<delete>', wdg.delete(0))
add:
    map('<backspace>', '<C-H>', wdg.delete(-1))
    map('<delete>', '<C-D>', wdg.delete(0))
context: same ctrl_c() context as above
rem:
    obj = CommandLoader(args=['mv'] + mv_flags
                        + [f.path for f in copied_files]
add:
    obj = CommandLoader(args=['mv'] + mv_flags \
                        + [f.path for f in copied_files] \
context:
    def refresh(_):
        cwd = self.env.get_directory(original_path)
        cwd.load_content()

rem:
    obj = CommandLoader(args=['cp', '-f'] + cp_flags
add:
    obj = CommandLoader(args=['cp', '-f'] + cp_flags \
context: same refresh() context as above

rem:
    obj = CommandLoader(args=['cp'] + cp_flags
                        + [f.path for f in copied_files]
add:
    obj = CommandLoader(args=['cp'] + cp_flags \
                        + [f.path for f in copied_files] \
context: same refresh() context as above
rem:
    if ranger.debug:
add:
    if ranger.arg.debug:
context:
    def notify(self, text, duration=4, bad=False):
        if isinstance(text, Exception):
            if ranger.debug:
                raise
            bad = True
        text = str(text)
        self.log.appendleft(text)
        if hasattr(self.ui, 'notify'):
            self.ui.notify(text, duration=duration, bad=bad)
rem:
    minY = goodY.min()
    yRange = maxY - minY
context:
    def plotHistogram(coaddName, chiSqOrder):
        coadd = pyfits.open(coaddName)
        coaddData = coadd[0].data
        # undo normalization
        coaddData *= float(chiSqOrder)
        # get rid of nans and infs
        goodData = numpy.extract(numpy.isfinite(coaddData.flat),
                                 coaddData.flat)
        goodData = numpy.extract(goodData < 50, goodData)
        hist, binEdges = numpy.histogram(goodData, bins=NBins)
        hist = numpy.array(hist, dtype=float)
        hist /= hist.sum()
        if UseLogForY:
            dataY = numpy.log10(hist)
        else:
            dataY = hist
        dataX = binEdges[0:-1]
        if UseSqrtForX:
            plotDataX = numpy.sqrt(dataX)
        else:
            plotDataX = dataX
        # plot histogram: log10(frequency) vs. value
        pyplot.plot(plotDataX, dataY, drawstyle="steps")
        if UseLogForY:
            pyplot.ylabel('log10 frequency')
        else:
            pyplot.ylabel('frequency')
        if UseSqrtForX:
            pyplot.xlabel('sqrt of sum of (counts/noise)^2')
        else:
            pyplot.xlabel('sum of (counts/noise)^2')
        # plot chiSq probability distribution
        chiSqX = dataX
        chiSqDist = numpy.power(chiSqX, (chiSqOrder / 2.0) - 1) \
                    * numpy.exp(-chiSqX / 2.0)
        chiSqDist /= chiSqDist.sum()
        if UseLogForY:
            chiSqDistY = numpy.log10(chiSqDist)
        else:
            chiSqDistY = chiSqDist
        pyplot.plot(plotDataX, chiSqDistY)
        # set plot limits
        goodY = numpy.extract(numpy.isfinite(dataY), dataY)
        maxY = goodY.max()
        minY = goodY.min()
        yRange = maxY - minY
        # plot out to where Y falls to 1% of max value
        maxYInd = goodY.argmax()
        yEndVal = minY + (yRange * 0.01)
        smallYIndices = numpy.where(goodY < yEndVal)[0]
        endInd = numpy.extract(smallYIndices > maxYInd, smallYIndices)[0]
        pyplot.xlim((0, plotDataX[endInd]))
        yMargin = yRange * 0.05
        pyplot.ylim((minY, maxY + yMargin))
        pyplot.show()

rem:
    yEndVal = minY + (yRange * 0.01)
    smallYIndices = numpy.where(goodY < yEndVal)[0]
    endInd = numpy.extract(smallYIndices > maxYInd, smallYIndices)[0]
add:
    tailMinY = goodY[maxYInd:].min()
    yRange = maxY - tailMinY
    yEndVal = tailMinY + (yRange * 0.01)
    endInd = numpy.where(goodY[maxYInd:] <= yEndVal)[0][0] + maxYInd
    endInd = len(goodY) - 1
context: same plotHistogram() context as above
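For reference, the hand-rolled `chiSqDist` in these rows is the chi-square density with k = chiSqOrder degrees of freedom, minus its normalizing constant:

    f(x; k) = \frac{x^{k/2 - 1} e^{-x/2}}{2^{k/2}\,\Gamma(k/2)}, \qquad x > 0

The code skips the constant denominator and divides by `chiSqDist.sum()` instead, which is consistent with comparing against a histogram that was itself normalized by `hist.sum()`.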
rem:
    BaseHTTPServer.HTTPServer.__init__(self, (host, port), RecordHandler)
add:
    try:
        BaseHTTPServer.HTTPServer.__init__(self, (host, port), RecordHandler)
    except Exception, e:
        logging.critical('Could not start HTTPServer on port %d: %s', port, e)
        return
context:
    def __init__(self, http_archive_filename, use_deterministic_script,
                 real_dns_lookup, host='localhost', port=80):
        self.use_deterministic_script = use_deterministic_script
        self.archive_filename = http_archive_filename
        self.real_http_request = RealHttpRequest(real_dns_lookup)

rem:
    BaseHTTPServer.HTTPServer.__init__(self, (host, port), ReplayHandler)
add:
    try:
        BaseHTTPServer.HTTPServer.__init__(self, (host, port), ReplayHandler)
    except Exception, e:
        logging.critical('Could not start HTTPServer on port %d: %s', port, e)
        return
context:
    def __init__(self, http_archive_filename, use_deterministic_script,
                 host='localhost', port=80):
        self.use_deterministic_script = use_deterministic_script
        self.http_archive = httparchive.HttpArchive.Create(
            http_archive_filename)
        logging.info('Loaded %d responses from %s',
                     len(self.http_archive), http_archive_filename)
        BaseHTTPServer.HTTPServer.__init__(self, (host, port), ReplayHandler)
        logging.info('Replaying on %s:%s...', host, port)
query.filter("platform =", int(self.request.get("platform_filter"))) if self.request.get("version_vilter"): query.filter("version =", int(self.request.get("version_vilter")))
query.filter("platform =", self.request.get("platform_filter")) if self.request.get("version_filter"): query.filter("version =", self.request.get("version_filter"))
def do_set_search(self): query = models.TestSet.all(); query.order("-date")
add:
    self._ipfw([
        'pipe', dns_pipe,
        'config',
        'bw', '0',
        'delay', delay_ms,
        'plr', packet_loss_rate
    ])
    self._ipfw([
        'add', self.pipe_set,
        'pipe', dns_pipe,
        'udp',
        'from', 'any',
        'to', '127.0.0.1',
        'dst-port', '53'
    ])
context:
    def set_traffic_shaping(self, up_bandwidth='0', down_bandwidth='0',
                            delay_ms='0', packet_loss_rate='0'):
        """Start shaping traffic.
rem:
    def __init__(self, host='127.0.0.1', port=53,
                 platform_settings=platformsettings.get_platform_settings()):
add:
    def __init__(self, host='127.0.0.1', port=53, platform_settings=None):
        if not platform_settings:
            platform_settings = platformsettings.get_platform_settings()
context:
    def __init__(self, host='127.0.0.1', port=53,
                 platform_settings=platformsettings.get_platform_settings()):
        self.host = host
        self.platform_settings = platform_settings
        try:
            SocketServer.ThreadingUDPServer.__init__(
                self, (host, port), UdpDnsHandler)
        except socket.error, (error_number, msg):
            if error_number == errno.EACCES:
                raise PermissionDenied
            raise
        logging.info('Started DNS server on (%s:%s)...', host, port)
        platform_settings.set_primary_dns(host)

rem:
    self.platform_settings = platform_settings
add:
    self.restore_primary_dns = platform_settings.restore_primary_dns
context: same __init__() context as above
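The first hunk above fixes a classic pitfall: a default like `platform_settings=platformsettings.get_platform_settings()` is evaluated once, when the `def` statement runs at import time, not once per call. A deterministic, self-contained sketch of the difference (names here are invented for illustration):

    import itertools
    counter = itertools.count()

    def bad(n=next(counter)):    # default computed once, at definition time
        return n

    def good(n=None):            # default computed on every call
        if n is None:
            n = next(counter)
        return n

    print(bad(), bad())      # 0 0  -- both calls share the import-time value
    print(good(), good())    # 1 2  -- each call gets a fresh value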
rem:
    self.connection.close()
add:
    self.close_connection = 1
context:
    def send_archived_http_response(self, response):
        try:
            # We need to set the server name before we start the response.
            # Take a scan through the response headers here.
            use_chunked = False
            has_content_length = False
            server_name = "WebPageReplay"
            for header, value in response.headers:
                if header == "server":
                    server_name = value
                if header == "transfer-encoding":
                    use_chunked = True
                if header == "content-length":
                    has_content_length = True
            self.server_version = server_name
            self.sys_version = ""
logging.critical("Traffic Shaping Exception ", e)
logging.critical("Traffic Shaping Exception: %s ", e)
def set_traffic_shaping(self, up_bandwidth = '0', down_bandwidth = '0', delay_ms = '0', packet_loss_rate = '0'): """Start shaping traffic.
rem:
    logging.basicConfig(level=log_level)
add:
    logging.basicConfig(level=log_level,
                        format='%(asctime)s %(levelname)s %(message)s')
context:
    def format_description(self, description):
        if description:
            return description + '\n'
        else:
            return ''
rem:
    def __call_(self, request, headers):
add:
    def __call__(self, request, headers):
context:
    def __call_(self, request, headers):
        logging.debug('RealHttpRequest: %s %s', request.host, request.path)
        host_ip = self._real_dns_lookup(request.host)
        connection = httplib.HTTPConnection(host_ip)
        connection.request(
            request.command, request.path, request.request_body, headers)
        response = connection.getresponse()
        connection.close()
        return response
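The typo matters because only the exact name `__call__` makes instances callable; `__call_` is just an ordinary method, so `obj(request, headers)` raises TypeError. A minimal illustration (class names are made up):

    class Broken(object):
        def __call_(self, x):      # one underscore short: a normal method
            return x

    class Working(object):
        def __call__(self, x):     # real protocol hook: instance is callable
            return x

    print(Working()(42))           # 42
    try:
        Broken()(42)
    except TypeError as e:
        print(e)                   # 'Broken' object is not callable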
add: 'queue', queue_size
context:
    def __enter__(self):
        if self.is_traffic_shaping:
            self.platformsettings.ipfw(['delete', self._RULE_SET])
        if (self.up_bandwidth == '0' and self.down_bandwidth == '0' and
                self.delay_ms == '0' and self.packet_loss_rate == '0'):
            return

rem: 'dst-port', '53'
add: 'dst-port', '53',
context: same __enter__() context as above

rem: 'mask', 'src-port', '0xffff'
add: 'mask', 'src-port', '0xffff',
context: same __enter__() context as above

rem: 'pipe', self._UPLOAD_PIPE,
add: 'queue', self._UPLOAD_QUEUE,
context: same __enter__() context as above

rem: 'dst-port', '80'
add: 'dst-port', '80',
context: same __enter__() context as above

rem: 'mask', 'dst-port', '0xffff'
add: 'mask', 'dst-port', '0xffff',
context: same __enter__() context as above

rem: 'pipe', self._DOWNLOAD_PIPE,
add: 'queue', self._DOWNLOAD_QUEUE,
context: same __enter__() context as above

rem: 'src-port', '80'
add: 'src-port', '80',
context: same __enter__() context as above
"-p", str(self.config["packet_loss_rate"] / 100),
"-p", str(self.config["packet_loss_rate"] / 100.0),
def StartProxy(self): logging.debug("Starting Web-Page-Replay") log_level = "info" if self.log_level: log_level = self.log_level cmdline = [ replay_path, "-x", # Disables DNS intercepting "-d", str(self.config["download_bandwidth_kbps"]) + "KBit/s", "-u", str(self.config["upload_bandwidth_kbps"]) + "KBit/s", "-m", str(self.config["round_trip_time_ms"]), "-p", str(self.config["packet_loss_rate"] / 100), "-l", log_level, runner_cfg.replay_data_archive ] logging.debug("Starting replay proxy: %s", str(cmdline)) self.proxy_process = subprocess.Popen(cmdline)
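Dividing by `100` instead of `100.0` matters under Python 2, where `/` on two ints truncates, so a loss rate of 5 becomes 0. A quick sketch of the failure mode:

    packet_loss_rate = 5
    print(packet_loss_rate / 100)     # 0 under Python 2 (truncating division);
                                      # 0.05 under Python 3
    print(packet_loss_rate / 100.0)   # 0.05 under both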
rem:
    json_error.error = format % args
    self.response.out.write(json.dumps(json_error))
add:
    json_error['error'] = format % args
    self.response.out.write(json.encode(json_error))
context:
    def send_json_error(self, format, *args):
        """Send a fatal request error to the error log and json output."""
        logging.error(format, *args)
        json_error = {}
        json_error.error = format % args
        self.response.out.write(json.dumps(json_error))

rem:
    json_output.obj = test_set
    json_output.summaries = test_set.summaries
    self.response.out.write(json.dumps(json_output))
add:
    json_output['obj'] = test_set
    json_output['summaries'] = [s for s in test_set.summaries]
    self.response.out.write(json.encode(json_output))
context:
    def do_set(self):
        """Lookup a specific TestSet."""
        set_id = self.request.get("id")
        if not set_id:
            self.send_json_error("Bad request, no id param")
            return
        test_set = models.TestSet.get(db.Key(set_id))
        if not test_set:
            self.send_json_error("Could not find id: ", id)
            return

rem:
    json_output.obj = test_summary
add:
    json_output['obj'] = test_summary
context:
    def do_summary(self):
        """Lookup a specific TestSummary."""
        set_id = self.request.get("id")
        if not set_id:
            self.send_json_error("Bad request, no id param")
            return
        test_summary = models.TestSummary.get(db.Key(set_id))
        if not test_summary:
            self.send_json_error("Could not find id: ", id)
            return

rem:
    json_output.results = test_results
    self.response.out.write(json.dumps(json_output))
add:
    json_output['results'] = [r for r in test_results]
    self.response.out.write(json.encode(json_output))
context: same do_summary() context as above
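These hunks all trade attribute assignment on a plain dict, which raises AttributeError, for item assignment. A two-line sketch of the distinction:

    d = {}
    try:
        d.error = "boom"      # plain dicts don't accept arbitrary attributes
    except AttributeError as e:
        print(e)
    d['error'] = "boom"       # item assignment is what dicts support
    print(d)                  # {'error': 'boom'}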
rem:
    if headers.get('content-type', '').startswith('text/'):
add:
    content_type = headers.get('content-type', '')
    if (content_type.startswith('text/') or
            content_type == 'application/x-javascript'):
context:
    def remove_header(self, key):
        for i, (k, v) in enumerate(self.headers):
            if key == k:
                self.headers.pop(i)
                return
rem:
    'plr', self.packet_loss_rate
add:
    'plr', self.packet_loss_rate,
    'queue', queue_size
context:
    def __enter__(self):
        if self.is_traffic_shaping:
            self.platformsettings.ipfw(['delete', self.pipe_set])
        if (self.up_bandwidth == '0' and self.down_bandwidth == '0' and
                self.delay_ms == '0' and self.packet_loss_rate == '0'):
            return
        try:
            upload_pipe = '1'    # The IPFW pipe for upload rules.
            download_pipe = '2'  # The IPFW pipe for download rules.
            dns_pipe = '3'       # The IPFW pipe for DNS.

rem: 'queue', '4000000'
add: 'queue', queue_size
context: same __enter__() context as above

rem: self._ipfw([
add: self.platformsettings.ipfw([
context: same __enter__() context as above
rem:
    option_parser.add_option('-n', '--no-deterministic_script', default=True,
add:
    harness_group.add_option('-n', '--no-deterministic_script', default=True,
context:
    def format_description(self, description):
        if description:
            return description + '\n'
        else:
            return ''

rem:
    option_parser.add_option('-P', '--no-dns_private_passthrough', default=True,
add:
    harness_group.add_option('-P', '--no-dns_private_passthrough', default=True,
context: same format_description() context as above

rem:
    option_parser.add_option('-x', '--no-dns_forwarding', default=True,
add:
    harness_group.add_option('-x', '--no-dns_forwarding', default=True,
context: same format_description() context as above
rem:
    delay_ms = str(int(delay_ms) / 2)
add:
    self.delay_ms = str(int(self.delay_ms) / 2)
context: same __enter__() context as the queue-size hunks above

rem:
    if shape_dns:
add:
    if self.shape_dns:
context: same __enter__() context as above
rem:
    obj.read_bytes_kb = int(request.get('read_bytes_kb'))
    obj.write_bytes_kb = int(request.get('write_bytes_kb'))
add:
    obj.read_bytes_kb = int(float(request.get('read_bytes_kb')))
    obj.write_bytes_kb = int(float(request.get('write_bytes_kb')))
context:
    def ApplyStatisticsData(request, obj):
        """Applies statistics uploaded via the request to the object."""
        obj.using_spdy = bool(request.get('using_spdy') == "CHECKED")
        obj.start_load_time = int(request.get('start_load_time'))
        obj.commit_load_time = int(request.get('commit_load_time'))
        obj.doc_load_time = int(request.get('doc_load_time'))
        obj.paint_time = int(request.get('paint_time'))
        obj.total_time = int(request.get('total_time'))
        obj.last_load_time = int(request.get('last_load_time'))
        obj.num_requests = int(request.get('num_requests'))
        obj.num_connects = int(request.get('num_connects'))
        obj.num_sessions = int(request.get('num_sessions'))
        obj.read_bytes_kb = int(request.get('read_bytes_kb'))
        obj.write_bytes_kb = int(request.get('write_bytes_kb'))
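The extra `float()` hop exists because `int()` rejects decimal strings outright; parsing via float first accepts them, at the cost of truncation. A quick demonstration:

    print(int("1536"))           # 1536
    try:
        print(int("1536.7"))     # int() refuses strings with a decimal point
    except ValueError as e:
        print(e)
    print(int(float("1536.7")))  # 1536: parse as float, then truncate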
rem:
    d = self.process_rs(owner, content, rs)
add:
    d = self.process_rs(rs)
context:
    def collect(self, vs=None, rs=None):
        """ Collect data.
rem:
    d.addCallback(lambda x: len(x) > 0 and x[0][0] or None)
add:
    d.addCallback(lambda x: len(x) > 0 and (x[0][0] + 1) or None)
context:
    def age(self, ctx, lb=None, vs=None, rs=None, sorry=False):
        """ Return the age of the given resource.
rem:
    query that should return URL (without prefix) of matching results. The
    URL will be prefixed by API version.
add:
    query that should return URL of matching results.
context:
    def childFactory(self, ctx, name):
        """ Dispatch the search to the generic search handler. """
        return SearchGenericResource(self.dbpool, name)

rem:
    Run the search query and prefix the results with API URL.
add:
    Run the search query.
context:
    def search(self, ctx):
        """ Run the search query and prefix the results with API URL. """
        api = IApiVersion(ctx)
        d = self.dbpool.runQueryInPast(ctx, self.query(),
                                       {'term': self.term})
        d.addCallback(lambda results: ["/api/%s/%s" % (api, y[0])
                                       for y in results])
        return d

rem:
    api = IApiVersion(ctx)
context: same search() context as above

rem:
    self.query(), {'term': self.term})
    d.addCallback(lambda results: ["/api/%s/%s" % (api, y[0])
                                   for y in results])
add:
    self.query(), {'term': self.term})
    d.addCallback(lambda results: [y[0] for y in results])
context: same search() context as above
rem:
    SELECT 'loadbalancer/' || name || '/'
add:
    SELECT '/loadbalancer/' || name || '/'
context:
    def query(self):
        return """

rem:
    SELECT 'loadbalancer/' || lb || '/virtualserver/' || vs || '/'
add:
    SELECT '/loadbalancer/' || lb || '/virtualserver/' || vs || '/'
context: same query() context as above

rem:
    SELECT 'loadbalancer/' || lb || '/virtualserver/' || vs
        || '/realserver/' || rs || '/'
add:
    SELECT '/loadbalancer/' || lb || '/virtualserver/' || vs
        || '/realserver/' || rs || '/'
context: same query() context as above
mo = re.match(r"[rb](\d+)", rs)
mo = re.match(r"([rb])(\d+)", rs)
def parse(self, vs=None, rs=None): """ Parse vs and rs into v, s, g, r. """ if vs is not None: mo = re.match(r"v(\d+)s(\d+)g(\d+)", vs) if not mo: raise ValueError("%r is not a valid virtual server" % vs) v, s, g = int(mo.group(1)), int(mo.group(2)), int(mo.group(3)) if rs is not None: mo = re.match(r"[rb](\d+)", rs) if not mo: raise ValueError("%r is not a valid real server" % rs) r = int(mo.group(1)) return v, s, g, r return v, s, g, None return None, None, None, None
r = int(mo.group(1)) return v, s, g, r return v, s, g, None return None, None, None, None
r = int(mo.group(2)) return v, s, g, r, mo.group(1) == "b" return v, s, g, None, None return None, None, None, None, None
def parse(self, vs=None, rs=None): """ Parse vs and rs into v, s, g, r. """ if vs is not None: mo = re.match(r"v(\d+)s(\d+)g(\d+)", vs) if not mo: raise ValueError("%r is not a valid virtual server" % vs) v, s, g = int(mo.group(1)), int(mo.group(2)), int(mo.group(3)) if rs is not None: mo = re.match(r"[rb](\d+)", rs) if not mo: raise ValueError("%r is not a valid real server" % rs) r = int(mo.group(1)) return v, s, g, r return v, s, g, None return None, None, None, None
rem:
    v, s, g, r = self.parse(vs, rs)
add:
    v, s, g, r, backup = self.parse(vs, rs)
context:
    def collect(self, vs=None, rs=None):
        """ Collect data for an Alteon """
        v, s, g, r = self.parse(vs, rs)
        if v is not None:
            if r is not None:
                # Collect data to refresh a specific real server
                d = self.process_rs(v, s, g, r)
            else:
                # Collect data to refresh a virtual server
                d = self.process_vs(v, s, g)
        else:
            # Otherwise, collect everything
            d = self.process_all()
        return d

rem:
    d = self.process_rs(v, s, g, r)
add:
    d = self.process_rs(v, s, g, r, backup)
context: same collect() context as above
rem:
    v, s, g, r = self.parse(vs, rs)
add:
    v, s, g, r, backup = self.parse(vs, rs)
    if backup is True:
        yield {}
        return
context:
    def actions(self, vs=None, rs=None):
        """ List possible actions.

rem:
    v, s, g, r = self.parse(vs, rs)
add:
    v, s, g, r, backup = self.parse(vs, rs)
    if backup is True:
        yield None
        return
context:
    def execute(self, action, actionargs=None, vs=None, rs=None):
        """ Execute an action.
rem:
    g = defer.waitForDeferred(
add:
    gr = defer.waitForDeferred(
context:
    def process_vs(self, v, s, g):
        """ Process data for a given virtual server when no real server is provided

rem:
    yield g
    g.getResult()
add:
    yield gr
    gr.getResult()
context: same process_vs() context as above
mo = re.match(r"(.*)|(.*)", vs)
mo = re.match(r"(.*)\|(.*)", vs)
def parse(self, vs=None, rs=None): """ Parse vs and rs into owner, content, rs """ if vs is not None: mo = re.match(r"(.*)|(.*)", vs) if not mo: raise ValueError("%r is not a valid virtual server" % vs) owner, content = mo.groups() return owner, content, rs return None, None, None
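In `r"(.*)|(.*)"` the bare `|` is regex alternation, so the first `(.*)` greedily swallows the whole string and the second group never matches; escaping it as `\|` makes it a literal separator. Illustrated:

    import re

    vs = "owner|content"
    print(re.match(r"(.*)|(.*)", vs).groups())   # ('owner|content', None)
    print(re.match(r"(.*)\|(.*)", vs).groups())  # ('owner', 'content')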
rem:
    d = self.process_rs(rs)
add:
    d = self.process_rs(owner, content, rs, None)
context:
    def collect(self, vs=None, rs=None):
        """ Collect data.

rem:
    rs = defer.waitForDeferred(self.process_rs(service))
add:
    rs = defer.waitForDeferred(self.process_rs(owner, content, service))
context:
    def process_vs(self, owner, content):
        """ Process data for a given virtual server when no real server is provided

rem:
    rs = defer.waitForDeferred(self.process_rs(service, backup))
add:
    rs = defer.waitForDeferred(
        self.process_rs(owner, content, service, backup))
context: same process_vs() context as above

rem:
    def process_rs(self, service, backup=False):
add:
    def process_rs(self, owner, content, service, backup=False):
context:
    def process_rs(self, service, backup=False):
        """ Process data for a given virtual server and real server.
vip = "mark%d:%d" % tuple(self.cache(('virtualServerFwMark', v), ('virtualServerPort', v)))
vip = "mark%d:0" % self.cache(('virtualServerFwMark', v))
def process_vs(self, v): """ Process data for a given virtual server when no real server is provided
rem:
    def nogateway(x):
add:
    def nogateway():
context:
    def nogateway(x):
        request.setResponseCode(504)  # Gateway timeout
        return "No gateway available"

rem:
    d.addBoth(lambda x: x is not None and collector.collect(vs, rs) and x or x)
add:
    d.addBoth(lambda x: refresh(x))
context:
    def execute_and_refresh(collector):
        d = collector.execute(action, actionargs, vs, rs)
        # We refresh only if the result is not None (action has been
        # executed). We don't alter the original result. Don't refresh a
        # whole load balancer.
        if vs is not None or rs is not None:
            d.addBoth(lambda x: x is not None and
                      collector.collect(vs, rs) and x or x)
        return d
rem:
    for rstate in state[1:]:
add:
    for rstate in states[1:]:
context:
    def aggregate_state(states):
        if not states:
            return "ok"
        state = states[0]
        for rstate in state[1:]:
            if rstate == "ok":
                if cur == "disabled":
                    state = "ok"
                    continue
                if cur == "down":
                    state = "degraded"
                    continue
                continue
            if rstate == "disabled":
                continue
            if rstate == "down":
                if cur == "ok":
                    state = "degraded"
                    continue
                if cur == "disabled":
                    state = "down"
                    continue
                continue
        return state

rem:
    if cur == "disabled":
add:
    if state == "disabled":
context: same aggregate_state() context as above

rem:
    if cur == "down":
add:
    if state == "down":
context: same aggregate_state() context as above

rem:
    if cur == "ok":
add:
    if state == "ok":
context: same aggregate_state() context as above
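Putting the four fixes together, the repaired function looks like the sketch below, reconstructed from the hunks; the ok/degraded/down/disabled lattice is taken from the context:

    def aggregate_state(states):
        """Fold a list of per-server states into one aggregate state."""
        if not states:
            return "ok"
        state = states[0]
        for rstate in states[1:]:          # was: state[1:]
            if rstate == "ok":
                if state == "disabled":    # was: cur
                    state = "ok"
                elif state == "down":
                    state = "degraded"
            elif rstate == "down":
                if state == "ok":
                    state = "degraded"
                elif state == "disabled":
                    state = "down"
            # "disabled" members never change the aggregate
        return state

    print(aggregate_state(["ok", "ok"]))          # ok
    print(aggregate_state(["ok", "down"]))        # degraded
    print(aggregate_state(["disabled", "down"]))  # down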
rem:
    fc_name, fc_date = get_flowcell_info(fc_dir)
context:
    def main(config_file, fc_dir, run_info_yaml=None):
        work_dir = os.getcwd()
        with open(config_file) as in_handle:
            config = yaml.load(in_handle)
        if run_info_yaml:
            with open(run_info_yaml) as in_handle:
                run_details = yaml.load(in_handle)
            run_info = dict(details=run_details, run_id="")
        else:
            galaxy_api = GalaxyApiAccess(config['galaxy_url'],
                                         config['galaxy_api_key'])
            run_info = galaxy_api.run_details(fc_name)
        fc_name, fc_date = get_flowcell_info(fc_dir)
        run_items = _add_multiplex_to_control(run_info["details"])
        fastq_dir = get_fastq_dir(fc_dir)
        align_dir = os.path.join(work_dir, "alignments")
        # process each flowcell lane
        pool = (Pool(config["algorithm"]["num_cores"])
                if config["algorithm"]["num_cores"] > 1 else None)
        map_fn = pool.map if pool else map
        try:
            map_fn(_process_lane_wrapper,
                   ((i, fastq_dir, fc_name, fc_date, align_dir, config,
                     config_file) for i in run_items))
        except:
            if pool:
                pool.terminate()
            raise
        # process samples, potentially multiplexed across multiple lanes
        sample_files, sample_fastq, sample_info = organize_samples(
            align_dir, fastq_dir, work_dir, fc_name, fc_date, run_items)
        try:
            map_fn(_process_sample_wrapper,
                   ((name, sample_fastq[name], sample_info[name], bam_files,
                     work_dir, config, config_file)
                    for name, bam_files in sample_files))
        except:
            if pool:
                pool.terminate()
            raise
        write_metrics(run_info, work_dir, fc_dir, fc_name, fc_date,
                      fastq_dir)
rem:
    os.makedirs(align_dir)
add:
    try:
        os.makedirs(align_dir)
    except OSError:
        pass
    assert os.path.exists(align_dir)
context:
    def do_alignment(fastq1, fastq2, align_ref, sam_ref, lane_name,
                     sample_name, align_dir, config, config_file):
        """Align to the provided reference genome, returning an aligned SAM file.
        """
        aligner_to_use = config["algorithm"]["aligner"]
        if not os.path.exists(align_dir):
            os.makedirs(align_dir)
        print lane_name, "Aligning with", aligner_to_use
        if aligner_to_use == "bowtie":
            sam_file = bowtie_to_sam(fastq1, fastq2, align_ref, lane_name,
                                     align_dir, config)
        elif aligner_to_use == "bwa":
            sam_file = bwa_align_to_sam(fastq1, fastq2, align_ref, lane_name,
                                        align_dir, config)
        elif aligner_to_use == "maq":
            sam_file = maq_align_to_sam(fastq1, fastq2, align_ref, lane_name,
                                        sample_name, align_dir, config_file)
        else:
            raise ValueError("Do not recognize aligner: %s" % aligner_to_use)
        print lane_name, "Converting to sorted BAM file"
        sam_to_sort_bam(sam_file, sam_ref, fastq1, fastq2, sample_name,
                        lane_name, config_file)
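The try/except in this hunk guards the check-then-create race: between `os.path.exists` and `os.makedirs`, a parallel worker (the pipeline above runs lanes through a multiprocessing Pool) can create the directory first, and the bare `makedirs` would then raise. A small sketch of the same guard in isolation; the path is hypothetical, and on Python 3.2+ `os.makedirs(path, exist_ok=True)` expresses it directly:

    import os

    def ensure_dir(path):
        # Tolerate a concurrent creator: only swallow "already exists".
        try:
            os.makedirs(path)
        except OSError:
            if not os.path.isdir(path):
                raise

    ensure_dir("/tmp/align_demo")   # safe to call twice
    ensure_dir("/tmp/align_demo")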