Columns: rem (string, lengths 0 to 322k), add (string, lengths 0 to 2.05M), context (string, lengths 8 to 228k)
grad, step_coeff = self._step_direction(p)
grad, step_coeff = self._step_direction(p, dostep = True)
def step(self, p):
    grad, step_coeff = self._step_direction(p)
    states = p.states
    for (g, state) in zip(grad,states):
        state.x += step_coeff * g
for (g, state) in zip(grad,states): state.x += step_coeff * g
self.linesearch_stop_code = None
def __init__(self, feval, max_line_steps=10, quiet=True, **kwargs):
    ''' Gradient-descent parameter update strategy, performing a line-search to select the step size
self.linesearch_stop_code = None
def reset(self):
    self.linesearch_stop_code = None
    self.cur_num_steps = -1
    super(gd_linesearch_update, self).reset()
def _step_direction(self, p):
def _step_direction(self, p, dostep=True):
def _step_direction(self, p):
    grad, step_coeff = \
        super(gd_linesearch_update, self)._step_direction(p)
    feval = self.feval
    states = p.states
    bup = p.backup()
    stop = self.max_line_steps
    step = 0
    cur_energy = feval()
    new_energy = np.infty
    while new_energy > cur_energy and step != stop:
        for (g, state) in zip(grad,states):
            state.x += step_coeff * g
        new_energy = feval()
super(gd_linesearch_update, self)._step_direction(p)
super(gd_linesearch_update, self)._step_direction(p, False)
while new_energy > cur_energy and step != stop:
    for (g, state) in zip(grad,states):
        state.x += step_coeff * g
while step != stop:
    step += 1
    self._perform_step(p, grad, step_coeff)
    new_energy = feval()
new_energy = feval()
if new_energy < cur_energy: break
step += 1
if step == stop:
    self.linesearch_stop_code = 'iteration limit reached'
else:
    self.linesearch_stop_code = 'energy decreased'
if new_energy >= cur_energy: self.stop_code = 'line search failed'
(step, self.linesearch_stop_code)
(step, ('energy decreased' if new_energy < cur_energy else 'iteration limit reached'))
if not dostep:
    p.restore(bup)
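Pieced together from the add rows above, the reworked line search steps until the energy drops or the iteration limit is hit, and restores the backup when dostep is false. A minimal standalone sketch of that control flow (the array parameter, feval signature, and function name are stand-ins for the library's parameter and state objects, not its API):

    import numpy as np

    def line_search_step(x, grad, step_coeff, feval, max_line_steps=10, dostep=True):
        # Keep stepping along the gradient direction until the energy
        # drops below its starting value or the iteration limit is hit,
        # mirroring the rewritten while loop in the rows above.
        backup = x.copy()
        cur_energy = feval(x)
        step = 0
        stop_code = 'iteration limit reached'
        while step != max_line_steps:
            step += 1
            x += step_coeff * grad      # stands in for self._perform_step(...)
            if feval(x) < cur_energy:
                stop_code = 'energy decreased'
                break
        if not dostep:
            x[:] = backup               # mirrors p.restore(bup)
        return step, stop_code

    # Example: minimize f(x) = x**2 from x = 3 with a descent step.
    x = np.array([3.0])
    print(line_search_step(x, grad=2.0 * x, step_coeff=-0.1,
                           feval=lambda v: float(v[0] ** 2)))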
def __new__(cls, shape, dtype=rtype): shape = ensure_tuple(shape)
def __new__(cls, *args):
    if len(args) == 0:
        return object.__new__(cls)
    shape = ensure_tuple(args[0])
    dtype = rtype if len(args) < 2 else args[1]
def __new__(cls, shape, dtype=rtype):
    shape = ensure_tuple(shape)
    if shape[0] == 1:
        return degen_rolling_average(shape, dtype)
    return object.__new__(cls)
def __new__(cls, shape, dtype=rtype):
def __new__(cls, *args):
def __new__(cls, shape, dtype=rtype): return object.__new__(cls)
base_super = super
_base_super = super
def make_wrapper(inner_name, around_fn):
    return lambda self, *args, **kwargs: \
        around_fn(self, getattr(self, inner_name), *args, **kwargs)
return base_super(cls, *args)
return _base_super(cls, *args)
def __new__(self, cls, *args):
    if not hasattr(cls, '__renames__') or not args:
        return base_super(cls, *args)
    return object.__new__(self)
self.__sup = base_super(cls, obj)
self.__sup = _base_super(cls, obj)
def __init__(self, cls, obj):
    self.__sup = base_super(cls, obj)
    self.__renames = cls.__renames__
__builtins__.super = around_super
__builtins__.super = around_super
__builtins__._base_super = _base_super
def __getattribute__(self, name):
    if name.startswith('_around_super__'):
        return object.__getattribute__(self, name)
    sup = self.__sup
    renames = self.__renames
    if name in renames:
        name = renames[name]
    return getattr(sup, name)
__builtins__['super'] = around_super
__builtins__['super'] = around_super
__builtins__['_base_super'] = _base_super
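The rows above rename the saved builtin from base_super to _base_super and expose it through __builtins__ alongside the around_super wrapper. The core trick is a proxy that translates attribute names through a per-class __renames__ table before delegating to the real super; a self-contained sketch (class names here are hypothetical, and the __builtins__ patching is left out):

    class renaming_super(object):
        # Forward attribute lookups to super(), applying the class's
        # __renames__ table first, as in the __getattribute__ row above.
        def __init__(self, cls, obj):
            self.__sup = super(cls, obj)
            self.__renames = getattr(cls, '__renames__', {})

        def __getattribute__(self, name):
            if name.startswith('_renaming_super__'):
                return object.__getattribute__(self, name)
            sup = self.__sup
            renames = self.__renames
            return getattr(sup, renames.get(name, name))

    class base(object):
        def old_name(self):
            return 'base.old_name'

    class child(base):
        __renames__ = {'new_name': 'old_name'}

    print(renaming_super(child, child()).new_name())  # -> base.old_name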
param.updater = gd_update( eta = 0.02, max_iters = 100 )
param.updater = gd_update( eta = 0.02 )
def plot(ds, machine):
    shape_in, shape_out = ds.shape()
    assert (shape_in == shape_out == (1,))
    inp = state((1,)); out = state((1,)); des = state((1,))
    ds.seek(0)
    size = ds.size()
    coords = empty(size); outputs = empty(size); targets = empty(size)
    for i in xrange(size):
        ds.fprop(inp, des)
        machine.fprop(inp, out)
        coords[i] = inp.x; outputs[i] = out.x; targets[i] = des.x
        ds.next()
    from matplotlib import pyplot
    indices = coords.argsort()
    coords = coords.take(indices)
    outputs = outputs.take(indices)
    targets = targets.take(indices)
    pyplot.ioff()
    pyplot.plot(coords, outputs, label = 'machine output')
    pyplot.plot(coords, targets, label = 'desired output')
    pyplot.legend()
    pyplot.show()
trainer.train(50000)
trainer.train(10000)
_base_super = super
__builtins__._base_super = _base_super
__builtins__['_base_super'] = _base_super
min_finished = self.ds_train.size() - self.ds.tell()
min_finished = self.ds_train.size() - self.ds_train.tell()
def train_online(self, niters = -1):
    if not self.quiet:
        print 'Starting training on %s%s' % \
            (time.asctime(),
             ' (max %d iterations)' % (niters,) if niters >= 0 else '')
    parameter = self.parameter
    age = self.age
    msg = self.msg
    hess_interval = self.hess_interval
    report_interval = self.report_interval
    valid_interval = self.valid_interval
    backup_interval = self.backup_interval
    keep_training = True
    if hess_interval <= 0:
        parameter.set_epsilon(1.)
    else:
        self.compute_diag_hessian()
    stop_age = age + niters
    min_finished = 0
    if self.complete_training:
        min_finished = self.ds_train.size() - self.ds.tell()
    while age != stop_age and (keep_training or age < min_finished):
eta = 2.0,
eta = 1.0,
def plot_reconstructions(ds, machine, n = 1000,
                         orig_x=5, orig_y=5, max_x=795, scale = 1.0):
    shape_in, shape_out = ds.shape()
    assert (len(shape_in) == len(shape_out) == 2)
    ensure_window(title = 'PSD Reconstructions')
    spacing, padding = 2, 5
    pic = empty((max(shape_in[0], shape_out[0]),
                 shape_in[1] + spacing + shape_out[1]))
    h, w = pic.shape; h *= scale; w *= scale
    pos_x, pos_y = 0, 0
    ds.seek(0)
    inp, tgt = state(()), state(())
    for i in xrange(min(n, ds.size())):
        ds.fprop(inp, tgt)
        machine.encoder.fprop(inp, machine.encoder_out)
        machine.decoder.fprop(machine.encoder_out, machine.decoder_out)
        ds.next()
        inpx = inp.x
        recx = machine.decoder_out.x
        white = max( inpx.max(), recx.max() )
        pic.fill(white)
        pic[:shape_in[0],:shape_in[1]] = inpx
        pic[:shape_out[0],shape_in[1] + spacing:] = recx
        if pos_x + w > max_x:
            pos_x = 0; pos_y += h + padding
        draw_mat(pic, orig_x + pos_x, orig_y + pos_y,
                 maxv = white, scale = scale)
        pos_x += w + padding
hess_interval = 0, report_interval = 1,
hess_interval = 10000, report_interval = 50,
trainer.train(20)
trainer.train(2000)
run_shell ('mono --debug solitary/Solitary.exe '
run_shell ('mono --debug ../../solitary/Solitary.exe '
def make_app_bundle (self):
    plist_path = os.path.join (self.bundle_skeleton_dir, 'Contents', 'Info.plist')
    app_name = 'Unknown.app'
    plist = None
    if os.path.exists (plist_path):
        plist = Plist.fromFile (plist_path)
        app_name = plist['CFBundleExecutable']
    else:
        print 'Warning: no Contents/Info.plist in .app skeleton'
Package.__init__ (self, 'banshee-1', '1.7.3')
Package.__init__ (self, 'banshee-1', '1.7.4')
def __init__ (self): Package.__init__ (self, 'banshee-1', '1.7.3')
'http://download.banshee.fm/banshee/unstable/%{version}/%{name}-%{version}.tar.bz2'
'http://download.banshee.fm/banshee/unstable/%{version}/%{name}-%{version}.tar.bz2',
'patches/banshee-gnome-doc-utils-fix.patch'
'--disable-webkit', '--disable-youtube'
'--disable-boo', '--disable-youtube', '--disable-gnome'
'--disable-boo', '--disable-gnome',
product = party_obj.browse(cursor, user, product, context=context)
product = product_obj.browse(cursor, user, product, context=context)
def compute(self, cursor, user, price_list, party, product, unit_price,
            quantity, uom, pattern=None, context=None):
    ''' Compute price based on price list of party
product = party_obj.browse(product)
product = product_obj.browse(product)
def compute(self, price_list, party, product, unit_price, quantity, uom,
            pattern=None):
    ''' Compute price based on price list of party
probe.log("MAPPING_TIMEOUT mapped_ip=%s" % probe.mapped_ip)
probe.log("MAPPING_TIMEOUT")
def run(self):
    # Advance state of probes that are ready
    for probe in self.probes:
        now = time.time()
        if not probe.next_ts <= now:
            continue
        if probe.waiting:
            # Last ping timed out, so back off a TTL
            if probe.mapping_tries == probe.max_mapping_tries:
                probe.mapped = True
                probe.max_ttl -= 1
                probe.log("MAPPED mapped_ip=%s max_ttl=%s" % (probe.mapped_ip, probe.max_ttl))
            else:
                probe.log("MAPPING_TIMEOUT mapped_ip=%s" % probe.mapped_ip)
                probe.mapping_tries += 1
        elif not probe.mapped:
            # If probe isn't mapped, advance TTL and send a ping
            probe.max_ttl += 1
        probe.waiting = True
        # Schedule a timeout handler
        probe.next_ts = now + probe.timeout
        probe.ping_ts = now
        probe.seq += 1
        # Send the ping!
        probe.log("PING ip=%s id=%s seq=%s ttl=%s" % (probe.ip, self.pid, probe.seq, probe.max_ttl))
        self.ping(probe.ip, id=self.pid, seq=probe.seq, ttl=probe.max_ttl)
    # Process incoming data
    while True:
        now = time.time()
        rlist, wlist, xlist = select.select([self.sock, sys.stdin], [], [], 0.01)
        if not rlist:
            break
        if sys.stdin in rlist:
            line = sys.stdin.readline()
            if self.state == STATE_COMMAND:
                line = line.strip().upper()
                if line.startswith == 'PING':
                    # TODO: allow registration of pings' remotely
                    pass
        if self.sock in rlist:
            # Should never need more than ~50 bytes
            data, paddr = self.sock.recvfrom(256)
            paddr = paddr[0]
            icmpHeader = data[20:24]
            ptype, pcode, checksum = struct.unpack( "bbH", icmpHeader )
            if ptype == 0:
                # ICMP Echo Reply
                body = data[24:40]
                pid, pseq, tstamp = struct.unpack( "HhL", body )
                probe = self.get_probe(paddr)
                if probe:
                    tstamp = data[28:36]
                    et = int(time.time() * 1000.0)
                    [st] = struct.unpack("L", tstamp)
                    ms = (et - st)
                    if not probe.mapped:
                        probe.mapped = True
                        probe.log("MAPPED mapped_ip=%s max_ttl=%s" % (probe.mapped_ip, probe.max_ttl))
                    probe.log("PONG type=echo_reply id=%s seq=%s ms=%s" % (pid, pseq, ms))
                    probe.next_ts = now + probe.interval
                    probe.waiting = False
            elif ptype == 11 and pcode == 0:
                # ICMP Time Exceeded
                original_ip_header, original_data = data[28:48], data[48:56]
                dest_addr = util.ip(original_ip_header[16:20])
                probe = self.get_probe(dest_addr)
                if not probe:
                    return
                # Original datagram is included after the IP header
                otype, ocode, ochecksum, oid, oseq = struct.unpack( "bbHHh", original_data )
                ms = int((now - probe.ping_ts) * 1000.0)
                probe.log("PONG type=time_exceeded id=%s seq=%s addr=%s ms=%s" % (oid, oseq, paddr, ms))
                if probe.mapped:
                    # Just schedule the next regular ping
                    probe.next_ts = now + probe.interval
                    probe.waiting = False
                if not probe.mapped:
                    # Set new max_ttl, if necessary, and continue mapping
                    probe.max_ttl = max(oseq, probe.max_ttl)
                    probe.next_ts = now
                    probe.waiting = False
                    probe.mapped_ip = paddr
                    # In case this was a retry, reset the counter
                    probe.mapping_tries = 0
hosts = ['cnn.com', 'google.com', 'justin.tv', 'yahoo.com', 'nytimes.com', 'ustream.tv', 'ycombinator.com', 'blogtv.com', 'comcast.com', 'ea.com']
hosts = ['cnn.com']
def ping(self, addr, id=1, seq=1, ttl=255, verbose = False):
    addr = socket.gethostbyname(addr)
    packet = self.create_packet(addr, id, seq)
    if verbose:
        print "Pinging %s (id=%s seq=%s ttl=%s)" % (addr, id, seq, ttl)
    self.sock.setsockopt(socket.SOL_IP, socket.IP_TTL, ttl)
    # Python requires a tuple of (addr, port), but the system call actually
    # doesn't require a port. So we'll send '1' as a dummy port value.
    self.sock.sendto(packet, (addr, 1))
self.search_options[k]=v(self.context)
return self.search_options
v = v(self.context)
options[k] = v
return options
def _search_options(self):
    for k,v in self.search_options.items():
        if callable(v):
            self.search_options[k]=v(self.context)
    return self.search_options
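The add row resolves callable option values into a fresh dict instead of writing them back into self.search_options; the old code overwrote the (typically class-level, shared) defaults, so the first context's value would stick for every later call. A small sketch of the fixed pattern (the class and option names are invented):

    class listing(object):
        # Class-level defaults shared by every instance.
        search_options = {'path': lambda context: '/plone/%s' % context}

        def __init__(self, context):
            self.context = context

        def _search_options(self):
            # Resolve callables into a fresh dict so the shared default
            # keeps its callable for the next instance.
            options = {}
            for k, v in self.search_options.items():
                if callable(v):
                    v = v(self.context)
                options[k] = v
            return options

    print(listing('news')._search_options())    # {'path': '/plone/news'}
    print(listing('events')._search_options())  # {'path': '/plone/events'}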
self.search_util = queryUtility(ISearch)
def search(self, kwargs={}):
    self.search_util = queryUtility(ISearch)
    parameters = {}
    query = self.build_query()
    flares = self.search_util(query, **parameters)
    self.contents = [PloneFlare(f) for f in flares]
def filters(self): return []
def filters(self): return []
def filters(self): return []
filters = []
auto_count = None
def filters(self): return []
request_filters = [('review_state', 'state')]
request_filters = [('review_state', 'review_state')]
def filters(self): return []
template = self.table
template = self.table,
auto_count = self.auto_count,
def render_listing(self):
    generator = queryUtility(ITableGenerator, 'ftw.tablegenerator')
    return generator.generate(self.batch,
                              self.columns,
                              sortable=True,
                              selected=(self.sort_on, self.sort_order),
                              template = self.table )
parsed_state = json.loads(state)
if 'group' in parsed_state:
    del parsed_state['group']
state = json.dumps(parsed_state)
if state:
    parsed_state = json.loads(state)
    if 'group' in parsed_state:
        del parsed_state['group']
    state = json.dumps(parsed_state)
def load_grid_state(self):
    """Loads the stored grid state - if any is stored.
    """
    # get the key from the key generator
    generator = queryMultiAdapter((self.context, self, self.request),
                                  IGridStateStorageKeyGenerator)
    key = generator.get_key()
parsed_state = json.loads(state)
generator = queryMultiAdapter((self.context, listing, self.request),
generator = queryMultiAdapter((self.context, listing_view, self.request),
def setgridstate(self):
    """Stores the current grid configuration (visible columns, column
    order, grouping, sorting etc.) persistent in dictstorage.
    """
self.catalog = catalog = getToolByName(self.context, 'portal_catalog')
self.catalog = getToolByName(self.context, 'portal_catalog')
def search(self, kwargs):
    self.catalog = catalog = getToolByName(self.context, 'portal_catalog')
    query = self.build_query(**kwargs)
    self.contents = self.catalog(**query)
    self.len_results = len(self.contents)
return len(self.contents) > self.pagesize
return self.len_results > self.pagesize
def multiple_pages(self):
    """The multiple_pages in plone.app.batch has a bug if size == pagesize."""
self.catalog = getToolByName(self.context,'portal_catalog')
self.catalog = catalog = getToolByName(self.context,'portal_catalog')
def search(self, kwargs):
    self.catalog = getToolByName(self.context,'portal_catalog')
    # if IATTopic.providedBy(self.context):
    #     contentsMethod = self.context.queryCatalog
    # else:
    #     contentsMethod = self.context.getFolderContents
    query = self.build_query(**kwargs)
    self.contents = self.catalog(**query)
    self.len_results = len(self.contents)
self.contents = self.catalog(**query)
self.contents = catalog(**query)
self._conditionCache.nail()
def _markColumnsAsDirty(self, colnames):
    """Mark column indexes in `colnames` as dirty."""
    assert len(colnames) > 0
    if self.indexed:
        colindexed, cols = self.colindexed, self.cols
        # Mark the proper indexes as dirty
        for colname in colnames:
            if colindexed[colname]:
                col = cols._g_col(colname)
                col.index.dirty = True
                # Put a new nail in condition cache for each dirty index
                self._conditionCache.nail()
assert o == [[(1, 1), (2, 2)]]
assert o == [[(2, 2), (3, 3)]]
def test_zoom_in():
    import numpy
    from mock import Mock, Fake
    sound = Mock({"numchan": 1})
    sound.changed = Fake()
    data = numpy.array([1, 2, 3, 4], DTYPE)
    sound.frames = data
    g = Graph(sound)
    g.set_width(2)
    g.zoom_in()
    o = g.channels()
    assert o == [[(1, 1), (2, 2)]]
    g.zoom_out()
    g.set_width(4)
    o = g.channels()
    assert o == [[(1, 1), (2, 2), (3, 3), (4, 4)]]
assert g.channels() == [[(1, 1), (2, 2)]]
assert g.channels() == [[(1, 2), (3, 3)]]
def test_zoom_in_on():
    import numpy
    from mock import Mock, Fake
    sound = Mock({"numchan": 1})
    sound.changed = Fake()
    data = numpy.array([1, 2, 3, 4], DTYPE)
    sound.frames = data
    g = Graph(sound)
    g.set_width(2)
    g.zoom_in_on(0)
    assert g.channels() == [[(1, 1), (2, 2)]]
    g.zoom_out()
    g.zoom_in_on(1)
    assert g.channels() == [[(2, 2), (3, 3)]]
    g.zoom_out()
    g.zoom_in_on(2)
    assert g.channels() == [[(3, 3), (4, 4)]]
assert g.channels() == [[(2, 2), (3, 3)]]
assert g.channels() == [[(1, 2), (3, 3)]]
assert g.channels() == [[(3, 3), (4, 4)]]
assert g.channels() == [[(1, 2), (3, 3)]]
start, end = self._selection.pixels()
pstart, pend = self._selection.pixels()
fstart, fend = self._selection.get()
def button_press(self, widget, event):
    if event.button == 1:
        self.pressed = True
        x = event.x
        start, end = self._selection.pixels()
        # a double click resumes selection.
        if event.type == gtk.gdk._2BUTTON_PRESS:
            self._selection.pin(x)
        # extend towards left
        elif self._selection.selected() and near(start, x):
            self._selection.pin(end)
            self._selection.extend(x)
        # extend towards right
        elif self._selection.selected() and near(end, x):
            self._selection.pin(start)
            self._selection.extend(x)
        # start fresh selection
        else:
            self._selection.pin(x)
elif self._selection.selected() and near(start, x): self._selection.pin(end)
elif self._selection.selected() and near(pstart, x): self._selection.set(fend, fend)
elif self._selection.selected() and near(end, x): self._selection.pin(start)
elif self._selection.selected() and near(pend, x): self._selection.set(fstart, fstart)
self.move_to(frame - (self._width - 1) * self.density)
self.move_to(frame - (self._width - 1) * self.density * 0.5)
def center_on(self, frame):
    self.move_to(frame - (self._width - 1) * self.density)
filename=self.filename())
filename=self.notebook.filename())
def save_selection_as(self, *args):
    dialog = SaveSelectionFileDialog(app.list_extensions(),
                                     parent=self, filename=self.filename())
    filename = dialog.get_filename()
    if filename != None:
        self.notebook.save_selection_as(filename)
realstart = str(start +1)
realstop = str(stop +1)
realstart = str(start+1)
realstop = str(stop)
def replace(self, start, stop, value):
    # FIXME right now this assumes that autoindent is ON
    # (the '!' in 'change!' means 'toggle autoindent'
    realstart = str(start +1)
    realstop = str(stop +1)
    if stop >= 2147483646:
        realstop = "$"
    self.vimp.sendkeys('<Esc>:%s,%s change!\n' % (realstart,realstop) )
    for l in value:
        self.vimp.sendkeys(l+'\n')
    self.vimp.sendkeys('.\n')
self.vimp.sendkeys('<Esc>:%s,%s change!\n' % (realstart,realstop) )
if start == stop:
    self.vimp.sendkeys('<Esc>:%s,%s insert!\n' % (realstart,realstart) )
else:
    self.vimp.sendkeys('<Esc>:%s,%s change!\n' % (realstart,realstop) )
self.contents[original.linenum:original.linenum] = [str(original), str(addition)]
self.contents[original.linenum:original.linenum+1] = [str(original), str(addition)]
def split_todo(self, original, addition):
    r"""
    >>> i = TodoList([',INBOX','\tmust do X','\tmust do Y @CURRENT', ',CONTEXTS', '\t#FRED', '\t\tdo another thing @FOO'])
    >>> i.split_todo(i.current_todo(), '\tthen do Z')
    >>> j = i.contents
    >>> j[1]
    '\tmust do X'
    >>> j[2]
    '\tmust do Y @CURRENT'
    >>> j[3]
    '\tthen do Z'
    >>> j[4]
    ',CONTEXTS'
    """
    """ Given an original todo, insert a new one underneath it """
    self.contents[original.linenum:original.linenum] = [str(original), str(addition)]
    self.sync()
self.contents[l:l+1] = [self.contents[l+1]]
self.contents[l:l+2] = [self.contents[l+1]]
def mark_current_done(self):
    r"""
    >>> i = TodoList([',INBOX','\tmust do X @CURRENT','\tmust do Y', ',CONTEXTS', '\t#FRED', '\t\tdo another thing @FOO'])
    >>> i.mark_current_done()
    >>> i.contents[0:3]
    [',INBOX', '\tmust do Y', ',CONTEXTS']
    >>> 'must do X' in i.contents[-1]
    True
    >>> '@CURRENT' not in i.contents[-1]
    False
    """
    current_todo = self.current_todo()
    l = current_todo.linenum
    current_todo.unset_current()  # remove current tag
    # remove ignore tags
    current_todo.remove_tags_if(lambda x: x.same_name_as(Tag.ignore_tag()))
    repeat_tag = [ t for t in current_todo.tags() if t.is_repeat() ]
    done = self.contents[l]
    if repeat_tag:
        # create ignoreuntil from this point
        ignore_tag = repeat_tag[0].ignore_from_repeat(datetime.datetime.now())
        current_todo.add_tag(ignore_tag)
        self.contents[current_todo.linenum] = str(current_todo)
        self.sync()
    else:
        # remove entry from current position
        self.contents[l:l+1] = [self.contents[l+1]]
        # put timestamped copy at end of file
        done = "\t"+datetime.datetime.now().isoformat() + " " + done
        self.contents[-1:] = [self.contents[-1], done ]
        self.sync()
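The one-character change from contents[l:l+1] to contents[l:l+2] turns "overwrite the done line with a copy of its successor" (which leaves the successor duplicated) into "collapse both slots into the successor". The splice semantics in isolation:

    contents = [',INBOX', '\tmust do X @CURRENT', '\tmust do Y', ',CONTEXTS']
    l = 1
    old = list(contents)
    old[l:l+1] = [old[l+1]]   # rem version: 'must do Y' now appears twice
    new = list(contents)
    new[l:l+2] = [new[l+1]]   # add version: the done entry is simply dropped
    print(old)  # [',INBOX', '\tmust do Y', '\tmust do Y', ',CONTEXTS']
    print(new)  # [',INBOX', '\tmust do Y', ',CONTEXTS']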
print "usage: python imacro_asm.py infile.jsasm outfile.c.out"
print("usage: python imacro_asm.py infile.jsasm outfile.c.out")
def fail(msg, *args): raise ValueError("%s at %s:%d" % (msg % args, filename, lineno + 1))
def recv(self, block_size=65536): cothread.select([self.sock.fileno()], [], [])
def recv(self, block_size=65536, timeout=0.2): cothread.select([self.sock.fileno()], [], [], timeout)
def recv(self, block_size=65536):
    cothread.select([self.sock.fileno()], [], [])
    chunk = self.sock.recv(block_size)
    if not chunk:
        raise self.EOF('Connection closed by server')
    return chunk
squared = u'\u00B2'
def read(self):
    '''Can be called at any time to read the most recent buffer.'''
    return 1e-3 * self.buffer.read(self.notify_size)
pass
self.show_x = show_x
self.show_y = show_y
def show_xy(self, show_x, show_y): pass
return numpy.nanmin(value), numpy.nanmax(value)
ix = (self.show_x, self.show_y)
ix = numpy.nonzero(ix)[0]
return numpy.nanmin(value[:, ix]), numpy.nanmax(value[:, ix])
def get_minmax(self, value):
    value = self.compute(value)
    return numpy.nanmin(value), numpy.nanmax(value)
def get_minmax(self, value): return numpy.nanmin(value), numpy.nanmax(value)
self.show_x = show_x
self.show_y = show_y
mode_common.show_xy(self, show_x, show_y)
def show_xy(self, show_x, show_y):
    self.show_x = show_x
    self.show_y = show_y
    self.set_visible()
self.show_x = show_x
self.show_y = show_y
mode_common.show_xy(self, show_x, show_y)
def show_xy(self, show_x, show_y):
    self.show_x = show_x
    self.show_y = show_y
    self.cxb.setVisible(show_x)
    self.cyb.setVisible(show_y)
self.channel = int(self.ui.channel_id.text())
self.bpm_name = 'BPM id %d' % self.channel
self.monitor.set_id(self.channel)
channel = int(self.ui.channel_id.text())
if channel != self.channel:
    self.channel = channel
    self.bpm_name = 'BPM id %d' % channel
    self.monitor.set_id(channel)
def set_channel_id(self):
    self.channel = int(self.ui.channel_id.text())
    self.bpm_name = 'BPM id %d' % self.channel
    self.monitor.set_id(self.channel)
print 'Generating manifest'
def generate_manifest(self, path, prefix, parts, parts_digest, file, key, iv,
                      cert_path, ec2cert_path, private_key_path, target_arch,
                      image_size, bundled_size, image_digest, user, kernel,
                      ramdisk, mapping=None, product_codes=None,
                      ancestor_ami_ids=None):
    print 'Generating manifest'
def register_image(self, name=None, description=None, image_location=None,
                   architecture=None, kernel_id=None, ramdisk_id=None,
                   root_device_name=None, block_device_map=None):
    """
    Register an image.

    :type name: string
    :param name: The name of the AMI. Valid only for EBS-based images.

    :type description: string
    :param description: The description of the AMI.

    :type image_location: string
    :param image_location: Full path to your AMI manifest in Amazon S3 storage.
                           Only used for S3-based AMI's.

    :type architecture: string
    :param architecture: The architecture of the AMI. Valid choices are:
                         i386 | x86_64

    :type kernel_id: string
    :param kernel_id: The ID of the kernel with which to launch the instances

    :type root_device_name: string
    :param root_device_name: The root device name (e.g. /dev/sdh)

    :type block_device_map: :class:`boto.ec2.blockdevicemapping.BlockDeviceMapping`
    :param block_device_map: A BlockDeviceMapping data structure describing
                             the EBS volumes associated with the Image.

    :rtype: string
    :return: The new image id
    """
    params = {}
    if name:
        params['Name'] = name
    if description:
        params['Description'] = description
    if architecture:
        params['Architecture'] = architecture
    if kernel_id:
        params['KernelId'] = kernel_id
    if ramdisk_id:
        params['RamdiskId'] = ramdisk_id
    if image_location:
        params['ImageLocation'] = image_location
    if root_device_name:
        params['RootDeviceName'] = root_device_name
    if block_device_map:
        block_device_map.build_list_params(params)
    rs = self.get_object('RegisterImage', params, ResultSet)
    image_id = getattr(rs, 'imageId', None)
    return image_id

EC2Connection.register_image = register_image
def endElement(self, name, value, connection):
    if name == 'virtualName':
        self.current_vname = value
    elif name == 'device' or name == 'deviceName':
        if hasattr(self, 'current_vname') and self.current_vname:
            self[self.current_vname] = value
            self.current_vname = None
        else:
            self.current_name = value
return boto.connect_ec2(
return EC2Connection(
def make_connection(self):
    if not self.ec2_user_access_key:
        self.ec2_user_access_key = self.environ['EC2_ACCESS_KEY']
    if not self.ec2_user_access_key:
        print 'EC2_ACCESS_KEY environment variable must be set.'
        raise ConnectionFailed
filename = '%s.%d' % (file, i)
filename = '%s.%02d' % (file, i)
def split_file(self, file, chunk_size):
    parts = []
    parts_digest = []
    file_size = os.path.getsize(file)
    in_file = open(file, 'rb')
    number_parts = int(file_size / chunk_size)
    number_parts += 1
    bytes_read = 0
    for i in range(0, number_parts, 1):
        filename = '%s.%d' % (file, i)
        part_digest = sha()
        file_part = open(filename, 'wb')
        print 'Part:', self.get_relative_filename(filename)
        part_bytes_written = 0
        while part_bytes_written < IMAGE_SPLIT_CHUNK:
            data = in_file.read(IMAGE_IO_CHUNK)
            file_part.write(data)
            part_digest.update(data)
            data_len = len(data)
            part_bytes_written += data_len
            bytes_read += data_len
            if bytes_read >= file_size:
                break
        file_part.close()
        parts.append(filename)
        parts_digest.append(hexlify(part_digest.digest()))
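The matching add row widens the part suffix from %d to %02d, presumably so part files stay in numeric order when listed lexicographically (by a shell glob or an object store). The difference in a nutshell:

    plain = sorted('image.%d' % i for i in range(12))
    padded = sorted('image.%02d' % i for i in range(12))
    print(plain[:4])   # ['image.0', 'image.1', 'image.10', 'image.11']
    print(padded[:4])  # ['image.00', 'image.01', 'image.02', 'image.03']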
in_file = open(image_file, 'rb')
sha_image = sha()
buf = in_file.read(IMAGE_IO_CHUNK)
while buf:
    sha_image.update(buf)
    buf = in_file.read(IMAGE_IO_CHUNK)
return (image_size, hexlify(sha_image.digest()))
return image_size
def check_image(self, image_file, path):
    print 'Checking image'
    if not os.path.exists(path):
        os.makedirs(path)
    image_size = os.path.getsize(image_file)
    if self.debug:
        print 'Image Size:', image_size, 'bytes'
    in_file = open(image_file, 'rb')
    sha_image = sha()
    buf = in_file.read(IMAGE_IO_CHUNK)
    while buf:
        sha_image.update(buf)
        buf = in_file.read(IMAGE_IO_CHUNK)
    return (image_size, hexlify(sha_image.digest()))
print 'Tarring image'
tar_file = '%s.tar.gz' % os.path.join(path, prefix)
outfile = open(tar_file, 'wb')
targz = '%s.tar.gz' % os.path.join(path, prefix)
targzfile = open(targz, 'w')
tar_cmd = ['tar', 'ch', '-S']
def tarzip_image( self, prefix, file, path, ):
    Util().check_prerequisite_command('tar')
tar_cmd = ['tar', 'ch', '-S']
p1 = Popen(tar_cmd, stdout=PIPE)
p2 = Popen(['gzip'], stdin=p1.stdout, stdout=outfile)
p2.communicate()
outfile.close
if os.path.getsize(tar_file) <= 0:
    print 'Could not tar image'
tarproc = subprocess.Popen(tar_cmd, stdout=subprocess.PIPE)
zipproc = subprocess.Popen(['gzip'], stdin=subprocess.PIPE, stdout=targzfile)
sha_image = sha()
buf=os.read(tarproc.stdout.fileno(), 8196)
while buf:
    zipproc.stdin.write(buf)
    sha_image.update(buf)
    buf=os.read(tarproc.stdout.fileno(), 8196)
zipproc.stdin.close(); targzfile.close()
if os.path.getsize(targz) <= 0:
    print 'Could not tar/compress image'
return tar_file
return (targz, hexlify(sha_image.digest()))
key = hex(BN.rand(16 * 8))[2:34].replace('L', 'c')
key = hex(BN.rand(17 * 8,top=0))[4:36]
def encrypt_image(self, file):
    print 'Encrypting image'
    enc_file = '%s.part' % file.replace('.tar.gz', '')
iv = hex(BN.rand(16 * 8))[2:34].replace('L', 'c')
iv = hex(BN.rand(17 * 8,top=0))[4:36]
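Both the key and iv rows replace hex(BN.rand(16 * 8))[2:34].replace('L', 'c') with hex(BN.rand(17 * 8, top=0))[4:36]. The old slice assumed hex() always yields 32 digits and papered over Python 2's trailing 'L' with a literal 'c', biasing the key material; drawing one extra byte with the top bit forced set appears to make the hex length deterministic, so a clean 32-digit window can be cut out. The variable-length problem is easy to reproduce with the stdlib (a rough sketch of the failure mode, not the M2Crypto call itself):

    import random

    # hex() of a random 128-bit value is not always 32 digits long:
    lengths = set()
    for _ in range(1000):
        n = random.getrandbits(16 * 8)
        lengths.add(len(hex(n)[2:].rstrip('L')))
    print(lengths)  # usually {31, 32}: leading zero nibbles shorten the string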
class ConnectionFailed:
    def __init__(self):
        self.message = "Connection failed"
def __init__(self):
sys.exit(1)
raise ConnectionFailed
def make_connection(self):
user_eucarc = os.path.join(os.getenv('HOME'), ".eucarc")
user_eucarc = None
if 'HOME' in os.environ:
    user_eucarc = os.path.join(os.getenv('HOME'), ".eucarc")
def setup_environ(self):
elif os.path.exists(user_eucarc):
elif user_eucarc is not None and os.path.exists(user_eucarc):
def setup_environ(self):
try:
    Util().check_prerequisite_command(self.MAKEFS_CMD)
except NotFoundError:
    sys.exit(1)
Util().check_prerequisite_command(self.MAKEFS_CMD)
def make_fs(self, image_path):
    try:
        Util().check_prerequisite_command(self.MAKEFS_CMD)
    except NotFoundError:
        sys.exit(1)
sys.exit(1)
raise UnsupportedException
def create_image(self, size_in_MB, image_path):
sys.exit(1)
raise UnsupportedException
def make_fs(self, image_path):
sys.exit(1)
raise UnsupportedException
def make_essential_devs(self, image_path):
sys.exit(1)
def usage(self, compat=False):
sys.exit(1)
raise NotFoundError
def get_environ(self, name):
    if self.environ.has_key(name):
try:
    Util().check_prerequisite_command('tar')
except NotFoundError:
    sys.exit(1)
Util().check_prerequisite_command('tar')
def tarzip_image(self, prefix, file, path):
    try:
        Util().check_prerequisite_command('tar')
    except NotFoundError:
        sys.exit(1)
sys.exit(1)
raise CommandFailed
sys.exit(1)
raise UnsupportedException
def make_image(self, size_in_MB, excludes, prefix, destination_path):
    image_file = '%s.img' % (prefix)
    image_path = '%s/%s' % (destination_path, image_file)
    if not os.path.exists(destination_path):
        os.makedirs(destination_path)
try:
    Util().check_prerequisite_command('losetup')
except NotFoundError:
    sys.exit(1)
Util().check_prerequisite_command('losetup')
def create_loopback(self, image_path):
sys.exit(1)
raise CommandFailed
def create_loopback(self, image_path):
try:
    Util().check_prerequisite_command('mount')
except NotFoundError:
    sys.exit(1)
Util().check_prerequisite_command('mount')
def mount_image(self, image_path):
    try:
        Util().check_prerequisite_command('mount')
    except NotFoundError:
        sys.exit(1)
try:
    Util().check_prerequisite_command('umount')
except NotFoundError:
    sys.exit(1)
Util().check_prerequisite_command('umount')
def unmount_image(self, mount_point):
    try:
        Util().check_prerequisite_command('umount')
    except NotFoundError:
        sys.exit(1)