rem (string, lengths 0–322k) | add (string, lengths 0–2.05M) | context (string, lengths 8–228k)
---|---|---
javascript = [LocalJSLink('beaker', '/static/javascript/searchbar_v4.js')]
|
javascript = [LocalJSLink('beaker', '/static/javascript/searchbar_v5.js')]
|
def __json__(self): return { 'field_id' : self.field_id, }
|
javascript = [LocalJSLink('beaker', '/static/javascript/provision.js'),LocalJSLink('beaker', '/static/javascript/searchbar_v4.js')]
|
javascript = [LocalJSLink('beaker', '/static/javascript/provision.js'),LocalJSLink('beaker', '/static/javascript/searchbar_v5.js')]
|
def display(self,value=None,**params): if 'options' in params: if 'searchvalue' in params['options']: params['searchvalue'] = params['options']['searchvalue'] if 'action' in params: params['all_history'] = params['action'] return super(SystemHistory, self).display(value,**params)
|
return (self.recipe.watchdog.kill_time - datetime.utcnow()).seconds
|
delta = self.recipe.watchdog.kill_time - datetime.utcnow() return delta.seconds + (86400 * delta.days)
|
def status_watchdog(self): """ Return the number of seconds left on the current watchdog if it exists. """ if self.recipe.watchdog: return (self.recipe.watchdog.kill_time - datetime.utcnow()).seconds else: return False
|
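The change above works around a classic `datetime.timedelta` pitfall: `.seconds` holds only the sub-day remainder (0–86399), so a kill time more than 24 hours away silently loses whole days. A minimal stdlib-only sketch of the difference (not Beaker code):

```python
from datetime import datetime, timedelta

kill_time = datetime.utcnow() + timedelta(days=2, hours=3)
delta = kill_time - datetime.utcnow()

print delta.seconds                        # ~10800: only the 3-hour remainder
print delta.seconds + 86400 * delta.days   # ~183600: whole days folded back in
```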
data_setup.create_system(fqdn='nogroup.system')
|
data_setup.create_system(fqdn=u'nogroup.system')
|
def test_filter_by_group(self): data_setup.create_system(fqdn='nogroup.system') self.group = data_setup.create_group() data_setup.create_system(fqdn='grouped.system').groups.append(self.group) session.flush() feed_url = urljoin(get_server_base(), '?' + urlencode({ 'tg_format': 'atom', 'list_tgp_order': '-date_modified', 'systemsearch-0.table': 'System/Group', 'systemsearch-0.operation': 'is', 'systemsearch-0.value': self.group.group_name})) feed = lxml.etree.parse(urlopen(feed_url)).getroot() self.assert_(not self.feed_contains_system(feed, 'nogroup.system')) self.assert_(self.feed_contains_system(feed, 'grouped.system'))
|
data_setup.create_system(fqdn='grouped.system').groups.append(self.group)
|
data_setup.create_system(fqdn=u'grouped.system').groups.append(self.group)
|
def test_filter_by_group(self): data_setup.create_system(fqdn='nogroup.system') self.group = data_setup.create_group() data_setup.create_system(fqdn='grouped.system').groups.append(self.group) session.flush() feed_url = urljoin(get_server_base(), '?' + urlencode({ 'tg_format': 'atom', 'list_tgp_order': '-date_modified', 'systemsearch-0.table': 'System/Group', 'systemsearch-0.operation': 'is', 'systemsearch-0.value': self.group.group_name})) feed = lxml.etree.parse(urlopen(feed_url)).getroot() self.assert_(not self.feed_contains_system(feed, 'nogroup.system')) self.assert_(self.feed_contains_system(feed, 'grouped.system'))
|
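Both rows above make the same one-character fix: the model layer stores FQDNs as `unicode`, and under Python 2 a bare `'...'` literal is a byte string, which tends to provoke encoding warnings or mismatched comparisons in unicode-typed ORM columns. The distinction in isolation:

```python
fqdn_bytes = 'grouped.system'    # Python 2 str (bytes)
fqdn_text = u'grouped.system'    # unicode, what the model expects

print type(fqdn_bytes)   # <type 'str'>
print type(fqdn_text)    # <type 'unicode'>
```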
sel.wait_for_page_to_load('30000')
|
try: sel.click('link=Show all') sel.wait_for_page_to_load('30000') except: pass
|
def check_column_sort(self, column): sel = self.selenium sel.wait_for_page_to_load('30000') sel.click('//table[@id="widget"]/thead/th[%d]//a[@href]' % column) sel.wait_for_page_to_load('30000') row_count = int(sel.get_xpath_count( '//table[@id="widget"]/tbody/tr/td[%d]' % column)) cell_values = [sel.get_table('widget.%d.%d' % (row, column - 1)) # zero-indexed for row in range(0, row_count)] self.assert_(len(set(cell_values)) > 1) # make sure we're checking something assert_sorted(cell_values, key=lambda x: x.lower())
|
log.append('%s: Invalid Key %s ' % data['key'])
|
log.append('%s: Invalid Key %s ' % (system.fqdn, data['key']))
|
def _from_csv(cls,system,data,csv_type,log): """ Import data from CSV file into System Objects """ if 'key' in data and data['key']: try: key = Key.by_name(data['key']) except InvalidRequestError: log.append('%s: Invalid Key %s ' % data['key']) return False else: log.append('%s: Key must not be blank!' % system.fqdn) return False if 'key_value' in data and data['key_value']: if key.numeric: system_key_values = system.key_values_int try: key_value = Key_Value_Int.by_key_value(system, key, data['key_value']) except InvalidRequestError: key_value = Key_Value_Int(key=key, key_value=data['key_value']) else: system_key_values = system.key_values_string try: key_value = Key_Value_String.by_key_value(system, key, data['key_value']) except InvalidRequestError: key_value = Key_Value_String(key=key, key_value=data['key_value']) else: log.append('%s: Key Value must not be blank!' % system.fqdn) return False deleted = False if 'deleted' in data: deleted = smart_bool(data['deleted']) if deleted: if key_value in system_key_values: activity = SystemActivity(identity.current.user, 'CSV', 'Removed', 'Key/Value', '%s/%s' % (data['key'],data['key_value']), '') system.activity.append(activity) system_key_values.remove(key_value) if not key_value.id: session.expunge(key_value) else: if key_value not in system_key_values: activity = SystemActivity(identity.current.user, 'CSV', 'Added', 'Key/Value', '', '%s/%s' % (data['key'],data['key_value'])) system.activity.append(activity) system_key_values.append(key_value) session.save_or_update(key_value) session.flush([key_value]) return True
|
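The removed line is a genuine crasher: `'%s: Invalid Key %s '` has two placeholders but `% data['key']` supplies only one value, so the logging call itself raises `TypeError` instead of recording the error. A quick reproduction with hypothetical values:

```python
fqdn = 'host.example.com'
key = 'MEMORY'

try:
    msg = '%s: Invalid Key %s ' % key       # one value, two placeholders
except TypeError, e:
    print 'formatting failed: %s' % e       # not enough arguments for format string

print '%s: Invalid Key %s ' % (fqdn, key)   # fixed: pass a tuple
```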
default_whiteboard_title = 'N/A'
|
default_whiteboard_title = ''
|
def get_results(self,arch,whiteboard=None): try: if not whiteboard: return_list = [] for w in self.results[arch]: return_list += self.results[arch][w] return return_list else: return self.results[arch][whiteboard] except KeyError, (e): #This is fine, just means that this task has no entry for a given arch/whiteboard #log.debug('Index does not exist Arch %s whiteboard:%s ' % (arch,whiteboard)) return []
|
whiteboard_title = whiteboard_name = self.default_whiteboard_title fields.append(InnerGrid.Column(name=whiteboard_name,
|
whiteboard_title = whiteboard_name = self.default_whiteboard_title random.seed() fields.append(InnerGrid.Column(name="%s_%s" % (whiteboard_name,random.random()),
|
def inner_data_grid(self,data,this_arch,show_headers): """ inner_data_grid() displays the grid that is the recipe whiteboard grid for each arch. """ fields = [] my_list = [] sorted_keys = sorted(set(self.whiteboards_used[this_arch])) for whiteboard in sorted_keys: whiteboard_title = whiteboard_name = orig_whiteboard_name = whiteboard if not whiteboard_name: whiteboard_title = whiteboard_name = self.default_whiteboard_title fields.append(InnerGrid.Column(name=whiteboard_name, getter=self.display_whiteboard_results(orig_whiteboard_name,this_arch), title=orig_whiteboard_name)) options = {'show_headers' : show_headers } #my_list = data.get_results(this_arch) return InnerGrid(fields=fields).display([data],options=options)
|
print 'These are the dupe errors: %s' % dupe_errors
|
def update_products(xml_file): dom = etree.parse(xml_file) xpath_string = '//cpe' cpes = dom.xpath(xpath_string) session.begin() try: to_add = {} dupe_errors = [] for cpe in cpes: cpe_text = cpe.text if cpe_text in to_add: dupe_errors.append(cpe_text) else: to_add[cpe_text] = 1 for cpe_to_add in to_add: try: prod = Product.by_name(u'%s' % cpe_to_add) except InvalidRequestError, e: if '%s' % e == 'No rows returned for one()': session.save(Product(u'%s' % cpe_to_add)) continue else: raise print 'These are the dupe errors: %s' % dupe_errors session.commit() finally: session.rollback()
|
|
working = SystemStatus.by_name(u'Working')
|
def queued_recipes(*args): recipes = Recipe.query()\ .join('status')\ .join('systems')\ .join(['recipeset','priority'])\ .filter( or_( and_(Recipe.status==TaskStatus.by_name(u'Queued'), System.user==None, RecipeSet.lab_controller==None ), and_(Recipe.status==TaskStatus.by_name(u'Queued'), System.user==None, RecipeSet.lab_controller_id==System.lab_controller_id ) ) ) # Order recipes by priority. # FIXME Add secondary order by number of matched systems. if True: recipes = recipes.order_by(TaskPriority.id.desc()) if not recipes.count(): return False log.debug("Entering queued_recipes routine") working = SystemStatus.by_name(u'Working') for recipe in recipes: session.begin() try: systems = recipe.dyn_systems.filter(and_(System.user==None, System.status==working)) # Order systems by owner, then Group, finally shared for everyone. # FIXME Make this configurable, so that a user can specify their scheduling # preference from the job. # <recipe><scheduler method='random|fair|owner|group'/></recipe> if True: user = recipe.recipeset.job.owner systems = systems.order_by(case([(System.owner==user, 1), (System.owner!=user and Group.systems==None, 2)], else_=3)) if recipe.recipeset.lab_controller: # First recipe of a recipeSet determines the lab_controller systems = systems.filter( System.lab_controller==recipe.recipeset.lab_controller ) system = systems.first() if system: log.debug("System : %s is available for Recipe %s" % (system, recipe.id)) # Atomic operation to put recipe in Scheduled state if session.connection(Recipe).execute(recipe_table.update( and_(recipe_table.c.id==recipe.id, recipe_table.c.status_id==TaskStatus.by_name(u'Queued').id)), status_id=TaskStatus.by_name(u'Scheduled').id).rowcount == 1: # Even though the above put the recipe in the "Scheduled" state # it did not execute the update_status method. recipe.schedule() # Atomic operation to reserve the system if session.connection(Recipe).execute(system_table.update( and_(system_table.c.id==system.id, system_table.c.user_id==None)), user_id=recipe.recipeset.job.owner.user_id).rowcount == 1: recipe.system = system recipe.recipeset.lab_controller = system.lab_controller recipe.systems = [] # Create the watchdog without an Expire time. log.debug("Created watchdog for recipe id: %s and system: %s" % (recipe.id, system)) recipe.watchdog = Watchdog(system=recipe.system) activity = SystemActivity(recipe.recipeset.job.owner, "Scheduler", "Reserved", "User", "", "%s" % recipe.recipeset.job.owner ) system.activity.append(activity) log.info("recipe ID %s moved from Queued to Scheduled" % recipe.id) else: # The system was taken from underneath us. Put recipe # back into queued state and try again. recipe.queue() else: #Some other thread beat us. Skip this recipe now. # Depending on scheduler load it should be safe to run multiple # Queued processes.. Also, systems that we don't directly # control, for example, systems at a remote location that can # pull jobs but not have any pushed onto them. These systems # could take a recipe and put it in running state. Not sure how # to deal with multi-host jobs at remote locations. May need to # enforce single recipes for remote execution. pass session.commit() except exceptions.Exception, e: session.rollback() log.error("Failed to commit due to :%s" % e) session.close() log.debug("Exiting queued_recipes routine") return True
|
|
self.logger.info("Panic detected for system: %s" % self.watchdog['system'])
|
self.proxy.logger.info("Panic detected for system: %s" % self.watchdog['system'])
|
def update(self): """ If the log exists and the file has grown then upload the new piece """ if os.path.exists(self.log): file = open(self.log, "r") where = self.where file.seek(where) line = file.read(65536) self.where = file.tell() file.close() #FIXME make this work on a list of search items # Also, allow it to be disabled if line.find("Kernel panic") != -1: self.logger.info("Panic detected for system: %s" % self.watchdog['system']) # Report the panic #self.task_result(self.watchdog['task_id']) # Abort the recipe #self.recipe_abort(self.watchdog['recipe_id']) if not line: return False else: size = len(line) data = base64.encodestring(line) md5sum = md5_constructor(line).hexdigest() self.proxy.recipe_upload_file(self.watchdog['recipe_id'], "/", self.filename, size, md5sum, where, data) return True
|
repos = self.doc.createElement('repos')
|
self.repos = self.doc.createElement('repos')
|
def __init__(self, *args, **kwargs): self.node = self.doc.createElement('recipe') self.node.setAttribute('whiteboard','') self.andDistroRequires = self.doc.createElement('and') self.andHostRequires = self.doc.createElement('and') distroRequires = self.doc.createElement('distroRequires') hostRequires = self.doc.createElement('hostRequires') repos = self.doc.createElement('repos') distroRequires.appendChild(self.andDistroRequires) hostRequires.appendChild(self.andHostRequires) self.node.appendChild(distroRequires) self.node.appendChild(hostRequires) self.node.appendChild(repos)
|
self.node.appendChild(repos)
|
self.node.appendChild(self.repos)
|
def __init__(self, *args, **kwargs): self.node = self.doc.createElement('recipe') self.node.setAttribute('whiteboard','') self.andDistroRequires = self.doc.createElement('and') self.andHostRequires = self.doc.createElement('and') distroRequires = self.doc.createElement('distroRequires') hostRequires = self.doc.createElement('hostRequires') repos = self.doc.createElement('repos') distroRequires.appendChild(self.andDistroRequires) hostRequires.appendChild(self.andHostRequires) self.node.appendChild(distroRequires) self.node.appendChild(hostRequires) self.node.appendChild(repos)
|
javascript = [LocalJSLink('bkr', '/static/javascript/reserve_workflow_v2.js')]
|
javascript = [LocalJSLink('bkr', '/static/javascript/reserve_workflow_v3.js')]
|
def update_params(self,d): log.debug(d) if 'value' in d: if 'distro_ids' in d['value']: if(isinstance(d['value']['distro_ids'],list)): for distro_id in d['value']['distro_ids']: d['hidden_fields'] = [HiddenField(name='distro_id',attrs = {'value' : distro_id})] + d['hidden_fields'][0:]
|
flash(_(u'Unable to find system with id of %s' % id))
|
flash(_(u'Unable to find system with id of %s' % system_id))
|
def report_problem(self, system_id, recipe_id=None, problem_description=None): """ Allows users to report a problem with a system to the system's owner. """ try: system = System.by_id(system_id, identity.current.user) except InvalidRequestError: flash(_(u'Unable to find system with id of %s' % id)) redirect('/') try: recipe = Recipe.by_id(recipe_id) except InvalidRequestError: recipe = None if request.method == 'POST': mail.system_problem_report(system, problem_description, recipe, identity.current.user) activity = SystemActivity(identity.current.user, 'WEBUI', 'Reported problem', 'Status', None, problem_description) system.activity.append(activity) flash(_(u'Your problem report has been forwarded to the system owner')) redirect('/view/%s' % system.fqdn) return dict( title=_(u'Report a problem with %s') % system.fqdn, form=self.report_problem_form, method='post', action='report_problem', value={}, options={'system': system, 'recipe': recipe} )
|
def my_cmp(x,y): m1 = re.search('^(.+?)(\d{1,})?$',x) m2 = re.search('^(.+?)(\d{1,})?$',y) try: distro_1 = m1.group(1).lower() except AttributeError,e: return -1 try: distro_2 = m2.group(1).lower() except AttributeError,e: return 1 distro_1_ver = int(m1.group(2) or 0) distro_2_ver = int(m2.group(2) or 0) if not distro_1 or not distro_2: return distro_1 and 1 or -1 if distro_1 == distro_2: return distro_1_ver and (distro_2_ver and (distro_1_ver < distro_2_ver and -1 or 1) or 1) or -1 else: return distro_1 < distro_2 and -1 or 1
|
def my_cmp(x,y): m1 = re.search('^(.+?)(\d{1,})?$',x) m2 = re.search('^(.+?)(\d{1,})?$',y) try: distro_1 = m1.group(1).lower() except AttributeError,e: #x has no value, it goes first return -1
|
|
@staticmethod def my_cmp(x,y): m1 = re.search('^(.+?)(\d{1,})?$',x) m2 = re.search('^(.+?)(\d{1,})?$',y) try: distro_1 = m1.group(1).lower() except AttributeError,e: return -1 try: distro_2 = m2.group(1).lower() except AttributeError,e: return 1 distro_1_ver = int(m1.group(2) or 0) distro_2_ver = int(m2.group(2) or 0) if not distro_1 or not distro_2: return distro_1 and 1 or -1 if distro_1 == distro_2: return distro_1_ver and (distro_2_ver and (distro_1_ver < distro_2_ver and -1 or 1) or 1) or -1 else: return distro_1 < distro_2 and -1 or 1
|
def my_cmp(x,y): m1 = re.search('^(.+?)(\d{1,})?$',x) m2 = re.search('^(.+?)(\d{1,})?$',y) try: distro_1 = m1.group(1).lower() except AttributeError,e: #x has no value, it goes first return -1
|
|
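`my_cmp` is a Python 2 three-way comparator: it splits a distro name into an alphabetic stem and an optional trailing version number, orders by stem first and number second, and sorts empty values to the front. A compact sketch of the same idea, usable with `sorted(..., cmp=...)` exactly as the widget does (simplified, not the Beaker code):

```python
import re

def distro_cmp(x, y):
    """Order names like 'Fedora9': stem alphabetically, trailing number numerically."""
    stem_x, ver_x = re.search(r'^(.+?)(\d*)$', x).groups()
    stem_y, ver_y = re.search(r'^(.+?)(\d*)$', y).groups()
    c = cmp(stem_x.lower(), stem_y.lower())
    if c != 0:
        return c
    return cmp(int(ver_x or 0), int(ver_y or 0))

print sorted(['Fedora10', 'RedHatEnterpriseLinux5', 'Fedora9'], cmp=distro_cmp)
# ['Fedora9', 'Fedora10', 'RedHatEnterpriseLinux5']
```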
params['all_distro_familys'] = [('','None Selected')] + [[osmajor,osmajor] for osmajor in sorted(e,cmp=my_cmp )]
|
params['all_distro_familys'] = [('','None Selected')] + [[osmajor,osmajor] for osmajor in sorted(e,cmp=self.my_cmp )]
|
def display(self,value=None,**params): if 'options' in params: for k in params['options'].keys(): params[k] = params['options'][k] del params['options'][k] params['all_arches'] = [[elem.arch,elem.arch] for elem in model.Arch.query()] params['all_tags'] = [['','None Selected']] + [[elem.tag,elem.tag] for elem in model.DistroTag.query()] params['all_methods'] = [[elem,elem] for elem in model.Distro.all_methods()] e = [elem.osmajor for elem in model.OSMajor.query()] params['all_distro_familys'] = [('','None Selected')] + [[osmajor,osmajor] for osmajor in sorted(e,cmp=my_cmp )] return super(ReserveWorkflow,self).display(value,**params)
|
query = systems
|
try: query = systems.outerjoin(['groups','users'], aliased=True) except AttributeError, (e): log.error('A non Query object has been passed into the available method, using default query instead: %s' % e) query = cls.query().outerjoin(['groups','users'], aliased=True)
|
def available(cls, user,systems=None): """ Builds on all. Only systems which this user has permission to reserve. If a system is loaned then its only available for that person. """ if systems: query = systems else: query = System.all(user) query = query.filter(and_( System.status==SystemStatus.by_name(u'Working'), case([(System.groups == None,1),(System.groups.any(User.groups.any(User.user_id == user.user_id)),1)],else_ = 0), or_(and_(System.owner==user, System.loaned==None), System.loaned==user, and_(System.shared==True, System.loaned==None ), and_(System.shared==True, System.loaned==None, User.user_id==user.user_id ) ) ) ) return query
|
case([(System.groups == None,1),(System.groups.any(User.groups.any(User.user_id == user.user_id)),1)],else_ = 0),
|
def available(cls, user,systems=None): """ Builds on all. Only systems which this user has permission to reserve. If a system is loaned then its only available for that person. """ if systems: query = systems else: query = System.all(user) query = query.filter(and_( System.status==SystemStatus.by_name(u'Working'), case([(System.groups == None,1),(System.groups.any(User.groups.any(User.user_id == user.user_id)),1)],else_ = 0), or_(and_(System.owner==user, System.loaned==None), System.loaned==user, and_(System.shared==True, System.loaned==None ), and_(System.shared==True, System.loaned==None, User.user_id==user.user_id ) ) ) ) return query
|
|
self.get_xml_attr('name', unicode, u'None')
|
return self.get_xml_attr('name', unicode, u'None')
|
def __getattr__(self, attrname): if attrname == 'name': self.get_xml_attr('name', unicode, u'None') else: raise AttributeError, attrname
|
print job.workflow
|
def __getattr__(self, attrname): if attrname == 'name': return self.get_xml_attr('name', unicode, u'None') else: raise AttributeError, attrname
|
|
print job.submitter
|
def __getattr__(self, attrname): if attrname == 'name': return self.get_xml_attr('name', unicode, u'None') else: raise AttributeError, attrname
|
|
self.system.activity.append(SystemActivity(self.system.user,service='Cobbler API',action='Task',new_value='Fail: %s' % e))
|
self.system.activity.append(SystemActivity(self.system.user,service='Cobbler API',action='Task',field_name='', new_value='Fail: %s' % e))
|
def wait_for_event(self, task_id): """ Wait for cobbler task to finish, return True on success raise an exception if it fails. raise an exception if it takes more then 5 minutes """ try: expiredelta = datetime.utcnow() + timedelta(minutes=10) while(True): for line in self.get_event_log(task_id).split('\n'): if line.find("### TASK COMPLETE ###") != -1: return True if line.find("### TASK FAILED ###") != -1: raise BX(_("Cobbler Task:%s Failed" % task_id)) if datetime.utcnow() > expiredelta: raise BX(_('Cobbler Task:%s Timed out' % task_id))
|
self.user = None
|
def action_release(self): self.user = None # Attempt to remove Netboot entry # and turn off machine, but don't fail if we can't if self.release_action: try: self.remote.release(power=False) self.release_action.do(self) except BX, error: pass else: try: self.remote.release() except: pass
|
|
result_string = '/tasks/do_search?task=%s&result_id=%s&whiteboard=%s&arch_id=%s&' % \
|
result_string = '/tasks/executed?task=%s&result_id=%s&whiteboard=%s&arch_id=%s&' % \
|
def _create_task_list_params(self,query_obj,result): """ _create_task_list_params() takes a query obj of the type generated in generate_data() and will return a string representation of a URL pointing to a page which will display the results of the given task """ job_string = '' for job in self.job_ids: if job: job_string += 'job_id=%s&' % job result_string = '/tasks/do_search?task=%s&result_id=%s&whiteboard=%s&arch_id=%s&' % \ (query_obj.task_name, result, query_obj.whiteboard or '', query_obj.arch_id) return result_string + job_string
|
if not System.free(user).filter(System.fqdn == system).first()
|
if not System.free(user).filter(System.fqdn == system).first():
|
def queued_recipes(*args): working = SystemStatus.by_name(u'Working') recipes = Recipe.query()\ .join('status')\ .join('systems')\ .join(['recipeset','priority'])\ .filter( or_( and_(Recipe.status==TaskStatus.by_name(u'Queued'), System.user==None, System.status==working, RecipeSet.lab_controller==None ), and_(Recipe.status==TaskStatus.by_name(u'Queued'), System.user==None, System.status==working, RecipeSet.lab_controller_id==System.lab_controller_id ) ) ) # Order recipes by priority. # FIXME Add secondary order by number of matched systems. if True: recipes = recipes.order_by(TaskPriority.id.desc()) if not recipes.count(): return False log.debug("Entering queued_recipes routine") for recipe in recipes: session.begin() try: systems = recipe.dyn_systems.filter(and_(System.user==None, System.status==working)) # Order systems by owner, then Group, finally shared for everyone. # FIXME Make this configurable, so that a user can specify their scheduling # preference from the job. # <recipe><scheduler method='random|fair|owner|group'/></recipe> if True: user = recipe.recipeset.job.owner systems = systems.order_by(case([(System.owner==user, 1), (System.owner!=user and Group.systems==None, 2)], else_=3)) if recipe.recipeset.lab_controller: # First recipe of a recipeSet determines the lab_controller systems = systems.filter( System.lab_controller==recipe.recipeset.lab_controller ) system = systems.first() if system: log.debug("System : %s is available for Recipe %s" % (system, recipe.id)) # Check to see if user still has proper permissions to use system # Remember the mapping of available systems could have happend hours or even # days ago and groups or loans could have been put in place since. if not System.free(user).filter(System.fqdn == system).first() log.debug("System : %s recipe: %s no longer has access. removing" % (system, recipe.id)) recipe.systems.remove(system) # Atomic operation to put recipe in Scheduled state if session.connection(Recipe).execute(recipe_table.update( and_(recipe_table.c.id==recipe.id, recipe_table.c.status_id==TaskStatus.by_name(u'Queued').id)), status_id=TaskStatus.by_name(u'Scheduled').id).rowcount == 1: recipe.createRepo() # Even though the above put the recipe in the "Scheduled" state # it did not execute the update_status method. recipe.schedule() # Atomic operation to reserve the system if session.connection(Recipe).execute(system_table.update( and_(system_table.c.id==system.id, system_table.c.user_id==None)), user_id=recipe.recipeset.job.owner.user_id).rowcount == 1: recipe.system = system recipe.recipeset.lab_controller = system.lab_controller recipe.systems = [] # Create the watchdog without an Expire time. log.debug("Created watchdog for recipe id: %s and system: %s" % (recipe.id, system)) recipe.watchdog = Watchdog(system=recipe.system) activity = SystemActivity(recipe.recipeset.job.owner, "Scheduler", "Reserved", "User", "", "%s" % recipe.recipeset.job.owner ) system.activity.append(activity) log.info("recipe ID %s moved from Queued to Scheduled" % recipe.id) else: # The system was taken from underneath us. Put recipe # back into queued state and try again. raise BX(_('System %s was stolen from underneath us. will try again.' % system)) else: #Some other thread beat us. Skip this recipe now. # Depending on scheduler load it should be safe to run multiple # Queued processes.. Also, systems that we don't directly # control, for example, systems at a remote location that can # pull jobs but not have any pushed onto them. These systems # could take a recipe and put it in running state. Not sure how # to deal with multi-host jobs at remote locations. May need to # enforce single recipes for remote execution. pass session.commit() except exceptions.Exception, e: session.rollback() log.error("Failed to commit due to :%s" % e) session.close() log.debug("Exiting queued_recipes routine") return True
|
text = request.simple_cookie['column_values'].value vals_to_set = text.split(',') for elem in vals_to_set: kw['systemsearch_column_%s' % elem] = elem
|
if 'column_values' in request.simple_cookie: text = request.simple_cookie['column_values'].value vals_to_set = text.split(',') for elem in vals_to_set: kw['systemsearch_column_%s' % elem] = elem
|
def systems(self, systems, *args, **kw): # Reset joinpoint and then outerjoin on user. This is so the sort # column works in paginate/datagrid. # Also need to do distinct or paginate gets confused by the joins #log.debug(kw)
|
if system.current_user(our_user): options['user_change_text'] = ' (Return)' is_user = True else: title = 'New'
|
if system.current_user(our_user): options['user_change_text'] = ' (Return)' is_user = True
|
def view(self, fqdn=None, **kw): if fqdn: try: system = System.by_fqdn(fqdn,identity.current.user) except InvalidRequestError: flash( _(u"Unable to find %s" % fqdn) ) redirect("/")
|
if session.connection(Recipe).execute(recipe_table.update(
|
elif session.connection(Recipe).execute(recipe_table.update(
|
def queued_recipes(*args): working = SystemStatus.by_name(u'Working') recipes = Recipe.query()\ .join('status')\ .join('systems')\ .join(['recipeset','priority'])\ .filter( or_( and_(Recipe.status==TaskStatus.by_name(u'Queued'), System.user==None, System.status==working, RecipeSet.lab_controller==None ), and_(Recipe.status==TaskStatus.by_name(u'Queued'), System.user==None, System.status==working, RecipeSet.lab_controller_id==System.lab_controller_id ) ) ) # Order recipes by priority. # FIXME Add secondary order by number of matched systems. if True: recipes = recipes.order_by(TaskPriority.id.desc()) if not recipes.count(): return False log.debug("Entering queued_recipes routine") for recipe in recipes: session.begin() try: systems = recipe.dyn_systems.filter(and_(System.user==None, System.status==working)) # Order systems by owner, then Group, finally shared for everyone. # FIXME Make this configurable, so that a user can specify their scheduling # preference from the job. # <recipe><scheduler method='random|fair|owner|group'/></recipe> if True: user = recipe.recipeset.job.owner systems = systems.order_by(case([(System.owner==user, 1), (System.owner!=user and Group.systems==None, 2)], else_=3)) if recipe.recipeset.lab_controller: # First recipe of a recipeSet determines the lab_controller systems = systems.filter( System.lab_controller==recipe.recipeset.lab_controller ) system = systems.first() if system: log.debug("System : %s is available for Recipe %s" % (system, recipe.id)) # Check to see if user still has proper permissions to use system # Remember the mapping of available systems could have happend hours or even # days ago and groups or loans could have been put in place since. if not System.free(user).filter(System.fqdn == system).first(): log.debug("System : %s recipe: %s no longer has access. removing" % (system, recipe.id)) recipe.systems.remove(system) # Atomic operation to put recipe in Scheduled state if session.connection(Recipe).execute(recipe_table.update( and_(recipe_table.c.id==recipe.id, recipe_table.c.status_id==TaskStatus.by_name(u'Queued').id)), status_id=TaskStatus.by_name(u'Scheduled').id).rowcount == 1: recipe.createRepo() # Even though the above put the recipe in the "Scheduled" state # it did not execute the update_status method. recipe.schedule() # Atomic operation to reserve the system if session.connection(Recipe).execute(system_table.update( and_(system_table.c.id==system.id, system_table.c.user_id==None)), user_id=recipe.recipeset.job.owner.user_id).rowcount == 1: recipe.system = system recipe.recipeset.lab_controller = system.lab_controller recipe.systems = [] # Create the watchdog without an Expire time. log.debug("Created watchdog for recipe id: %s and system: %s" % (recipe.id, system)) recipe.watchdog = Watchdog(system=recipe.system) activity = SystemActivity(recipe.recipeset.job.owner, "Scheduler", "Reserved", "User", "", "%s" % recipe.recipeset.job.owner ) system.activity.append(activity) log.info("recipe ID %s moved from Queued to Scheduled" % recipe.id) else: # The system was taken from underneath us. Put recipe # back into queued state and try again. raise BX(_('System %s was stolen from underneath us. will try again.' % system)) else: #Some other thread beat us. Skip this recipe now. # Depending on scheduler load it should be safe to run multiple # Queued processes.. Also, systems that we don't directly # control, for example, systems at a remote location that can # pull jobs but not have any pushed onto them. These systems # could take a recipe and put it in running state. Not sure how # to deal with multi-host jobs at remote locations. May need to # enforce single recipes for remote execution. pass session.commit() except exceptions.Exception, e: session.rollback() log.error("Failed to commit due to :%s" % e) session.close() log.debug("Exiting queued_recipes routine") return True
|
return (joins, and_(*queries))
|
if queries: return (joins, and_(*queries)) else: return ([],None)
|
def filter(self): queries = [] joins = [] for child in self: if callable(getattr(child, 'filter', None)): (join, query) = child.filter() queries.append(query) joins.extend(join) return (joins, and_(*queries))
|
return (joins, or_(*queries))
|
if queries: return (joins, or_(*queries)) else: return ([],None)
|
def filter(self): queries = [] joins = [] for child in self: if callable(getattr(child, 'filter', None)): (join, query) = child.filter() queries.append(query) joins.extend(join) return (joins, or_(*queries))
|
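Both `filter()` methods gain the same guard: if no child contributed a clause, `and_(*queries)` / `or_(*queries)` would be called with nothing, producing an empty conjunction that renders as a no-op at best (and is a deprecation error in later SQLAlchemy releases). The shape of the guard, sketched independently of the XML node classes:

```python
from sqlalchemy import and_

def combine(joins, queries):
    """Collapse child filter clauses into one conjunction,
    tolerating the case where no child produced a clause."""
    if queries:
        return (joins, and_(*queries))
    return ([], None)   # caller treats None as "no filtering"
```

(The `or_` variant is identical apart from the conjunction.)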
def handle_partition(self, partition): fs = None type = 'part' name = 'testarea' size = '5' for child in partition.childNodes:
|
def handle_partition(self, node): partition = self.doc.createElement('partition') for child in node.childNodes:
|
def handle_partition(self, partition): fs = None type = 'part' name = 'testarea' size = '5' for child in partition.childNodes: if child.nodeName == 'type': type = self.getText(child.childNodes) if child.nodeName == 'name': name = self.getText(child.childNodes) if child.nodeName == 'size': size = self.getText(child.childNodes) if child.nodeName == 'fs': fs = self.getText(child.childNodes) part_str = '%s:%s:%s' % (name,type,size) if fs: part_str = '%s:%s' % (part_str, fs) return part_str
|
type = self.getText(child.childNodes)
|
partition.setAttribute('type', self.getText(child.childNodes))
|
def handle_partition(self, partition): fs = None type = 'part' name = 'testarea' size = '5' for child in partition.childNodes: if child.nodeName == 'type': type = self.getText(child.childNodes) if child.nodeName == 'name': name = self.getText(child.childNodes) if child.nodeName == 'size': size = self.getText(child.childNodes) if child.nodeName == 'fs': fs = self.getText(child.childNodes) part_str = '%s:%s:%s' % (name,type,size) if fs: part_str = '%s:%s' % (part_str, fs) return part_str
|
name = self.getText(child.childNodes)
|
partition.setAttribute('name', self.getText(child.childNodes))
|
def handle_partition(self, partition): fs = None type = 'part' name = 'testarea' size = '5' for child in partition.childNodes: if child.nodeName == 'type': type = self.getText(child.childNodes) if child.nodeName == 'name': name = self.getText(child.childNodes) if child.nodeName == 'size': size = self.getText(child.childNodes) if child.nodeName == 'fs': fs = self.getText(child.childNodes) part_str = '%s:%s:%s' % (name,type,size) if fs: part_str = '%s:%s' % (part_str, fs) return part_str
|
size = self.getText(child.childNodes)
|
partition.setAttribute('size', self.getText(child.childNodes))
|
def handle_partition(self, partition): fs = None type = 'part' name = 'testarea' size = '5' for child in partition.childNodes: if child.nodeName == 'type': type = self.getText(child.childNodes) if child.nodeName == 'name': name = self.getText(child.childNodes) if child.nodeName == 'size': size = self.getText(child.childNodes) if child.nodeName == 'fs': fs = self.getText(child.childNodes) part_str = '%s:%s:%s' % (name,type,size) if fs: part_str = '%s:%s' % (part_str, fs) return part_str
|
fs = self.getText(child.childNodes) part_str = '%s:%s:%s' % (name,type,size) if fs: part_str = '%s:%s' % (part_str, fs) return part_str
|
partition.setAttribute('fs', self.getText(child.childNodes)) return partition
|
def handle_partition(self, partition): fs = None type = 'part' name = 'testarea' size = '5' for child in partition.childNodes: if child.nodeName == 'type': type = self.getText(child.childNodes) if child.nodeName == 'name': name = self.getText(child.childNodes) if child.nodeName == 'size': size = self.getText(child.childNodes) if child.nodeName == 'fs': fs = self.getText(child.childNodes) part_str = '%s:%s:%s' % (name,type,size) if fs: part_str = '%s:%s' % (part_str, fs) return part_str
|
if 'watchdog' in self.wrappedEl:
|
if hasattr(self.wrappedEl, 'watchdog'):
|
def __getattr__(self, attrname): if attrname == 'arch': return self.get_xml_attr('arch', unicode, None) elif attrname == 'id': return self.get_xml_attr('id', int, 0) elif attrname == 'recipe_set_id': return self.get_xml_attr('recipe_set_id', int, 0) elif attrname == 'job_id': return self.get_xml_attr('job_id', int, 0) elif attrname == 'distro': return self.get_xml_attr('distro', unicode, None) elif attrname == 'family': return self.get_xml_attr('family', unicode, None) elif attrname == 'variant': return self.get_xml_attr('variant', unicode, None) elif attrname == 'machine': return self.get_xml_attr('machine', unicode, None) elif attrname == 'status': return self.get_xml_attr('status', unicode, None) elif attrname == 'result': return self.get_xml_attr('result', unicode, None) elif attrname == 'ks_meta': return self.get_xml_attr('ks_meta', unicode, None) elif attrname == 'kernel_options': return self.get_xml_attr('kernel_options', unicode, None) elif attrname == 'kernel_options_post': return self.get_xml_attr('kernel_options_post', unicode, None) elif attrname == 'whiteboard': return self.get_xml_attr('whiteboard', unicode, None) elif attrname == 'kickstart': try: return ''.join([t for t in self.wrappedEl['kickstart']]) except: return None elif attrname == 'role': return self.get_xml_attr('role', unicode, u'None') elif attrname == 'watchdog': if 'watchdog' in self.wrappedEl: return XmlWatchdog(self.wrappedEl['watchdog']) else: return None else: raise AttributeError, attrname
|
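The fix above swaps a containment test for an attribute test. `self.wrappedEl` appears to be an xmltramp-style wrapper that exposes child elements as attributes, so `'watchdog' in self.wrappedEl` asks the wrong question entirely. A toy stand-in (the wrapper semantics here are an assumption, not the real class):

```python
class Wrapper(object):
    """Stand-in for an element wrapper exposing children as attributes."""
    def __init__(self, **children):
        self.__dict__.update(children)
    def __iter__(self):
        return iter([])   # iteration yields contained nodes, not child names

el = Wrapper(watchdog='<watchdog/>')
print 'watchdog' in el          # False: tests containment
print hasattr(el, 'watchdog')   # True: tests attribute presence
```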
except: item = cls(**kwargs) session.save(item) session.flush([item])
|
except InvalidRequestError, e: if e == 'Multiple rows returned for one()': log.error('Mutlitple rows returned for %s' % kwargs) elif e == 'No rows returned for one()': item = cls(**kwargs) session.save(item) session.flush([item])
|
def lazy_create(cls, **kwargs): try: item = cls.query.filter_by(**kwargs).one() except: item = cls(**kwargs) session.save(item) session.flush([item]) return item
|
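The original get-or-create helper swallowed every exception, so any failure (including duplicate rows) silently created another record. The fix narrows the handler to `InvalidRequestError` and branches on the message, which was that era's way of telling "no rows" from "multiple rows". With modern SQLAlchemy names, the same pattern looks like this (a sketch, not the Beaker code, which used `session.save` and string comparison):

```python
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound

def lazy_create(session, cls, **kwargs):
    """Fetch the row matching kwargs, creating it only when truly absent."""
    try:
        return session.query(cls).filter_by(**kwargs).one()
    except NoResultFound:
        item = cls(**kwargs)
        session.add(item)
        session.flush()
        return item
    except MultipleResultsFound:
        # duplicate data is a real problem: surface it, don't mask it
        raise
```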
class Utility:
|
def get_widget_attrs(table,column,with_desc=True,sortable=False,index=None): options = {} lower_column = column.lower() lower_table = table.lower()
|
|
systems = sys_search.return_results() new_systems = System.all(identity.current.user,system = systems) return new_systems
|
return sys_search.return_results()
|
def _system_search(self,kw,sys_search,use_custom_columns = False): for search in kw['systemsearch']: #clsinfo = System.get_dict()[search['table']] #Need to change this class_field_list = search['table'].split('/') cls_ref = search_utility.SystemSearch.translate_name(class_field_list[0]) col = class_field_list[1] #If value id False or True, let's convert them to if class_field_list[0] != 'Key': sys_search.append_results(cls_ref,search['value'],col,search['operation']) else: sys_search.append_results(cls_ref,search['value'],col,search['operation'],keyvalue=search['keyvalue'])
|
javascript = []
|
javascript = [ LocalJSLink('bkr','/static/javascript/jquery-1.3.1.js'), ]
|
def __init__(self,*args,**kw): self.priority_widget = PriorityWidget() if 'recipeset' in kw: self.recipeset = kw['recipeset'] else: self.recipeset = None
|
return_dict['keyvals'] = Key.get_all_keys()
|
return_dict['keyvals'] = [x for x in Key.get_all_keys() if x != 'MODULE']
|
def get_keyvalue_search_options(self,**kw): return_dict = {} return_dict['keyvals'] = Key.get_all_keys() return return_dict
|
self.watchdogs[watchdog['system']] = Monitor(watchdog,self.logger,self.conf)
|
self.watchdogs[watchdog['system']] = Monitor(watchdog,self.logger,self.conf,self.hub)
|
def active_watchdogs(self): """Monitor active watchdog entries"""
|
def __init__(self, watchdog, logger, conf, *args, **kwargs):
|
def __init__(self, watchdog, logger, conf, hub, *args, **kwargs):
|
def __init__(self, watchdog, logger, conf, *args, **kwargs): """ Monitor system """ self.log = logger self.conf = conf self.watchdog = watchdog self.logger.debug("Initialize monitor for system: %s" % self.watchdog['system']) self.watchedFiles = [WatchFile("%s/%s" % (self.conf["CONSOLE_LOGS"], self.watchdog["system"]),self.watchdog,self, self.conf["PANIC_REGEX"])]
|
self.log = logger
|
self.logger = logger
|
def __init__(self, watchdog, logger, conf, *args, **kwargs): """ Monitor system """ self.log = logger self.conf = conf self.watchdog = watchdog self.logger.debug("Initialize monitor for system: %s" % self.watchdog['system']) self.watchedFiles = [WatchFile("%s/%s" % (self.conf["CONSOLE_LOGS"], self.watchdog["system"]),self.watchdog,self, self.conf["PANIC_REGEX"])]
|
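These two rows fix a plain attribute mismatch: `__init__` stored the logger as `self.log`, but the very next line reads `self.logger`, which raises `AttributeError` on the first debug call. A stripped-down reproduction:

```python
import logging

class Monitor(object):
    def __init__(self, logger):
        self.log = logger                # stored under one name...
        self.logger.debug('starting')    # ...read back under another

try:
    Monitor(logging.getLogger('demo'))
except AttributeError, e:
    print 'boom: %s' % e   # Monitor instance has no attribute 'logger'
```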
return "%s/%02d/%s/%s" % (self.recipeset.queue_time.year, int(str(job.id)[-2:]), job.id, self.id)
|
return "%s/%02d/%s/%s/%s" % (self.recipeset.queue_time.year, self.recipeset.queue_time.month, job.id // Log.MAX_ENTRIES_PER_DIRECTORY, job.id, self.id)
|
def filepath(self): """ Return file path for this recipe """ job = self.recipeset.job return "%s/%02d/%s/%s" % (self.recipeset.queue_time.year, int(str(job.id)[-2:]), job.id, self.id)
|
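This and the following two filepath fixes replace an ad-hoc bucket (the last two digits of the job id) with the queue month plus `job.id // Log.MAX_ENTRIES_PER_DIRECTORY`, which bounds how many job directories share one parent. The sharding arithmetic in isolation (the cap value below is illustrative, not Beaker's setting):

```python
MAX_ENTRIES_PER_DIRECTORY = 10000   # illustrative cap

def recipe_filepath(year, month, job_id, recipe_id):
    """Shard job directories so no parent collects unbounded children."""
    bucket = job_id // MAX_ENTRIES_PER_DIRECTORY
    return '%s/%02d/%s/%s/%s' % (year, month, bucket, job_id, recipe_id)

print recipe_filepath(2010, 3, 123456, 7)   # 2010/03/12/123456/7
```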
return "%s/%02d/%s/%s/%s" % (recipe.recipeset.queue_time.year, int(str(job.id)[-2:]), job.id, recipe.id, self.id)
|
return "%s/%02d/%s/%s/%s/%s" % (recipe.recipeset.queue_time.year, recipe.recipeset.queue_time.month, job.id // Log.MAX_ENTRIES_PER_DIRECTORY, job.id, recipe.id, self.id)
|
def filepath(self): """ Return file path for this task """ job = self.recipe.recipeset.job recipe = self.recipe return "%s/%02d/%s/%s/%s" % (recipe.recipeset.queue_time.year, int(str(job.id)[-2:]), job.id, recipe.id, self.id)
|
return "%s/%02d/%s/%s/%s/%s" % (recipe.recipeset.queue_time.year, int(str(job.id)[-2:]), job.id, recipe.id, task_id, self.id)
|
return "%s/%02d/%s/%s/%s/%s/%s" % (recipe.recipeset.queue_time.year, recipe.recipeset.queue_time.month, job.id // Log.MAX_ENTRIES_PER_DIRECTORY, job.id, recipe.id, task_id, self.id)
|
def filepath(self): """ Return file path for this result """ job = self.recipetask.recipe.recipeset.job recipe = self.recipetask.recipe task_id = self.recipetask.id return "%s/%02d/%s/%s/%s/%s" % (recipe.recipeset.queue_time.year, int(str(job.id)[-2:]), job.id, recipe.id, task_id, self.id)
|
if self.distro.arch in recipetasks.excluded_arch:
|
if self.distro.arch in recipetask.task.excluded_arch:
|
def append_tasks(self, recipetask): """ Before appending the task to this Recipe, make sure it applies. ie: not excluded for this distro family or arch. """ if self.distro.arch in recipetasks.excluded_arch: return if self.distro.osmajor in recipetasks.excluded_osmajor: return self.tasks.append(recipetask)
|
if self.distro.osmajor in recipetasks.excluded_osmajor:
|
if self.distro.osversion.osmajor in recipetask.task.excluded_osmajor:
|
def append_tasks(self, recipetask): """ Before appending the task to this Recipe, make sure it applies. ie: not excluded for this distro family or arch. """ if self.distro.arch in recipetasks.excluded_arch: return if self.distro.osmajor in recipetasks.excluded_osmajor: return self.tasks.append(recipetask)
|
distro_virt.setAttribute("value", True)
|
distro_virt.setAttribute("value", "1")
|
def _get_distro_requires(self): drs = xml.dom.minidom.parseString(self._distro_requires) # If no distro_virt is asked for default to Virt if not drs.getElementsByTagName("distro_virt"): distroRequires = self.doc.createElement("distroRequires") for dr in drs.getElementsByTagName("distroRequires"): for child in dr.childNodes[:]: distroRequires.appendChild(child) distro_virt = self.doc.createElement("distro_virt") distro_virt.setAttribute("op", "=") distro_virt.setAttribute("value", True) distroRequires.appendChild(distro_virt) return distroRequires.toxml() else: return self._distro_requires
|
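The one-line change matters because `xml.dom.minidom` attribute values must be strings; handing `setAttribute` the boolean `True` either misrenders as `value="True"` or fails during serialization, depending on the Python version, and the consumer expects `"1"` either way. A quick check with plain minidom:

```python
import xml.dom.minidom

doc = xml.dom.minidom.Document()
distro_virt = doc.createElement('distro_virt')
distro_virt.setAttribute('op', '=')
distro_virt.setAttribute('value', '1')   # attribute values must be strings
print distro_virt.toxml()                # <distro_virt op="=" value="1"/>
```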
recipeSet.appendChild(r.to_xml(clone, from_recipeset=True))
|
if not isinstance(r,GuestRecipe): recipeSet.appendChild(r.to_xml(clone, from_recipeset=True))
|
def to_xml(self, clone=False): recipeSet = self.doc.createElement("recipeSet") if not clone: recipeSet.setAttribute("id", "%s" % self.id) for r in self.recipes: recipeSet.appendChild(r.to_xml(clone, from_recipeset=True)) return recipeSet
|
if not from_recipeset:
|
if not from_recipeset and not isinstance(self, GuestRecipe):
|
def to_xml(self, recipe, clone=False, from_recipeset=False): if not clone: recipe.setAttribute("id", "%s" % self.id) recipe.setAttribute("job_id", "%s" % self.recipeset.job_id) recipe.setAttribute("recipe_set_id", "%s" % self.recipe_set_id) recipe.setAttribute("whiteboard", "%s" % self.whiteboard and self.whiteboard or '') if self.kickstart: kickstart = self.doc.createElement("kickstart") text = self.doc.createCDATASection('%s' % self.kickstart) kickstart.appendChild(text) recipe.appendChild(kickstart) recipe.setAttribute("kernel_options", "%s" % self.kernel_options and self.kernel_options or '') recipe.setAttribute("kernel_options_post", "%s" % self.kernel_options_post and self.kernel_options_post or '') if self.duration and not clone: recipe.setAttribute("duration", "%s" % self.duration) if self.result and not clone: recipe.setAttribute("result", "%s" % self.result) if self.status and not clone: recipe.setAttribute("status", "%s" % self.status) if self.distro and not clone: recipe.setAttribute("distro", "%s" % self.distro.name) recipe.setAttribute("arch", "%s" % self.distro.arch) recipe.setAttribute("family", "%s" % self.distro.osversion.osmajor) recipe.setAttribute("variant", "%s" % self.distro.variant) if self.system and not clone: recipe.setAttribute("system", "%s" % self.system) repos = self.doc.createElement("repos") if not clone: repo = self.doc.createElement("repo") repo.setAttribute("name", "beaker-tasks") repo.setAttribute("url", "https://%s/rpms" % get("servername", socket.gethostname())) repos.appendChild(repo) for repo in self.repos: repos.appendChild(repo.to_xml()) recipe.appendChild(repos) drs = xml.dom.minidom.parseString(self.distro_requires) hrs = xml.dom.minidom.parseString(self.host_requires) for dr in drs.getElementsByTagName("distroRequires"): recipe.appendChild(dr) hostRequires = self.doc.createElement("hostRequires") for hr in hrs.getElementsByTagName("hostRequires"): for child in hr.childNodes: hostRequires.appendChild(child) recipe.appendChild(hostRequires) for t in self.tasks: recipe.appendChild(t.to_xml(clone)) if not from_recipeset: recipeSet = self.doc.createElement("recipeSet") recipeSet.appendChild(recipe) job = self.doc.createElement("job") job.appendChild(self.node("whiteboard", self.recipeset.job.whiteboard)) job.appendChild(recipeSet) return job return recipe
|
systems = recipe.dyn_systems.filter(System.user==None)
|
systems = recipe.dyn_systems.filter(and_(System.user==None, System.status==working))
|
def queued_recipes(*args): recipes = Recipe.query()\ .join('status')\ .join('systems')\ .join(['recipeset','priority'])\ .filter( or_( and_(Recipe.status==TaskStatus.by_name(u'Queued'), System.user==None, RecipeSet.lab_controller==None ), and_(Recipe.status==TaskStatus.by_name(u'Queued'), System.user==None, RecipeSet.lab_controller_id==System.lab_controller_id ) ) ) # Order recipes by priority. # FIXME Add secondary order by number of matched systems. if True: recipes = recipes.order_by(TaskPriority.id.desc()) if not recipes.count(): return False log.debug("Entering queued_recipes routine") for recipe in recipes: session.begin() try: systems = recipe.dyn_systems.filter(System.user==None) # Order systems by owner, then Group, finally shared for everyone. # FIXME Make this configurable, so that a user can specify their scheduling # preference from the job. # <recipe><scheduler method='random|fair|owner|group'/></recipe> if True: user = recipe.recipeset.job.owner systems = systems.order_by(case([(System.owner==user, 1), (System.owner!=user and Group.systems==None, 2)], else_=3)) if recipe.recipeset.lab_controller: # First recipe of a recipeSet determines the lab_controller systems = systems.filter( System.lab_controller==recipe.recipeset.lab_controller ) system = systems.first() if system: log.debug("System : %s is available for Recipe %s" % (system, recipe.id)) # Atomic operation to put recipe in Scheduled state if session.connection(Recipe).execute(recipe_table.update( and_(recipe_table.c.id==recipe.id, recipe_table.c.status_id==TaskStatus.by_name(u'Queued').id)), status_id=TaskStatus.by_name(u'Scheduled').id).rowcount == 1: # Even though the above put the recipe in the "Scheduled" state # it did not execute the update_status method. recipe.schedule() # Atomic operation to reserve the system if session.connection(System).execute(system_table.update( and_(system_table.c.id==system.id, system_table.c.user_id==None)), user_id=recipe.recipeset.job.owner.user_id).rowcount == 1: recipe.system = system recipe.recipeset.lab_controller = system.lab_controller recipe.systems = [] # Create the watchdog without an Expire time. log.debug("Created watchdog for recipe id: %s and system: %s" % (recipe.id, system)) recipe.watchdog = Watchdog(system=recipe.system) activity = SystemActivity(recipe.recipeset.job.owner, "Scheduler", "Reserved", "User", "", "%s" % recipe.recipeset.job.owner ) system.activity.append(activity) log.info("recipe ID %s moved from Queued to Scheduled" % recipe.id) else: # The system was taken from underneath us. Put recipe # back into queued state and try again. recipe.queue() else: #Some other thread beat us. Skip this recipe now. # Depending on scheduler load it should be safe to run multiple # Queued processes.. Also, systems that we don't directly # control, for example, systems at a remote location that can # pull jobs but not have any pushed onto them. These systems # could take a recipe and put it in running state. Not sure how # to deal with multi-host jobs at remote locations. May need to # enforce single recipes for remote execution. pass session.commit() except exceptions.Exception, e: session.rollback() log.error("Failed to commit due to :%s" % e) session.close() log.debug("Exiting queued_recipes routine") return True
|
else
|
else:
|
def has_access(self, user): """ Given a user return True if that user has access to this system """ # If a system is loaned its available exclusively to that user if self.loaned != None: if self.loaned == self.user: return True else return False # If were the owner and its not loaned we have access. if self.owner == user: return True # If its shared and not in any groups we have access. # otherwise we need to be in the one of the groups for access. if self.shared == True: if self.groups == None: return True elif self.user in [group.id for group in self.groups]: return True
|
if self.distro.arch in recipetask.task.excluded_arch:
|
if self.distro.arch in [arch.arch for arch in recipetask.task.excluded_arch]:
|
def append_tasks(self, recipetask): """ Before appending the task to this Recipe, make sure it applies. ie: not excluded for this distro family or arch. """ if self.distro.arch in recipetask.task.excluded_arch: return if self.distro.osversion.osmajor in recipetask.task.excluded_osmajor: return self.tasks.append(recipetask)
|
if self.distro.osversion.osmajor in recipetask.task.excluded_osmajor:
|
if self.distro.osversion.osmajor in [osmajor.osmajor for osmajor in recipetask.task.excluded_osmajor]:
|
def append_tasks(self, recipetask): """ Before appending the task to this Recipe, make sure it applies. ie: not excluded for this distro family or arch. """ if self.distro.arch in recipetask.task.excluded_arch: return if self.distro.osversion.osmajor in recipetask.task.excluded_osmajor: return self.tasks.append(recipetask)
|
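Both exclusion fixes repair the same type confusion: the `excluded_arch` / `excluded_osmajor` relations hold mapped exclusion rows, not bare `Arch` / `OSMajor` rows, so the original `in` test compares unlike objects and never matches. Projecting the attribute out first compares like with like. A toy model of the mismatch (class names are illustrative):

```python
class Arch(object):
    def __init__(self, name):
        self.name = name

class TaskExcludeArch(object):
    """Stand-in for the mapped exclusion row wrapping an Arch."""
    def __init__(self, arch):
        self.arch = arch

i386 = Arch('i386')
excluded = [TaskExcludeArch(i386)]

print i386 in excluded                         # False: Arch vs wrapper row
print i386 in [row.arch for row in excluded]   # True: compare like with like
```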
return self.hub.legacypush(fqdn, inventory)
|
return self.hub.push(fqdn, inventory)
|
def push(self, fqdn, inventory): """ Push inventory data to Scheduler """ return self.hub.legacypush(fqdn, inventory)
|
arch_table.c.id == system_arch_map.c.arch_id] query = getattr(arch_table.c.arch, op)(value)
|
alias.c.id == system_arch_map.c.arch_id] query = getattr(alias.c.arch, op)(value)
|
def filter(self): op = self.op_table[self.get_xml_attr('op', unicode, '==')] value = self.get_xml_attr('value', unicode, None) joins = [] query = None if value: joins = [system_table.c.id == system_arch_map.c.system_id, arch_table.c.id == system_arch_map.c.arch_id] query = getattr(arch_table.c.arch, op)(value) return (joins, query)
|
return cls._opposites_is_not_filter(x,y)
|
wildcard_y = re.sub('\*','%',y) if wildcard_y != y: arches = model.Arch.query().filter(model.Arch.arch.like(wildcard_y)) arch_ids = [arch.id for arch in arches] if not arch_ids: return 'True' wildcard = True y = wildcard_y else: try: valid_arch = model.Arch.query().filter(model.Arch.arch == y).one() wildcard = False except: return 'True' return cls._opposites_is_not_filter(x,y,wildcard=wildcard)
|
def arch_is_not_filter(cls,x,y): return cls._opposites_is_not_filter(x,y)
|
return cls._opposites_is_not_filter(x,y) @classmethod def _opposites_is_not_filter(cls,x,y):
|
def distro_is_not_filter(cls,x,y): return cls._opposites_is_not_filter(x,y)
|
|
if wildcard_y != y: return x.like(wildcard_y)
|
if wildcard_y != y: osmajors = model.OSMajor.query().filter(model.OSMajor.osmajor.like(wildcard_y)) osmajor_ids = [osmajor.id for osmajor in osmajors] if not osmajor_ids: return 'True' wildcard = True y = wildcard_y else: try: model.OSMajor.query().filter(model.OSMajor.osmajor == y).one() wildcard = False except: return 'True' return cls._opposites_is_not_filter(x,y,wildcard) @classmethod def _opposites_is_not_filter(cls,x,y,wildcard): if wildcard: return x.like(y)
|
def _opposites_is_not_filter(cls,x,y): wildcard_y = re.sub('\*','%',y) if wildcard_y != y: #looks like we found a wildcard return x.like(wildcard_y) if not y: return or_(x == None,x==y) return x == y
|
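The wildcard-aware filters above all start from the same translation: a user-facing `*` glob becomes SQL's `%`, and only when a wildcard was actually present does the code fall through to a `LIKE` query. That step in isolation:

```python
import re

def glob_to_like(pattern):
    """Translate shell-style '*' wildcards to SQL LIKE syntax."""
    translated = re.sub(r'\*', '%', pattern)
    return translated, translated != pattern   # (LIKE pattern, had wildcard?)

print glob_to_like('RedHat*')    # ('RedHat%', True)
print glob_to_like('Fedora10')   # ('Fedora10', False)
```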
redirect("/recipes/view?id=%s" % system.watchdog.recipe_id)
|
redirect("/recipes/%s" % system.watchdog.recipe_id)
|
def user_change(self, id): msg = "" status = None activity = None try: system = System.by_id(id,identity.current.user) except InvalidRequestError: flash( _(u"Unable to find system with id of %s" % id) ) redirect("/") if system.user: if system.user == identity.current.user or \ identity.current.user.is_admin(): # Don't return a system with an active watchdog if system.watchdog: flash(_(u"Can't return %s active recipe %s" % (system.fqdn, system.watchdog.recipe_id))) redirect("/recipes/view?id=%s" % system.watchdog.recipe_id) else: status = "Returned" activity = SystemActivity(identity.current.user, "WEBUI", status, "User", '%s' % system.user, "") try: system.action_release() except BX, error_msg: msg = "Error: %s Action: %s" % (error_msg,system.release_action) system.activity.append(SystemActivity(identity.current.user, "WEBUI", "%s" % system.release_action, "Return", "", msg)) else: if system.can_share(identity.current.user): status = "Reserved" system.user = identity.current.user activity = SystemActivity(identity.current.user, 'WEBUI', status, 'User', '', '%s' % system.user ) system.activity.append(activity) session.save_or_update(system) flash( _(u"%s %s %s" % (status,system.fqdn,msg)) ) redirect("/view/%s" % system.fqdn)
|
turbomail.enqueue(message)
|
turbomail.send(message)
|
def send_mail(sender, to, subject, body): from turbomail import MailNotEnabledException message = turbomail.Message(sender, to, subject) message.plain = body try: #log.debug("Sending mail: %s" % message.plain) turbomail.enqueue(message) except MailNotEnabledException: log.warning("TurboMail is not enabled!") except Exception, e: log.error("Exception thrown when trying to send mail: %s" % str(e))
|
for recipe in job.recipes: if recipe.is_failed(): msg = "%s\tRecipeID: %s Arch: %s System: %s Distro: %s OSVersion: Status: %s Result: %s\n" \ % (msg, recipe.id, recipe.distro.arch, recipe.system, recipe.distro, recipe.distro.osversion, recipe.status, recipe.result) for task in recipe.tasks: if task.is_failed(): msg = "%s\t\tTaskID: %s TaskName: %s StartTime: %s Duration: %s Status: %s Result: %s\n" \ % (msg, task.id, task.task.name, task.start_time, task.elapsed_time, task.status, task.result)
|
for recipeset in job.recipesets: if recipeset.is_failed(): msg = "%s\tRecipeSetID: %s\n" % ( msg, recipeset.id ) for recipe in recipeset.recipes: if recipe.is_failed(): msg = "%s\t\tRecipeID: %s Arch: %s System: %s Distro: %s OSVersion: %s Status: %s Result: %s\n" \ % (msg, recipe.id, recipe.distro.arch, recipe.system, recipe.distro, recipe.distro.osversion, recipe.status, recipe.result) for task in recipe.tasks: if task.is_failed(): msg = "%s\t\t\tTaskID: %s TaskName: %s StartTime: %s Duration: %s Status: %s Result: %s\n" \ % (msg, task.id, task.task.name, task.start_time, task.duration, task.status, task.result)
|
def failed_recipes(job): msg = "JobID: %s Status: %s Result: %s\n" % \ (job.id, job.status, job.result) for recipe in job.recipes: if recipe.is_failed(): msg = "%s\tRecipeID: %s Arch: %s System: %s Distro: %s OSVersion: Status: %s Result: %s\n" \ % (msg, recipe.id, recipe.distro.arch, recipe.system, recipe.distro, recipe.distro.osversion, recipe.status, recipe.result) for task in recipe.tasks: if task.is_failed(): msg = "%s\t\tTaskID: %s TaskName: %s StartTime: %s Duration: %s Status: %s Result: %s\n" \ % (msg, task.id, task.task.name, task.start_time, task.elapsed_time, task.status, task.result) return msg
|
recipes = model.MachineRecipe.query().join(['distro','arch']).join(['recipeset','job']).filter(model.RecipeSet.job_id.in_(jobs)).add_column(model.Arch.arch)
|
recipes = model.Recipe.query().join(['distro','arch']).join(['recipeset','job']).filter(model.RecipeSet.job_id.in_(jobs)).add_column(model.Arch.arch)
|
def generate_data(self,**kw): """ generate_data() returns a nested tuple which represents tasks->arches->whiteboards and their data objects """ jobs = [] self.arches_used = {} self.whiteboards_used = {} whiteboard_data = {} if 'job_ids' in kw: jobs = kw['job_ids'].split() elif 'whiteboard' in kw: job_query = model.Job.query().filter(model.Job.whiteboard == kw['whiteboard']) for job in job_query: jobs.append(job.id) else: pass
|
m = Modeller() log.debug('field type is %s' % type)
|
m = Modeller()
|
def search_operators(cls,type,loose_match=None): m = Modeller() log.debug('field type is %s' % type) operators = m.return_operators(type,loose_match) return operators
|
if ks_meta: ks_meta = "%s ks_appends=True" else: ks_meta = "ks_appends=True"
|
ks_meta['ks_appends'] = True
|
def provision(self, distro=None, kickstart=None, ks_meta=None, kernel_options=None, kernel_options_post=None, ks_appends=None): """ Provision the System make xmlrpc call to lab controller """ if not distro: return False
|
return dict(grid=grid,list=systems,title='Systems for group %s' % group.group_name,search_bar = None)
|
return dict(grid=grid,list=systems,title='Systems for group %s' % group.group_name,search_bar = None,object_count=systems.count())
|
def systems(self,group_id,*args,**kw): systems = System.by_group(group_id) system_link = ('System', lambda x: x.link) group = Group.by_id(group_id) grid = myDataGrid(fields=[system_link]) return dict(grid=grid,list=systems,title='Systems for group %s' % group.group_name,search_bar = None)
|
print name + ','
|
print '"%s",' % name
|
def show_brief(nm, arch): if type(arch.toc) == type({}): toc = arch.toc for name,_ in toc.items(): print name + ',' else: print '[' toc = arch.toc.data for el in toc: print el[5] + ',' if rec_debug: if el[4] == 'z' or el[4] == 'a': show_brief(el[5], getArchive(el[5])) stack.pop() print ']'
|
print el[5] + ','
|
print '"%s",' % el[5]
|
def show_brief(nm, arch): if type(arch.toc) == type({}): toc = arch.toc for name,_ in toc.items(): print name + ',' else: print '[' toc = arch.toc.data for el in toc: print el[5] + ',' if rec_debug: if el[4] == 'z' or el[4] == 'a': show_brief(el[5], getArchive(el[5])) stack.pop() print ']'
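Note: both hunks wrap each emitted name in double quotes so the listing stays parseable when a table-of-contents entry contains a comma. If full CSV escaping is ever needed, the standard csv module also handles embedded quotes; a hedged alternative sketch:

    import csv
    import sys

    writer = csv.writer(sys.stdout)
    writer.writerow([name])   # quotes and escapes the field as needed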
|
cmd = "strip \"%s\"" % cachedfile
|
cmd = "strip -S \"%s\"" % cachedfile
|
def checkCache(fnm, strip, upx): # On darwin a cache is required anyway to keep the libaries # with relative install names if (not strip and not upx and sys.platform[:6] != 'darwin' and sys.platform != 'win32') or fnm.lower().endswith(".manifest"): return fnm if strip: strip = 1 else: strip = 0 if upx: upx = 1 else: upx = 0 # Load cache index cachedir = os.path.join(HOMEPATH, 'bincache%d%d' % (strip, upx)) if not os.path.exists(cachedir): os.makedirs(cachedir) cacheindexfn = os.path.join(cachedir, "index.dat") if os.path.exists(cacheindexfn): cache_index = _load_data(cacheindexfn) else: cache_index = {} # Verify if the file we're looking for is present in the cache. basenm = os.path.normcase(os.path.basename(fnm)) digest = cacheDigest(fnm) cachedfile = os.path.join(cachedir, basenm) cmd = None if cache_index.has_key(basenm): if digest != cache_index[basenm]: os.remove(cachedfile) else: return cachedfile if upx: if strip: fnm = checkCache(fnm, 1, 0) bestopt = "--best" # FIXME: Linux builds of UPX do not seem to contain LZMA (they assert out) # A better configure-time check is due. if config["hasUPX"] >= (3,) and os.name == "nt": bestopt = "--lzma" upx_executable = "upx" if config.get('upx_dir'): upx_executable = os.path.join(config['upx_dir'], upx_executable) cmd = '"' + upx_executable + '" ' + bestopt + " -q \"%s\"" % cachedfile else: if strip: cmd = "strip \"%s\"" % cachedfile shutil.copy2(fnm, cachedfile) os.chmod(cachedfile, 0755) if pyasm and fnm.lower().endswith(".pyd"): # If python.exe has dependent assemblies, check for embedded manifest # of cached pyd file because we may need to 'fix it' for pyinstaller try: res = winmanifest.GetManifestResources(os.path.abspath(cachedfile)) except winresource.pywintypes.error, e: if e.args[0] == winresource.ERROR_BAD_EXE_FORMAT: # Not a win32 PE file pass else: print "E:", os.path.abspath(cachedfile) raise else: if winmanifest.RT_MANIFEST in res and len(res[winmanifest.RT_MANIFEST]): for name in res[winmanifest.RT_MANIFEST]: for language in res[winmanifest.RT_MANIFEST][name]: try: manifest = winmanifest.Manifest() manifest.filename = ":".join([cachedfile, str(winmanifest.RT_MANIFEST), str(name), str(language)]) manifest.parse_string(res[winmanifest.RT_MANIFEST][name][language], False) except Exception, exc: print ("E: Cannot parse manifest resource %s, " "%s from") % (name, language) print "E:", cachedfile print "E:", traceback.format_exc() else: # Fix the embedded manifest (if any): # Extension modules built with Python 2.6.5 have # an empty <dependency> element, we need to add # dependentAssemblies from python.exe for # pyinstaller olen = len(manifest.dependentAssemblies) for pydep in pyasm: if not pydep.name in [dep.name for dep in manifest.dependentAssemblies]: print ("Adding %s to dependent assemblies " "of %s") % (pydep.name, cachedfile) manifest.dependentAssemblies.append(pydep) if len(manifest.dependentAssemblies) > olen: try: manifest.update_resources(os.path.abspath(cachedfile), [name], [language]) except Exception, e: print "E:", os.path.abspath(cachedfile) raise if cmd: system(cmd) # update cache index cache_index[basenm] = digest _save_data(cacheindexfn, cache_index) return cachedfile
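Note: plain strip discards the whole symbol table, which can break shared objects that still need their dynamic symbols; strip -S removes only debugging symbols. A hedged sketch that sidesteps shell quoting by passing an argument list instead of a formatted command string:

    import subprocess

    # equivalent to the quoted shell command above, minus the quoting pitfalls
    subprocess.call(['strip', '-S', cachedfile])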
|
j = vs.fromRaw(data)
|
j = vs.fromRaw(data, pe)
|
def decode(pathnm): h = win32api.LoadLibraryEx(pathnm, 0, LOAD_LIBRARY_AS_DATAFILE) nm = win32api.EnumResourceNames(h, RT_VERSION)[0] data = win32api.LoadResource(h, RT_VERSION, nm) vs = VSVersionInfo() j = vs.fromRaw(data) if TEST: print vs if data[:j] != vs.toRaw(): print "AAAAAGGHHHH" txt = repr(vs) glbls = {} glbls['VSVersionInfo'] = VSVersionInfo glbls['FixedFileInfo'] = FixedFileInfo glbls['StringFileInfo'] = StringFileInfo glbls['StringTable'] = StringTable glbls['StringStruct'] = StringStruct glbls['VarFileInfo'] = VarFileInfo glbls['VarStruct'] = VarStruct vs2 = eval(txt+'\n', glbls) if vs.toRaw() != vs2.toRaw(): print print 'reconstruction not the same!' print vs2 win32api.FreeLibrary(h) return vs
|
def fromRaw(self, data):
|
def fromRaw(self, data, pe=None):
|
def fromRaw(self, data): i, (sublen, vallen, wType, nm) = parseCommon(data) #vallen is length of the ffi, typ is 0, nm is 'VS_VERSION_INFO' i = ((i + 3) / 4) * 4 # now a VS_FIXEDFILEINFO self.ffi = FixedFileInfo() j = self.ffi.fromRaw(data, i) #print ffi if TEST: if data[i:j] != self.ffi.toRaw(): print "raw:", `data[i:j]` print "ffi:", `self.ffi.toRaw()` i = j while i < sublen: j = i i, (csublen, cvallen, ctyp, nm) = parseCommon(data, i) if string.strip(str(nm)) == "StringFileInfo": sfi = StringFileInfo() k = sfi.fromRaw(csublen, cvallen, nm, data, i, j+csublen) if TEST: if data[j:k] != sfi.toRaw(): rd = data[j:k] sd = sfi.toRaw() for x in range(0, len(rd), 16): rds = rd[x:x+16] sds = sd[x:x+16] if rds != sds: print "rd[%s:%s+16]: %s" % (x, x, `rds`) print "sd[%s:%s+16]: %s" % (x, x, `sds`) print print "raw: len %d, wLength %d" % (len(rd), struct.unpack('h', rd[:2])[0]) print "sfi: len %d, wLength %d" % (len(sd), struct.unpack('h', sd[:2])[0]) self.kids.append(sfi) i = k else: vfi = VarFileInfo() k = vfi.fromRaw(csublen, cvallen, nm, data, i, j+csublen) self.kids.append(vfi) if TEST: if data[j:k] != vfi.toRaw(): print "raw:", `data[j:k]` print "vfi:", `vfi.toRaw()` i = k i = j + csublen i = ((i + 3) / 4) * 4 return i
|
j = self.ffi.fromRaw(data, i)
|
j = self.ffi.fromRaw(data, i, pe)
|
def fromRaw(self, data): i, (sublen, vallen, wType, nm) = parseCommon(data) #vallen is length of the ffi, typ is 0, nm is 'VS_VERSION_INFO' i = ((i + 3) / 4) * 4 # now a VS_FIXEDFILEINFO self.ffi = FixedFileInfo() j = self.ffi.fromRaw(data, i) #print ffi if TEST: if data[i:j] != self.ffi.toRaw(): print "raw:", `data[i:j]` print "ffi:", `self.ffi.toRaw()` i = j while i < sublen: j = i i, (csublen, cvallen, ctyp, nm) = parseCommon(data, i) if string.strip(str(nm)) == "StringFileInfo": sfi = StringFileInfo() k = sfi.fromRaw(csublen, cvallen, nm, data, i, j+csublen) if TEST: if data[j:k] != sfi.toRaw(): rd = data[j:k] sd = sfi.toRaw() for x in range(0, len(rd), 16): rds = rd[x:x+16] sds = sd[x:x+16] if rds != sds: print "rd[%s:%s+16]: %s" % (x, x, `rds`) print "sd[%s:%s+16]: %s" % (x, x, `sds`) print print "raw: len %d, wLength %d" % (len(rd), struct.unpack('h', rd[:2])[0]) print "sfi: len %d, wLength %d" % (len(sd), struct.unpack('h', sd[:2])[0]) self.kids.append(sfi) i = k else: vfi = VarFileInfo() k = vfi.fromRaw(csublen, cvallen, nm, data, i, j+csublen) self.kids.append(vfi) if TEST: if data[j:k] != vfi.toRaw(): print "raw:", `data[j:k]` print "vfi:", `vfi.toRaw()` i = k i = j + csublen i = ((i + 3) / 4) * 4 return i
|
def fromRaw(self, data, i): (self.sig, self.strucVersion, self.fileVersionMS, self.fileVersionLS, self.productVersionMS, self.productVersionLS, self.fileFlagsMask, self.fileFlags, self.fileOS, self.fileType, self.fileSubtype, self.fileDateMS, self.fileDateLS) = struct.unpack('13l', data[i:i+52])
|
def fromRaw(self, data, i, pe=None): if hasattr(pe, 'VS_FIXEDFILEINFO'): info = pe.VS_FIXEDFILEINFO self.sig = info.Signature print 'type self.sig:', type(self.sig) self.strucVersion = info.StrucVersion self.fileVersionMS = info.FileVersionMS self.fileVersionLS = info.FileVersionLS self.productVersionMS = info.ProductVersionMS self.productVersionLS = info.ProductVersionLS self.fileFlagsMask = info.FileFlagsMask self.fileFlags = info.FileFlags self.fileOS = info.FileOS self.fileType = info.FileType self.fileSubtype = info.FileSubtype self.fileDateMS = info.FileDateMS self.fileDateLS = info.FileDateLS
|
def fromRaw(self, data, i): (self.sig, self.strucVersion, self.fileVersionMS, self.fileVersionLS, self.productVersionMS, self.productVersionLS, self.fileFlagsMask, self.fileFlags, self.fileOS, self.fileType, self.fileSubtype, self.fileDateMS, self.fileDateLS) = struct.unpack('13l', data[i:i+52]) return i+52
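Note: these four hunks thread a parsed PE object through decode() and fromRaw() so FixedFileInfo can copy its thirteen fields from pe.VS_FIXEDFILEINFO instead of re-unpacking the resource bytes (the raw path reads thirteen 32-bit values, i.e. 52 bytes). A hedged sketch of obtaining such an object with the pefile library, assuming that is the parser in play:

    import pefile

    pe = pefile.PE(pathnm)   # parses headers, including VS_FIXEDFILEINFO
    vs = VSVersionInfo()
    vs.fromRaw(data, pe)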
|
if target_iswin: exe = exe + "_" is24 = hasattr(sys, "version_info") and sys.version_info[:2] >= (2,4) exe = exe + "67"[is24] exe = exe + "rd"[self.debug] exe = exe + "wc"[self.console] else: if not self.console: exe = exe + 'w' if self.debug: exe = exe + '_d'
|
if not self.console: exe = exe + 'w' if self.debug: exe = exe + '_d'
|
def _bootloader_postfix(self, exe): if target_iswin: exe = exe + "_" is24 = hasattr(sys, "version_info") and sys.version_info[:2] >= (2,4) exe = exe + "67"[is24] exe = exe + "rd"[self.debug] exe = exe + "wc"[self.console] else: if not self.console: exe = exe + 'w' if self.debug: exe = exe + '_d' return exe
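Note: _bootloader_postfix() encodes the build variant into the Windows loader name by indexing two-character strings with integers and booleans (True indexes as 1). A worked example for a Python >= 2.4, release, console build:

    exe = 'support/loader/run'
    exe = exe + "_"
    exe = exe + "67"[True]    # '7' -> Python 2.4+ loader
    exe = exe + "rd"[False]   # 'r' -> release, not debug
    exe = exe + "wc"[True]    # 'c' -> console app
    # result: 'support/loader/run_7rc'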
|
sys.pathex = pathex[:]
|
sys.pathex = self.pathex[:]
|
def __init__(self, scripts=None, pathex=None, hookspath=None, excludes=None): Target.__init__(self) self.inputs = scripts for script in scripts: if not os.path.exists(script): raise ValueError, "script '%s' not found" % script self.pathex = [] if pathex: for path in pathex: self.pathex.append(absnormpath(path)) sys.pathex = pathex[:] self.hookspath = hookspath self.excludes = excludes self.scripts = TOC() self.pure = TOC() self.binaries = TOC() self.zipfiles = TOC() self.datas = TOC() self.__postinit__()
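Note: the fix stores the normalized copies (self.pathex, built with absnormpath) into sys.pathex rather than the caller's raw list, so relative entries do not leak into later import resolution. A small illustration of what the normalization buys, with a sample path assumed:

    import os

    raw = './libs/../libs'
    os.path.normpath(os.path.abspath(raw))   # e.g. '/build/libs'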
|
if pyasm:
|
if os.path.altsep: fnm = fnm.replace(os.path.altsep, os.path.sep) if pyasm and os.path.dirname(fnm.lower()) != os.path.join(HOMEPATH.lower(), "support", "loader"):
|
def checkCache(fnm, strip, upx): # On darwin a cache is required anyway to keep the libaries # with relative install names if not strip and not upx and sys.platform != 'darwin' and sys.platform != 'win32': return fnm global winresource, winmanifest if strip: strip = 1 else: strip = 0 if upx: upx = 1 else: upx = 0 # Load cache index cachedir = os.path.join(HOMEPATH, 'bincache%d%d' % (strip, upx)) if not os.path.exists(cachedir): os.makedirs(cachedir) cacheindexfn = os.path.join(cachedir, "index.dat") if os.path.exists(cacheindexfn): cache_index = _load_data(cacheindexfn) else: cache_index = {} # Verify if the file we're looking for is present in the cache. basenm = os.path.normcase(os.path.basename(fnm)) digest = cacheDigest(fnm) cachedfile = os.path.join(cachedir, basenm) cmd = None if cache_index.has_key(basenm): if digest != cache_index[basenm]: os.remove(cachedfile) else: return cachedfile if upx: if strip: fnm = checkCache(fnm, 1, 0) bestopt = "--best" # FIXME: Linux builds of UPX do not seem to contain LZMA (they assert out) # A better configure-time check is due. if config["hasUPX"] >= (3,) and os.name == "nt": bestopt = "--lzma" upx_executable = "upx" if config.get('upx_dir'): upx_executable = os.path.join(config['upx_dir'], upx_executable) cmd = '"' + upx_executable + '" ' + bestopt + " -q \"%s\"" % cachedfile else: if strip: cmd = "strip \"%s\"" % cachedfile shutil.copy2(fnm, cachedfile) os.chmod(cachedfile, 0755) if pyasm: # If python.exe has dependent assemblies, check for embedded manifest # of cached file because we may need to 'fix it' for pyinstaller try: res = winmanifest.GetManifestResources(os.path.abspath(cachedfile)) except winresource.pywintypes.error, e: if e.args[0] == winresource.ERROR_BAD_EXE_FORMAT: # Not a win32 PE file pass else: raise else: if winmanifest.RT_MANIFEST in res and len(res[winmanifest.RT_MANIFEST]): for name in res[winmanifest.RT_MANIFEST]: for language in res[winmanifest.RT_MANIFEST][name]: try: manifest = winmanifest.Manifest() manifest.filename = ":".join([cachedfile, str(winmanifest.RT_MANIFEST), str(name), str(language)]) manifest.parse_string(res[winmanifest.RT_MANIFEST][name][language], False) except Exception, exc: print ("E: Cannot parse manifest resource %s, " "%s from") % (name, language) print "E:", cachedfile print "E:", traceback.format_exc() else: # Fix the embedded manifest (if any): # Extension modules built with Python 2.6.5 have # an empty <dependency> element, we need to add # dependentAssemblies from python.exe for # pyinstaller olen = len(manifest.dependentAssemblies) for pydep in pyasm: if not pydep.name in [dep.name for dep in manifest.dependentAssemblies]: print ("Adding %s to dependent assemblies " "of %s") % (pydep.name, cachedfile) manifest.dependentAssemblies.append(pydep) if len(manifest.dependentAssemblies) > olen: manifest.update_resources(cachedfile, [name], [language]) if cmd: system(cmd) # update cache index cache_index[basenm] = digest _save_data(cacheindexfn, cache_index) return cachedfile
|
JUMP_IF_FALSE = dis.opname.index('JUMP_IF_FALSE') JUMP_IF_TRUE = dis.opname.index('JUMP_IF_TRUE')
|
if getattr(sys, 'version_info', (0,0,0)) >= (2,7,0): COND_OPS = [dis.opname.index('POP_JUMP_IF_TRUE'), dis.opname.index('POP_JUMP_IF_FALSE'), dis.opname.index('JUMP_IF_TRUE_OR_POP'), dis.opname.index('JUMP_IF_FALSE_OR_POP'), ] else: COND_OPS = [dis.opname.index('JUMP_IF_FALSE'), dis.opname.index('JUMP_IF_TRUE'), ]
|
def doimport(self, nm): mod = self.owner.getmod(self.__name__ + '.' + nm) return mod
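Note: Python 2.7 dropped the JUMP_IF_TRUE/JUMP_IF_FALSE opcodes in favour of POP_JUMP_IF_* and JUMP_IF_*_OR_POP, so the scanner builds COND_OPS from the running interpreter's opcode table. A hedged, version-agnostic variant of the same lookup:

    import dis

    COND_NAMES = ('POP_JUMP_IF_TRUE', 'POP_JUMP_IF_FALSE',
                  'JUMP_IF_TRUE_OR_POP', 'JUMP_IF_FALSE_OR_POP',
                  'JUMP_IF_TRUE', 'JUMP_IF_FALSE')
    # keep only the names present in this interpreter's opcode table
    COND_OPS = [dis.opname.index(nm) for nm in COND_NAMES if nm in dis.opname]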
|
COND_OPS = [JUMP_IF_TRUE, JUMP_IF_FALSE] STORE_OPS = [STORE_NAME, STORE_FAST, STORE_GLOBAL, STORE_DEREF]
|
STORE_OPS = [STORE_NAME, STORE_FAST, STORE_GLOBAL, STORE_DEREF, STORE_MAP]
|
def doimport(self, nm): mod = self.owner.getmod(self.__name__ + '.' + nm) return mod
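Note: dict displays compile to STORE_MAP on CPython 2.6/2.7, so the scanner must treat it as an ordinary store rather than a name it needs to track. A quick way to see which store opcodes a construct emits:

    import dis

    # on CPython 2.6/2.7 the output includes STORE_MAP for each key/value pair
    dis.dis(compile("d = {'a': 1, 'b': 2}", '<example>', 'exec'))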
|
out = i + oparg
|
out = oparg if op in dis.hasjrel: out += i
|
def pass1(code): instrs = [] i = 0 n = len(code) curline = 0 incondition = 0 out = 0 while i < n: if i >= out: incondition = 0 c = code[i] i = i+1 op = ord(c) if op >= dis.HAVE_ARGUMENT: oparg = ord(code[i]) + ord(code[i+1])*256 i = i+2 else: oparg = None if not incondition and op in COND_OPS: incondition = 1 out = i + oparg elif incondition and op == JUMP_FORWARD: out = max(out, i + oparg) if op == SET_LINENO: curline = oparg else: instrs.append((op, oparg, incondition, curline)) return instrs
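Note: the removed line treated every conditional jump as relative. Absolute jumps (dis.hasjab) carry the target directly in oparg; only relative jumps (dis.hasjrel) are measured from the next instruction, which is exactly what the fix distinguishes. A hedged helper expressing the same rule:

    import dis

    def jump_target(op, oparg, next_i):
        # relative jumps are offsets from the following instruction
        if op in dis.hasjrel:
            return next_i + oparg
        return oparg   # absolute jumps encode the target directly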
|
elif op == IMPORT_STAR:
|
elif op == IMPORT_STAR: assert lastname is not None
|
def scan_code(co, m=None, w=None, b=None, nested=0): instrs = pass1(co.co_code) if m is None: m = [] if w is None: w = [] if b is None: b = [] all = None lastname = None level = -1 # import-level, same behaviour as up to Python 2.4 for i in range(len(instrs)): op, oparg, conditional, curline = instrs[i] if op == IMPORT_NAME: if level <= 0: name = lastname = co.co_names[oparg] else: name = lastname = co.co_names[oparg] #print 'import_name', name, `lastname`, level m.append((name, nested, conditional, level)) elif op == IMPORT_FROM: name = co.co_names[oparg] #print 'import_from', name, `lastname`, level, if level > 0 and (not lastname or lastname[-1:] == '.'): name = lastname + name else: name = lastname + '.' + name #print name m.append((name, nested, conditional, level)) assert lastname is not None elif op == IMPORT_STAR: m.append((lastname+'.*', nested, conditional, level)) elif op == STORE_NAME: if co.co_names[oparg] == "__all__": j = i - 1 pop, poparg, pcondtl, pline = instrs[j] if pop != BUILD_LIST: w.append("W: __all__ is built strangely at line %s" % pline) else: all = [] while j > 0: j = j - 1 pop, poparg, pcondtl, pline = instrs[j] if pop == LOAD_CONST: all.append(co.co_consts[poparg]) else: break elif op in STORE_OPS: pass elif op == LOAD_CONST_level: # starting with Python 2.5, _each_ import is preceeded with a # LOAD_CONST to indicate the relative level. if isinstance(co.co_consts[oparg], (int, long)): level = co.co_consts[oparg] elif op == LOAD_GLOBAL: name = co.co_names[oparg] cndtl = ['', 'conditional'][conditional] lvl = ['top-level', 'delayed'][nested] if name == "__import__": w.append("W: %s %s __import__ hack detected at line %s" % (lvl, cndtl, curline)) elif name == "eval": w.append("W: %s %s eval hack detected at line %s" % (lvl, cndtl, curline)) elif op == EXEC_STMT: cndtl = ['', 'conditional'][conditional] lvl = ['top-level', 'delayed'][nested] w.append("W: %s %s exec statement detected at line %s" % (lvl, cndtl, curline)) else: lastname = None if ctypes: # ctypes scanning requires a scope wider than one bytecode instruction, # so the code resides in a separate function for clarity. ctypesb, ctypesw = scan_code_for_ctypes(co, instrs, i) b.extend(ctypesb) w.extend(ctypesw) for c in co.co_consts: if isinstance(c, type(co)): # FIXME: "all" was not updated here nor returned. Was it the desired # behaviour? _, _, _, all_nested = scan_code(c, m, w, b, 1) if all_nested: all.extend(all_nested) return m, w, b, all
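Note: the added assertion documents a compiler invariant: a star import compiles to an IMPORT_NAME (which sets lastname) immediately followed by IMPORT_STAR, so lastname can never be None at that opcode. The pairing is easy to observe:

    import dis

    # expect IMPORT_NAME 'os' immediately followed by IMPORT_STAR
    dis.dis(compile("from os import *", '<example>', 'exec'))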
|
if isinstance(self.manifest, manifest.Manifest):
|
if isinstance(self.manifest, winmanifest.Manifest):
|
def assemble(self): print "building EXE from", os.path.basename(self.out) trash = [] if not os.path.exists(os.path.dirname(self.name)): os.makedirs(os.path.dirname(self.name)) outf = open(self.name, 'wb') exe = self._bootloader_postfix('support/loader/run') exe = os.path.join(HOMEPATH, exe) if target_iswin or cygwin: exe = exe + '.exe' if config['hasRsrcUpdate'] and (self.icon or self.versrsrc or self.manifest or self.resources): tmpnm = tempfile.mktemp() shutil.copy2(exe, tmpnm) os.chmod(tmpnm, 0755) if self.icon: icon.CopyIcons(tmpnm, self.icon) if self.versrsrc: versionInfo.SetVersion(tmpnm, self.versrsrc) if self.manifest: if isinstance(self.manifest, manifest.Manifest): # Manifest instance winmanifest.UpdateManifestResourcesFromXML(tmpnm, self.manifest.toprettyxml(), [1]) elif "<" in self.manifest: # Assume XML string winmanifest.UpdateManifestResourcesFromXML(tmpnm, self.manifest, [1]) else: # Assume filename winmanifest.UpdateManifestResourcesFromXMLFile(tmpnm, self.manifest, [1]) for res in self.resources: res = res.split(",") for i in range(len(res[1:])): try: res[i + 1] = int(res[i + 1]) except ValueError: pass resfile = res[0] if len(res) > 1: restype = res[1] else: restype = None if len(res) > 2: resname = res[2] else: restype = None if len(res) > 3: reslang = res[3] else: restype = None try: winresource.UpdateResourcesFromResFile(tmpnm, resfile, [restype or "*"], [resname or "*"], [reslang or "*"]) except winresource.pywintypes.error, exc: if exc.args[0] != winresource.ERROR_BAD_EXE_FORMAT: print "E:", str(exc) continue if not restype or not resname: print "E: resource type and/or name not specified" continue if "*" in (restype, resname): print ("E: no wildcards allowed for resource type " "and name when source file does not contain " "resources") continue try: winresource.UpdateResourcesFromDataFile(tmpnm, resfile, restype, [resname], [reslang or 0]) except winresource.pywintypes.error, exc: print "E:", str(exc) trash.append(tmpnm) exe = tmpnm exe = checkCache(exe, self.strip, self.upx and config['hasUPX']) self.copy(exe, outf) if self.append_pkg: print "Appending archive to EXE", self.name self.copy(self.pkg.name, outf) else: print "Copying archive to", self.pkgname shutil.copy2(self.pkg.name, self.pkgname) outf.close() os.chmod(self.name, 0755) _save_data(self.out, (self.name, self.console, self.debug, self.icon, self.versrsrc, self.manifest, self.resources, self.strip, self.upx, self.crypt, mtime(self.name))) for item in trash: os.remove(item) return 1
|
import pyi_optparse as optparsae
|
import pyi_optparse as optparse
|
def main(): global global_opts global opts import pyi_optparse as optparsae cmds = {} p = optparse.OptionParser( usage="%prog [opts] file", description="Generate a plaintext keyfile containing a " "random-generated encryption key. ") cmds["genkey"] = p for c,p in cmds.items(): p.prog = p.get_prog_name() + " " + c cmdnames = cmds.keys() cmdnames.sort() p = optparse.OptionParser( usage="%prog cmd [opts]\n\n" + "Available Commands:\n " + "\n ".join(cmdnames), description="This tool is a helper of crypt-related tasks with PyInstaller." ) p.disable_interspersed_args() global_opts,args = p.parse_args() if not args: p.print_usage() return -1 c = args.pop(0) if c not in cmds.keys(): print "invalid command: %s" % c return -1 p = cmds[c] opts, args = p.parse_args(args) try: return globals()["cmd_" + c](args) except ArgsError, e: p.error(e)
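Note: the removed import bound the module to the misspelled alias optparsae, so the later optparse.OptionParser(...) calls in main() would have raised NameError at runtime. An analogous minimal reproduction:

    import os as oss   # typo: the intended alias was 'os'

    oss.getcwd()       # works
    os.getcwd()        # NameError: name 'os' is not defined (in a fresh module)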
|
if not strip and not upx and sys.platform != 'darwin':
|
if not strip and not upx and sys.platform != 'darwin' and sys.platform != 'win32':
|
def checkCache(fnm, strip, upx): # On darwin a cache is required anyway to keep the libaries # with relative install names if not strip and not upx and sys.platform != 'darwin': return fnm if strip: strip = 1 else: strip = 0 if upx: upx = 1 else: upx = 0 # Load cache index cachedir = os.path.join(HOMEPATH, 'bincache%d%d' % (strip, upx)) if not os.path.exists(cachedir): os.makedirs(cachedir) cacheindexfn = os.path.join(cachedir, "index.dat") if os.path.exists(cacheindexfn): cache_index = _load_data(cacheindexfn) else: cache_index = {} # Verify if the file we're looking for is present in the cache. basenm = os.path.normcase(os.path.basename(fnm)) digest = cacheDigest(fnm) cachedfile = os.path.join(cachedir, basenm) cmd = None if cache_index.has_key(basenm): if digest != cache_index[basenm]: os.remove(cachedfile) else: return cachedfile if upx: if strip: fnm = checkCache(fnm, 1, 0) bestopt = "--best" # FIXME: Linux builds of UPX do not seem to contain LZMA (they assert out) # A better configure-time check is due. if config["hasUPX"] >= (3,) and os.name == "nt": bestopt = "--lzma" upx_executable = "upx" if config.get('upx_dir'): upx_executable = os.path.join(config['upx_dir'], upx_executable) cmd = '"' + upx_executable + '" ' + bestopt + " -q \"%s\"" % cachedfile else: if strip: cmd = "strip \"%s\"" % cachedfile shutil.copy2(fnm, cachedfile) os.chmod(cachedfile, 0755) if cmd: system(cmd) # update cache index cache_index[basenm] = digest _save_data(cacheindexfn, cache_index) return cachedfile
|
if nm.lower() == (assembly.name + ".dll").lower(): ftocnm = nm else: ftocnm = os.path.join(assembly.name, nm)
|
ftocnm = nm
|
def selectAssemblies(pth): """Return a binary's dependent assemblies files that should be included. Return a list of pairs (name, fullpath) """ rv = [] if not os.path.isfile(pth): pth = check_extract_from_egg(pth)[0][0] for assembly in getAssemblies(pth): if excludesRe.search(assembly.name): if not silent: print "I: Skipping assembly", assembly.getid() continue if seen.get(assembly.getid().upper(),0): continue elif assembly.optional: if not silent: print "I: Skipping optional assembly", assembly.getid() continue files = assembly.find_files() if files: seen[assembly.getid().upper()] = 1 for fn in files: fname, fext = os.path.splitext(fn) if fext.lower() == ".manifest": nm = assembly.name + fext else: nm = os.path.basename(fn) if nm.lower() == (assembly.name + ".dll").lower(): # If single DLL assembly with embedded manifest, do not # create a subfolder ftocnm = nm else: ftocnm = os.path.join(assembly.name, nm) if assembly.language not in (None, "", "*", "neutral"): ftocnm = os.path.join(assembly.getlanguage(), ftocnm) nm, ftocnm, fn = [item.encode(sys.getfilesystemencoding()) for item in (nm, ftocnm, fn)] if not seen.get(fn.upper(),0): if not silent: print "I: Adding", ftocnm seen[nm.upper()] = 1 seen[fn.upper()] = 1 rv.append((ftocnm, fn)) else: #print "I: skipping", ftocnm, "part of assembly", \ # assembly.name, "dependency of", pth pass else: print "E: Assembly", assembly.getid(), "not found" return rv
|
if fromlist is None:
|
if not fromlist:
|
def importHook(self, name, globals=None, locals=None, fromlist=None, level=-1): __globals_name = None if globals: __globals_name = globals.get('__name__') # first see if we could be importing a relative name debug("importHook(%s, %s, locals, %s, %s)" % (name, __globals_name, fromlist, level)) _sys_modules_get = sys.modules.get _self_doimport = self.doimport threaded = self.threaded
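Note: for a plain "import a.b" statement, __import__ receives an empty tuple as fromlist, not None, so the falsy test covers both spellings where "fromlist is None" missed the empty-tuple case. The builtin's behaviour, for reference:

    # empty fromlist: returns the top-level package (plain import)
    __import__('os.path', globals(), locals(), ())
    # non-empty fromlist: returns the submodule itself
    __import__('os.path', globals(), locals(), ('join',))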
|
show(name, arch)
|
show(name, arch, debug, name + '.log')
|
def main(): global stack name = sys.argv[1] arch = getArchive(name) stack.append((name, arch)) show(name, arch) while 1: try: toks = string.split(raw_input('? '), None, 1) except EOFError: # Ctrl-D print # clear line break if not toks: usage() continue if len(toks) == 1: cmd = toks[0] arg = '' else: cmd, arg = toks cmd = string.upper(cmd) if cmd == 'U': if len(stack) > 1: arch = stack[-1][1] arch.lib.close() del stack[-1] nm, arch = stack[-1] show(nm, arch) elif cmd == 'O': if not arg: arg = raw_input('open name? ') arg = string.strip(arg) arch = getArchive(arg) if arch is None: print arg, "not found" continue stack.append((arg, arch)) show(arg, arch) elif cmd == 'X': if not arg: arg = raw_input('extract name? ') arg = string.strip(arg) data = getData(arg, arch) if data is None: print "Not found" continue fnm = raw_input('to filename? ') if not fnm: print `data` else: open(fnm, 'wb').write(data) elif cmd == 'Q': break else: usage() for (nm, arch) in stack: arch.lib.close() stack = [] for fnm in cleanup: try: os.remove(fnm) except Exception, e: print "couldn't delete", fnm, e.args
|
show(nm, arch)
|
show(nm, arch, debug, name + '.' + nm + '.log')
|
def main(): global stack name = sys.argv[1] arch = getArchive(name) stack.append((name, arch)) show(name, arch) while 1: try: toks = string.split(raw_input('? '), None, 1) except EOFError: # Ctrl-D print # clear line break if not toks: usage() continue if len(toks) == 1: cmd = toks[0] arg = '' else: cmd, arg = toks cmd = string.upper(cmd) if cmd == 'U': if len(stack) > 1: arch = stack[-1][1] arch.lib.close() del stack[-1] nm, arch = stack[-1] show(nm, arch) elif cmd == 'O': if not arg: arg = raw_input('open name? ') arg = string.strip(arg) arch = getArchive(arg) if arch is None: print arg, "not found" continue stack.append((arg, arch)) show(arg, arch) elif cmd == 'X': if not arg: arg = raw_input('extract name? ') arg = string.strip(arg) data = getData(arg, arch) if data is None: print "Not found" continue fnm = raw_input('to filename? ') if not fnm: print `data` else: open(fnm, 'wb').write(data) elif cmd == 'Q': break else: usage() for (nm, arch) in stack: arch.lib.close() stack = [] for fnm in cleanup: try: os.remove(fnm) except Exception, e: print "couldn't delete", fnm, e.args
|
show(arg, arch)
|
show(arg, arch, debug, name + '.' + arg + '.log')
|
def main(): global stack name = sys.argv[1] arch = getArchive(name) stack.append((name, arch)) show(name, arch) while 1: try: toks = string.split(raw_input('? '), None, 1) except EOFError: # Ctrl-D print # clear line break if not toks: usage() continue if len(toks) == 1: cmd = toks[0] arg = '' else: cmd, arg = toks cmd = string.upper(cmd) if cmd == 'U': if len(stack) > 1: arch = stack[-1][1] arch.lib.close() del stack[-1] nm, arch = stack[-1] show(nm, arch) elif cmd == 'O': if not arg: arg = raw_input('open name? ') arg = string.strip(arg) arch = getArchive(arg) if arch is None: print arg, "not found" continue stack.append((arg, arch)) show(arg, arch) elif cmd == 'X': if not arg: arg = raw_input('extract name? ') arg = string.strip(arg) data = getData(arg, arch) if data is None: print "Not found" continue fnm = raw_input('to filename? ') if not fnm: print `data` else: open(fnm, 'wb').write(data) elif cmd == 'Q': break else: usage() for (nm, arch) in stack: arch.lib.close() stack = [] for fnm in cleanup: try: os.remove(fnm) except Exception, e: print "couldn't delete", fnm, e.args
|
def show(nm, arch):
|
def show(nm, arch, onfile=False, fn=None):
|
def show(nm, arch): if type(arch.toc) == type({}): print " Name: (ispkg, pos, len)" toc = arch.toc else: print " pos, length, uncompressed, iscompressed, type, name" toc = arch.toc.data pprint.pprint(toc)
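Note: the four hunks above give show() an onfile/fn pair and have main() pass per-archive log names, so archive listings can be captured for the test suite's pattern matcher instead of only printed. A hedged sketch of an onfile-aware body, assuming the new signature:

    import pprint
    import sys

    def show(nm, arch, onfile=False, fn=None):
        # mirror the listing to fn when requested, else print it
        toc = arch.toc if isinstance(arch.toc, dict) else arch.toc.data
        stream = open(fn, 'a') if (onfile and fn) else sys.stdout
        pprint.pprint(toc, stream=stream)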
|
opts.pathex.extend(string.split(p, os.pathsep))
|
opts.pathex.extend(p.split(os.pathsep))
|
def run_makespec(opts, args): # Split pathex by using the path separator temppaths = opts.pathex[:] opts.pathex = [] for p in temppaths: opts.pathex.extend(string.split(p, os.pathsep)) spec_file = Makespec.main(args, **opts.__dict__) print "wrote %s" % spec_file return spec_file
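Note: string.split(s, sep) is the long-deprecated function form from the string module; the method form is equivalent and survives into Python 3. For example:

    import os

    "dirA:dirB".split(os.pathsep)   # ['dirA', 'dirB'] where os.pathsep == ':'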
|
if pyasm and os.path.dirname(fnm.lower()) != os.path.join(HOMEPATH.lower(), "support", "loader"):
|
if pyasm and not fnm.lower().startswith("bincache") and \ os.path.dirname(fnm.lower()) not in (os.path.join(HOMEPATH.lower(), "support", "loader"), tempfile.gettempdir().lower()):
|
def checkCache(fnm, strip, upx): # On darwin a cache is required anyway to keep the libaries # with relative install names if not strip and not upx and sys.platform != 'darwin' and sys.platform != 'win32': return fnm global winresource, winmanifest if strip: strip = 1 else: strip = 0 if upx: upx = 1 else: upx = 0 # Load cache index cachedir = os.path.join(HOMEPATH, 'bincache%d%d' % (strip, upx)) if not os.path.exists(cachedir): os.makedirs(cachedir) cacheindexfn = os.path.join(cachedir, "index.dat") if os.path.exists(cacheindexfn): cache_index = _load_data(cacheindexfn) else: cache_index = {} # Verify if the file we're looking for is present in the cache. basenm = os.path.normcase(os.path.basename(fnm)) digest = cacheDigest(fnm) cachedfile = os.path.join(cachedir, basenm) cmd = None if cache_index.has_key(basenm): if digest != cache_index[basenm]: os.remove(cachedfile) else: return cachedfile if upx: if strip: fnm = checkCache(fnm, 1, 0) bestopt = "--best" # FIXME: Linux builds of UPX do not seem to contain LZMA (they assert out) # A better configure-time check is due. if config["hasUPX"] >= (3,) and os.name == "nt": bestopt = "--lzma" upx_executable = "upx" if config.get('upx_dir'): upx_executable = os.path.join(config['upx_dir'], upx_executable) cmd = '"' + upx_executable + '" ' + bestopt + " -q \"%s\"" % cachedfile else: if strip: cmd = "strip \"%s\"" % cachedfile shutil.copy2(fnm, cachedfile) os.chmod(cachedfile, 0755) if os.path.altsep: fnm = fnm.replace(os.path.altsep, os.path.sep) if pyasm and os.path.dirname(fnm.lower()) != os.path.join(HOMEPATH.lower(), "support", "loader"): # If python.exe has dependent assemblies, check for embedded manifest # of cached file because we may need to 'fix it' for pyinstaller # but NEVER alter the loader exe try: res = winmanifest.GetManifestResources(os.path.abspath(cachedfile)) except winresource.pywintypes.error, e: if e.args[0] == winresource.ERROR_BAD_EXE_FORMAT: # Not a win32 PE file pass else: raise else: if winmanifest.RT_MANIFEST in res and len(res[winmanifest.RT_MANIFEST]): for name in res[winmanifest.RT_MANIFEST]: for language in res[winmanifest.RT_MANIFEST][name]: try: manifest = winmanifest.Manifest() manifest.filename = ":".join([cachedfile, str(winmanifest.RT_MANIFEST), str(name), str(language)]) manifest.parse_string(res[winmanifest.RT_MANIFEST][name][language], False) except Exception, exc: print ("E: Cannot parse manifest resource %s, " "%s from") % (name, language) print "E:", cachedfile print "E:", traceback.format_exc() else: # Fix the embedded manifest (if any): # Extension modules built with Python 2.6.5 have # an empty <dependency> element, we need to add # dependentAssemblies from python.exe for # pyinstaller olen = len(manifest.dependentAssemblies) for pydep in pyasm: if not pydep.name in [dep.name for dep in manifest.dependentAssemblies]: print ("Adding %s to dependent assemblies " "of %s") % (pydep.name, cachedfile) manifest.dependentAssemblies.append(pydep) if len(manifest.dependentAssemblies) > olen: manifest.update_resources(cachedfile, [name], [language]) if cmd: system(cmd) # update cache index cache_index[basenm] = digest _save_data(cacheindexfn, cache_index) return cachedfile
|
manifest.update_resources(cachedfile, [name], [language])
|
try: manifest.update_resources(os.path.abspath(cachedfile), [name], [language]) except Exception, e: print "E:", os.path.abspath(cachedfile) raise
|
def checkCache(fnm, strip, upx): # On darwin a cache is required anyway to keep the libaries # with relative install names if not strip and not upx and sys.platform != 'darwin' and sys.platform != 'win32': return fnm global winresource, winmanifest if strip: strip = 1 else: strip = 0 if upx: upx = 1 else: upx = 0 # Load cache index cachedir = os.path.join(HOMEPATH, 'bincache%d%d' % (strip, upx)) if not os.path.exists(cachedir): os.makedirs(cachedir) cacheindexfn = os.path.join(cachedir, "index.dat") if os.path.exists(cacheindexfn): cache_index = _load_data(cacheindexfn) else: cache_index = {} # Verify if the file we're looking for is present in the cache. basenm = os.path.normcase(os.path.basename(fnm)) digest = cacheDigest(fnm) cachedfile = os.path.join(cachedir, basenm) cmd = None if cache_index.has_key(basenm): if digest != cache_index[basenm]: os.remove(cachedfile) else: return cachedfile if upx: if strip: fnm = checkCache(fnm, 1, 0) bestopt = "--best" # FIXME: Linux builds of UPX do not seem to contain LZMA (they assert out) # A better configure-time check is due. if config["hasUPX"] >= (3,) and os.name == "nt": bestopt = "--lzma" upx_executable = "upx" if config.get('upx_dir'): upx_executable = os.path.join(config['upx_dir'], upx_executable) cmd = '"' + upx_executable + '" ' + bestopt + " -q \"%s\"" % cachedfile else: if strip: cmd = "strip \"%s\"" % cachedfile shutil.copy2(fnm, cachedfile) os.chmod(cachedfile, 0755) if os.path.altsep: fnm = fnm.replace(os.path.altsep, os.path.sep) if pyasm and os.path.dirname(fnm.lower()) != os.path.join(HOMEPATH.lower(), "support", "loader"): # If python.exe has dependent assemblies, check for embedded manifest # of cached file because we may need to 'fix it' for pyinstaller # but NEVER alter the loader exe try: res = winmanifest.GetManifestResources(os.path.abspath(cachedfile)) except winresource.pywintypes.error, e: if e.args[0] == winresource.ERROR_BAD_EXE_FORMAT: # Not a win32 PE file pass else: raise else: if winmanifest.RT_MANIFEST in res and len(res[winmanifest.RT_MANIFEST]): for name in res[winmanifest.RT_MANIFEST]: for language in res[winmanifest.RT_MANIFEST][name]: try: manifest = winmanifest.Manifest() manifest.filename = ":".join([cachedfile, str(winmanifest.RT_MANIFEST), str(name), str(language)]) manifest.parse_string(res[winmanifest.RT_MANIFEST][name][language], False) except Exception, exc: print ("E: Cannot parse manifest resource %s, " "%s from") % (name, language) print "E:", cachedfile print "E:", traceback.format_exc() else: # Fix the embedded manifest (if any): # Extension modules built with Python 2.6.5 have # an empty <dependency> element, we need to add # dependentAssemblies from python.exe for # pyinstaller olen = len(manifest.dependentAssemblies) for pydep in pyasm: if not pydep.name in [dep.name for dep in manifest.dependentAssemblies]: print ("Adding %s to dependent assemblies " "of %s") % (pydep.name, cachedfile) manifest.dependentAssemblies.append(pydep) if len(manifest.dependentAssemblies) > olen: manifest.update_resources(cachedfile, [name], [language]) if cmd: system(cmd) # update cache index cache_index[basenm] = digest _save_data(cacheindexfn, cache_index) return cachedfile
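Note: these two hunks harden the manifest rewrite: update_resources() now receives an absolute path and failures name the offending binary before re-raising, while the earlier guard additionally skips bincache and temp-dir copies so the loader stub is never altered. The report-then-reraise shape, generically:

    import os

    try:
        manifest.update_resources(os.path.abspath(cachedfile), [name], [language])
    except Exception:
        print("E: %s" % os.path.abspath(cachedfile))
        raise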
|
pattern_list = eval(open(newlog, 'r').read())
|
pattern_list = eval(open(logfn, 'r').read())
|
def runtests(alltests, filters=None, configfile=None, run_executable=1, verbose=False): info = "Executing PyInstaller tests in: %s" % os.getcwd() print "*" * min(80, len(info)) print info print "*" * min(80, len(info)) OPTS = '' if configfile: # todo: quote correctly OTPS = ' -c "%s"' % configfile build_python = open("python_exe.build", "w") build_python.write(sys.executable+"\n") build_python.write("debug=%s" % __debug__+"\n") build_python.close() if not filters: tests = alltests else: tests = [] for part in filters: tests += [t for t in alltests if part in t and t not in tests] tests = [(len(x), x) for x in tests] tests.sort() path = os.environ["PATH"] counter = { "passed": [], "failed": [], "skipped": [] } for _,test in tests: test = os.path.splitext(os.path.basename(test))[0] if test in MIN_VERSION and MIN_VERSION[test] > sys.version_info: counter["skipped"].append(test) continue if test in DEPENDENCIES: failed = False for mod in DEPENDENCIES[test]: res = os.system(PYTHON + ' -c "import %s"' % mod) if res != 0: failed = True break if failed: if verbose: print "Skipping test because module %s is missing" % mod counter["skipped"].append(test) continue _msg("BUILDING TEST", test) prog = string.join([PYTHON, PYOPTS, os.path.join(HOME, 'Build.py'), OPTS, test+".spec"], ' ') print "BUILDING:", prog res = os.system(prog) if res == 0 and run_executable: files = glob.glob(os.path.join('dist', test + '*')) for exe in files: exe = os.path.splitext(exe)[0] res_tmp = test_exe(exe[5:]) res = res or res_tmp logsfn = glob.glob(test + '.log') logsfn += glob.glob(test + '_?.log') for logfn in logsfn: _msg("EXECUTING MATCHING", logfn) tmpname = os.path.splitext(logfn)[0] newlog = os.path.join('dist', logfn) prog = find_exepath(tmpname) if prog is None: prog = find_exepath(tmpname, os.path.join('dist', test)) command = string.join([PYTHON, PYOPTS, os.path.join(HOME, 'ArchiveViewer.py'), '-b -r >> ' + newlog, prog], ' ') os.system(command) pattern_list = eval(open(newlog, 'r').read()) fname_list = eval(open(newlog, 'r').read()) count = 0 for pattern in pattern_list: found = False for fname in fname_list: if re.match(pattern, fname): count += 1 found =True if verbose: print "MATCH: %s --> %s" % (pattern, fname) break if not found: print "MISSING: %s" % pattern if count < len(pattern_list): res = 1 print "Matching FAILED!" else: print "Matching SUCCESS!" if res == 0: _msg("FINISHING TEST", test, short=1) counter["passed"].append(test) else: _msg("TEST", test, "FAILED", short=1, sep="!!") counter["failed"].append(test) pprint.pprint(counter)
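Note: the removed line read the expected-pattern list out of newlog, the file the matcher had just generated, so the comparison was output-against-output and could never fail. The fix pairs the checked-in expectations with the fresh listing:

    pattern_list = eval(open(logfn, 'r').read())   # expected patterns (source tree)
    fname_list = eval(open(newlog, 'r').read())    # names found in this build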
|
print "MISSING: %s" % pattern
|
if verbose: print "MISSING: %s" % pattern
|
def runtests(alltests, filters=None, configfile=None, run_executable=1, verbose=False): info = "Executing PyInstaller tests in: %s" % os.getcwd() print "*" * min(80, len(info)) print info print "*" * min(80, len(info)) OPTS = '' if configfile: # todo: quote correctly OTPS = ' -c "%s"' % configfile build_python = open("python_exe.build", "w") build_python.write(sys.executable+"\n") build_python.write("debug=%s" % __debug__+"\n") build_python.close() if not filters: tests = alltests else: tests = [] for part in filters: tests += [t for t in alltests if part in t and t not in tests] tests = [(len(x), x) for x in tests] tests.sort() path = os.environ["PATH"] counter = { "passed": [], "failed": [], "skipped": [] } for _,test in tests: test = os.path.splitext(os.path.basename(test))[0] if test in MIN_VERSION and MIN_VERSION[test] > sys.version_info: counter["skipped"].append(test) continue if test in DEPENDENCIES: failed = False for mod in DEPENDENCIES[test]: res = os.system(PYTHON + ' -c "import %s"' % mod) if res != 0: failed = True break if failed: if verbose: print "Skipping test because module %s is missing" % mod counter["skipped"].append(test) continue _msg("BUILDING TEST", test) prog = string.join([PYTHON, PYOPTS, os.path.join(HOME, 'Build.py'), OPTS, test+".spec"], ' ') print "BUILDING:", prog res = os.system(prog) if res == 0 and run_executable: files = glob.glob(os.path.join('dist', test + '*')) for exe in files: exe = os.path.splitext(exe)[0] res_tmp = test_exe(exe[5:]) res = res or res_tmp logsfn = glob.glob(test + '.log') logsfn += glob.glob(test + '_?.log') for logfn in logsfn: _msg("EXECUTING MATCHING", logfn) tmpname = os.path.splitext(logfn)[0] newlog = os.path.join('dist', logfn) prog = find_exepath(tmpname) if prog is None: prog = find_exepath(tmpname, os.path.join('dist', test)) command = string.join([PYTHON, PYOPTS, os.path.join(HOME, 'ArchiveViewer.py'), '-b -r >> ' + newlog, prog], ' ') os.system(command) pattern_list = eval(open(newlog, 'r').read()) fname_list = eval(open(newlog, 'r').read()) count = 0 for pattern in pattern_list: found = False for fname in fname_list: if re.match(pattern, fname): count += 1 found =True if verbose: print "MATCH: %s --> %s" % (pattern, fname) break if not found: print "MISSING: %s" % pattern if count < len(pattern_list): res = 1 print "Matching FAILED!" else: print "Matching SUCCESS!" if res == 0: _msg("FINISHING TEST", test, short=1) counter["passed"].append(test) else: _msg("TEST", test, "FAILED", short=1, sep="!!") counter["failed"].append(test) pprint.pprint(counter)
|
paths.extend(glob(os.path.join(dirnm, self.name)))
|
paths.extend(glob(os.path.join(dirnm, self.name + ext)))
|
def find_files(self, ignore_policies=True): """ Search shared and private assemblies and return a list of files. If any files are not found, return an empty list. IMPORTANT NOTE: For the purpose of getting the dependent assembly files of an executable, the publisher configuration (aka policy) should be ignored (which is the default). Setting ignore_policies=False is only useful to find out which files are actually loaded at runtime. """ # Shared Assemblies: # http://msdn.microsoft.com/en-us/library/aa375996%28VS.85%29.aspx # # Private Assemblies: # http://msdn.microsoft.com/en-us/library/aa375674%28VS.85%29.aspx # # Assembly Searching Sequence: # http://msdn.microsoft.com/en-us/library/aa374224%28VS.85%29.aspx # # NOTE: # Multilanguage User Interface (MUI) support not yet implemented files = [] languages = [] if self.language not in (None, "", "*", "neutral"): languages.append(self.getlanguage()) if "-" in self.language: # language-culture syntax, e.g. en-us # Add only the language part languages.append(self.language.split("-")[0]) if self.language not in ("en-us", "en"): languages.append("en-us") if self.language != "en": languages.append("en") languages.append(self.getlanguage("*")) winsxs = os.path.join(os.getenv("SystemRoot"), "WinSxS") if not os.path.isdir(winsxs) and not silent: print "W: No such dir", winsxs manifests = os.path.join(winsxs, "Manifests") if not os.path.isdir(manifests) and not silent: print "W: No such dir", manifests if not ignore_policies and self.version: if sys.getwindowsversion() < (6, ): # Windows XP pcfiles = os.path.join(winsxs, "Policies") if not os.path.isdir(pcfiles) and not silent: print "W: No such dir", pcfiles else: # Vista or later pcfiles = manifests for language in languages: version = self.version # Search for publisher configuration if not ignore_policies and version: # Publisher Configuration (aka policy) # A publisher configuration file globally redirects # applications and assemblies having a dependence on one # version of a side-by-side assembly to use another version of # the same assembly. This enables applications and assemblies # to use the updated assembly without having to rebuild all of # the affected applications. # http://msdn.microsoft.com/en-us/library/aa375680%28VS.85%29.aspx # # Under Windows XP and 2003, policies are stored as # <version>.policy files inside # %SystemRoot%\WinSxS\Policies\<name> # Under Vista and later, policies are stored as # <name>.manifest files inside %SystemRoot%\winsxs\Manifests redirected = False if os.path.isdir(pcfiles): if not silent: print ("I: Searching for publisher configuration %s..." % self.getpolicyid(True, language=language)) if sys.getwindowsversion() < (6, ): # Windows XP policies = os.path.join(pcfiles, self.getpolicyid(True, language=language) + ".policy") else: # Vista or later policies = os.path.join(pcfiles, self.getpolicyid(True, language=language) + ".manifest") for manifestpth in glob(policies): if not os.path.isfile(manifestpth): if not silent: print "W: Not a file", manifestpth continue if not silent: print "I: Found", manifestpth try: policy = ManifestFromXMLFile(manifestpth) except Exception, exc: print "E: Could not parse file", manifestpth print "E:", str(exc) else: if not silent: print ("I: Checking publisher policy for " "binding redirects") for assembly in policy.dependentAssemblies: if not assembly.same_id(self, True) or \ assembly.optional: continue for redirect in \ assembly.bindingRedirects: if not silent: old = "-".join([".".join([str(i) for i in part]) for part in redirect[0]]) new = ".".join([str(i) for i in redirect[1]]) print "I: Found redirect for " \ "version(s)", old, "->", new if version >= redirect[0][0] and \ version <= redirect[0][-1] and \ version != redirect[1]: if not silent: print "I: Applying redirect", \ ".".join([str(i) for i in version]), \ "->", new version = redirect[1] redirected = True if not redirected and not silent: print "I: Publisher configuration not used" # Search for assemblies according to assembly searching sequence paths = [] if os.path.isdir(manifests): # Add winsxs search paths paths.extend(glob(os.path.join(manifests, self.getid(language=language, version=version) + "_*.manifest"))) if self.filename: # Add private assembly search paths dirnm = os.path.dirname(self.filename) if language in (LANGUAGE_NEUTRAL_NT5, LANGUAGE_NEUTRAL_NT6): for ext in (".dll", ".manifest"): paths.extend(glob(os.path.join(dirnm, self.name))) paths.extend(glob(os.path.join(dirnm, self.name, self.name + ext))) else: for ext in (".dll", ".manifest"): paths.extend(glob(os.path.join(dirnm, language, self.name + ext))) for ext in (".dll", ".manifest"): paths.extend(glob(os.path.join(dirnm, language, self.name, self.name + ext))) if not silent: print ("I: Searching for assembly %s..." % self.getid(language=language, version=version)) for manifestpth in paths: if not os.path.isfile(manifestpth): if not silent: print "W: Not a file", manifestpth continue assemblynm = os.path.basename( os.path.splitext(manifestpth)[0]) if not silent: if manifestpth.endswith(".dll"): print "I: Found manifest in", manifestpth else: print "I: Found manifest", manifestpth try: if manifestpth.endswith(".dll"): manifest = ManifestFromResFile(manifestpth, [1]) else: manifest = ManifestFromXMLFile(manifestpth) except Exception, exc: print "E: Could not parse manifest", manifestpth print "E:", exc else: if manifestpth.startswith(winsxs): assemblydir = os.path.join(winsxs, assemblynm) if not os.path.isdir(assemblydir): if not silent: print "W: No such dir", assemblydir print "W: Assembly incomplete" return [] else: assemblydir = os.path.dirname(manifestpth) files.append(manifestpth) for file_ in self.files or manifest.files: fn = file_.find(assemblydir) if fn: files.append(fn) else: # If any of our files does not exist, # the assembly is incomplete if not silent: print "W: Assembly incomplete" return [] return files
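Note: the removed line globbed the bare assembly name on every pass of the extension loop, so a private single-file assembly was probed without its extension; appending ext makes the loop look for <name>.dll and <name>.manifest as intended. A hedged illustration, with the variable names assumed from the context:

    import os
    from glob import glob

    for ext in (".dll", ".manifest"):
        paths.extend(glob(os.path.join(dirnm, self.name + ext)))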
|