query (string, 9 to 9.05k chars) | document (string, 10 to 222k chars) | metadata (dict) | negatives (sequence, length 30) | negative_scores (sequence, length 30) | document_score (string, 4 to 10 chars) | document_rank (string, 2 classes) |
---|---|---|---|---|---|---|
Set the status of the events with the given rule IDs | def setStatusByIDs(self, rule_ids, urgency, status, comment, newOwner, reviewTime, session_key, currentUser=None, existing_statuses=None):
    # This class provides information on the operations performed
    status_change_meta = LogReviewStatusChanges()
    # Make sure the comment is the minimum length (if defined)
    minimum_length = self.commentLengthRequired(session_key)
    if len(comment.strip()) < minimum_length:
        # Return a message noting that the minimum length was not met
        status_change_meta.incrementFailureCountEx(["comment length does not meet minimum requirement (must be %d characters long or more)" % (minimum_length)])
        return status_change_meta
    # Get the existing statuses
    existing_statuses = existing_statuses or self.getCurrentValues(session_key, rule_ids)
    status_records = []
    success_count = 0
    # Append the new entries
    for rule_id in rule_ids:
        status_records.append(LogReviewStatus(reviewTime, rule_id, newOwner, urgency, status, comment, currentUser, None, rule_id + '_' + str(reviewTime)))
    # Perform the save in chunks.
    for chunk in [status_records[i:i + self.BATCH_SAVE_LIMIT] for i in range(0, len(status_records), self.BATCH_SAVE_LIMIT)]:
        try:
            success_count += self.updateEvents(chunk, session_key, existing_statuses)
        except Exception:
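            # Swallowed intentionally; chunks that fail to save show up as a reduced success_count below.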
            pass
    # Update status change metadata.
    if len(rule_ids) == success_count:
        # All successful.
        status_change_meta.incrementSuccessCount(success_count)
    else:
        # Some failures.
        status_change_meta.incrementSuccessCount(success_count)
        status_change_meta.incrementFailureCount('some notable event(s) could not be updated', len(rule_ids) - success_count)
    return status_change_meta | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setStatuses(self, urgency, status, comment, newOwner, currentUser, ruleUIDs, searchID, reviewTime, existing_statuses, capabilities, session_key):\n\n # Print a log message noting that an operation is about to happen\n if ruleUIDs is not None and searchID is not None:\n logger.info(\"About to edit events matching search %s (though only %d events are to be modified)\", searchID, len(ruleUIDs))\n if searchID is None and (ruleUIDs is not None and len(ruleUIDs) > 0):\n logger.info(\"About to edit events by ID (%d events are to be modified)\", searchID, len(ruleUIDs))\n else:\n logger.info(\"About to edit events matching all events matching search %s\", searchID)\n\n # Refresh the correlation searches list so we don't have to later\n self.refreshCorrelationSearches(session_key)\n\n # Perform the changes\n if searchID is None:\n result = self.setStatusByIDs(ruleUIDs, urgency, status, comment, newOwner, reviewTime, session_key, currentUser, existing_statuses=existing_statuses)\n logger.info(\"Done editing events\")\n return result\n else:\n result = self.setStatusBySearchID(searchID, urgency, status, comment, newOwner, reviewTime, capabilities, session_key, currentUser, force_refresh=False, rule_ids_to_change=ruleUIDs, existing_statuses=existing_statuses)\n logger.info(\"Done editing events matching search %s\", searchID)\n return result",
"async def set_rules(self, ctx: discord.ext.commands.context.Context, *, rules: str):\n guild_info = server_setup.get_guild_info(ctx.guild)\n\n if guild_info[\"rulesChannelID\"] is not None:\n rules_channel = server_setup.get_channel(guild=ctx.guild, channel_id=guild_info[\"rulesChannelID\"])\n embed = await format_rules(rules=rules, title=\"Rules\",\n description=\"You must follow these rules at all times\")\n\n if guild_info[\"rulesMessageID\"] is not None:\n message = await rules_channel.fetch_message(guild_info[\"rulesMessageID\"])\n\n await message.edit(embed=embed)\n\n else:\n message = await rules_channel.send(embed=embed)\n guild_info[\"rulesMessageID\"] = message.id\n\n server_setup.update_guild(guild_info=guild_info)\n\n guild_info[\"rules\"] = rules\n server_setup.update_guild(guild_info=guild_info)\n\n else:\n await ctx.send(\"You must create a rules channel before you may set the rules message.\")\n\n print(\"Rules have been updated.\")",
"def update_rules():\n update_all_rules()\n return \"OK\"",
"def status_ids(self, status_ids):\n\n self._status_ids = status_ids",
"def rule_id(self, rule_id):\n\n self._rule_id = rule_id",
"def change_status(self, status, application_id):",
"def setAttendeeStatus(self, changed_attendee, status):\n change = 0\n for attendee in self.attendees:\n if attendee['rpath'] == changed_attendee:\n attendee['status'] = status\n change = 1\n if change:\n self._p_changed = 1",
"def setMyStatus(self, status, comment='', REQUEST=None):\n calendar = self.getCalendar()\n calendar_rpath = calendar.getRpath()\n event_id = self.getId()\n (member, member_cn, dtstamp) = self._getRequestInformations()\n for attendee in self.attendees:\n if attendee['rpath'] == calendar_rpath:\n old_status = attendee['status']\n attendee['status'] = status\n if status != old_status:\n if status == 'decline':\n calendar.declineEvent(self)\n if old_status == 'decline':\n calendar.unDeclineEvent(self)\n self._p_changed = 1\n \n # Set up the dict for email notification\n mtool = getToolByName(calendar, 'portal_membership')\n userid = mtool.getAuthenticatedMember().getUserName()\n try:\n cn = self.getAttendeeInfo(calendar.getRpath()).get('cn', id)\n except AttributeError:\n cn = userid\n event_dict = {\n 'id': userid,\n 'request': 'status',\n 'change': ({\n 'attendee': calendar_rpath,\n 'cn': cn,\n 'type': self.getCalendar().usertype,\n 'status': status,\n 'comment': comment,\n 'dtstamp': dtstamp,\n 'sender': member,\n 'sender_cn': member_cn,\n },)\n }\n \n # Change the status for all attendees calendars.\n # Get the attendeelist from the organizers calendar, \n # since new attendees may have been added or old removed.\n org_calendar = self.getOrganizerCalendar()\n org_event = org_calendar._getOb(event_id, None)\n if org_event is None:\n LOG('NGCal', INFO, \"Can't find original event for %s/%s\" \n % (calendar_rpath, event_id))\n return\n \n ctool = getToolByName(self, 'portal_cpscalendar')\n org_attendees = org_event.attendees\n \n for attendee in org_attendees:\n apath = attendee['rpath']\n # Skip this calendar\n if apath == calendar_rpath:\n continue\n acal = ctool.getCalendarForPath(apath, unrestricted=1)\n event = acal._getOb(event_id, None)\n if event is not None:\n event.setAttendeeStatus(calendar_rpath, status)\n \n # Check pending events\n for event in acal._pending_events:\n if event['id'] != event_id:\n continue\n for att in event['event']['attendees']:\n if att['rpath'] == calendar_rpath:\n att['status'] = status\n acal._p_changed = 1\n \n # This needs some testing to see that it really does\n # the correct thing.\n acal.notifyMembers(event_dict)\n \n if REQUEST is not None:\n REQUEST.RESPONSE.redirect(self.absolute_url())",
"def handle_event(self, event):\n # Handle color change requests.\n super().handle_event(event)\n\n\n if event in ['Rule_nbr'] + CA_World.bin_0_to_7:\n if event == 'Rule_nbr':\n self.rule_nbr = SimEngine.gui_get('Rule_nbr')\n\n if event in CA_World.bin_0_to_7:\n self.get_rule_nbr_from_switches()\n\n self.make_switches_and_rule_nbr_consistent()",
"def modifyRule(request):\n\t# We set up the logger and a few lists.\n\tlogger = logging.getLogger(__name__)\n\tresponse = []\n\tsids = []\n\truleSets = []\n\t\n\t# If the POST contains sids, we're processing rules.\n\tif request.POST.get('sids'):\n\t\tsids = json.loads(request.POST.get('sids'))\n\t# If the POST contains ruleset, we're processing rulesets.\n\tif request.POST.get('ruleset'):\n\t\truleSets = request.POST.getlist('ruleset')\n\t# Get the mode as well.\n\tmode = request.POST.get('mode')\n\t\n\t# We translate the mode into true or false.\n\tif mode == \"enable\":\n\t\tactive = True\n\telif mode == \"disable\":\n\t\tactive = False\n\telse:\n\t\tlogger.error(\"Invalid mode '\"+str(mode)+\"'. Rule(s) not modified.\")\n\t\tresponse.append({'response': 'invalidMode', 'text': 'Rule modification failed, invalid mode. \\nContact administrator.\\n\\n'})\n\t\treturn HttpResponse(json.dumps(response))\n\t\n\t# We only need to process rules if there are some in the list.\n\tif len(sids) == 0:\n\t\tresponse.append({'response': 'noSids'})\n\telse: \n\t\t# We use this list to return which rules got changed successfully.\n\t\tgoodsids = []\n\t\t# We iterate over the sids provided.\n\t\tfor sid in sids:\n\t\t\t# If we find the rule, we update its active flag to reflect the new status.\n\t\t\ttry:\n\t\t\t\tr = Rule.objects.filter(SID=sid).update(active=active)\n\t\t\t\tgoodsids.append({'sid': sid, 'mode': mode})\n\t\t\t\tlogger.info(\"Rule \"+str(r)+\" is now \"+str(mode)+\"d.\")\n\t\t\texcept Rule.DoesNotExist:\n\t\t\t\tresponse.append({'response': 'ruleDoesNotExist', 'text': 'Rule '+sid+' could not be found. \\nIt has not been modified.\\n\\n'})\n\t\t\t\tlogger.warning(\"Rule \"+str(sid)+\" could not be found.\")\n\t\t\t\t\n\t\tresponse.append({'response': 'ruleModificationSuccess', 'sids': goodsids})\n\t\t\n\t# We only need to process rulesets if there are some in the list.\n\tif len(ruleSets) == 0:\n\t\tresponse.append({'response': 'noSets'})\n\telse: \n\t\t# We use this list to return which rulesets got changed successfully.\n\t\tgoodRuleSets = []\n\t\t\n\t\t# Global is used to determine if the rulset is to be modified globally or per sensor.\n\t\tif request.POST.get('global'):\n\t\t\tglobalmodify = request.POST['global']\n\t\telse:\n\t\t\tglobalmodify = \"\"\n\t\t\t\n\t\t# If its global, we just change the active flag of the ruleset.\n\t\tif globalmodify == \"on\":\n\t\t\tfor ruleSet in ruleSets:\n\t\t\t\ttry:\n\t\t\t\t\tr = RuleSet.objects.filter(id=ruleSet).update(active=active)\n\t\t\t\t\tgoodRuleSets.append({'set': ruleSet, 'mode': mode})\n\t\t\t\t\tlogger.info(\"RuleSet \"+str(r)+\" is now \"+str(mode)+\"d.\")\n\t\t\t\texcept RuleSet.DoesNotExist:\n\t\t\t\t\tresponse.append({'response': 'ruleSetDoesNotExist', 'text': 'RuleSet '+ruleSet+' could not be found. \\nIt has not been modified.\\n\\n'})\n\t\t\t\t\tlogger.warning(\"RuleSet \"+str(ruleSet)+\" could not be found.\")\n\t\t\t\t\t\n\t\t\tresponse.append({'response': 'ruleSetModificationSuccess', 'sets': goodRuleSets})\n\t\t\t\n\t\t# If its not global, we have to iterate over all the sensors provided and add/remove the rulesets.\n\t\telse:\n\t\t\tsensors = request.POST.getlist('sensors')\n\t\t\t# If we didnt pick all sensors, we gotta iterate over all the ones we selected. 
\n\t\t\tsensorList = []\n\t\t\tallSensor = False\n\t\t\tfor sensor in sensors:\n\t\t\t\ttry:\n\t\t\t\t\ts = Sensor.objects.get(id=sensor)\n\t\t\t\t\t\n\t\t\t\t\tif s.name == \"All\":\n\t\t\t\t\t\tsensorList = [s]\n\t\t\t\t\t\tallSensor = True\n\t\t\t\t\t\tbreak\n\t\t\t\t\tsensorList.append(s)\n\t\t\t\texcept Sensor.DoesNotExist:\n\t\t\t\t\tresponse.append({'response': 'sensorDoesNotExist', 'text': 'Sensor with DB ID '+sensor+' does not exist.'})\n\t\t\t\t\tlogger.warning(\"Sensor \"+str(sensor)+\" could not be found.\")\n\t\t\t\t\n\t\t\tfor ruleSet in ruleSets:\n\t\t\t\ttry:\n\t\t\t\t\tr = RuleSet.objects.get(id=ruleSet)\n\t\t\t\t\t\n\t\t\t\t\tif \"All\" in r.sensors.values_list('name', flat=True):\n\t\t\t\t\t\tallInSet = True\n\t\t\t\t\telse:\n\t\t\t\t\t\tallInSet = False\n\t\t\t\t\t\t\n\t\t\t\t\tif r.sensors.count():\n\t\t\t\t\t\tsetHasSensors = True\n\t\t\t\t\telse:\n\t\t\t\t\t\tsetHasSensors = False\n\t\t\t\t\t\t\n\t\t\t\t\tif active:\n\t\t\t\t\t\tif allSensor and setHasSensors and not allInSet:\n\t\t\t\t\t\t\tr.sensors.clear\n\t\t\t\t\t\t\tr.sensors.add(*sensorList) # This is where the ruleset is tied to the sensor.\n\t\t\t\t\t\telif allSensor and allInSet:\n\t\t\t\t\t\t\tpass\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tr.sensors.add(*sensorList) # This is where the ruleset is tied to the sensor.\n\t\t\t\t\telse:\n\t\t\t\t\t\tif allSensor and setHasSensors:\n\t\t\t\t\t\t\tr.sensors.clear()\n\t\t\t\t\t\telif not allSensor and allInSet:\n\t\t\t\t\t\t\tr.sensors.clear()\n\t\t\t\t\t\t\ts = Sensor.objects.exclude(name=\"All\").all()\n\t\t\t\t\t\t\tr.sensors.add(*s)\n\t\t\t\t\t\t\tr.sensors.remove(*sensorList) # This is where the ruleset is removed from the sensor.\n\t\t\t\t\t\telif (allSensor and allInSet) or not setHasSensors:\n\t\t\t\t\t\t\tpass\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tr.sensors.remove(*sensorList) # This is where the ruleset is removed from the sensor.\n\t\t\t\t\t\t\n\t\t\t\t\tgoodRuleSets.append({'set': ruleSet, 'mode': mode, 'sensor': sensor})\n\t\t\t\t\tlogger.info(\"RuleSet \"+str(r)+\" is now \"+str(mode)+\"d on sensor \"+str(s)+\".\")\n\t\t\t\texcept RuleSet.DoesNotExist:\n\t\t\t\t\tresponse.append({'response': 'ruleSetDoesNotExist', 'text': 'RuleSet '+ruleSet+' could not be found. \\nIt has not been modified.\\n\\n'})\n\t\t\t\t\tlogger.warning(\"RuleSet \"+str(ruleSet)+\" could not be found.\")\n\t\t\t\t\n\t\t\t\t\t\n\t\t\tresponse.append({'response': 'ruleSetModificationSuccess', 'sets': goodRuleSets})\n\t\n\treturn HttpResponse(json.dumps(response))",
"def updateEvents(self, status_records, session_key, existing_statuses=None):\n\n for status_record in status_records:\n status_record.update_from_existing(existing_statuses.get(status_record.rule_id))\n\n # Update.\n unused_response, content = self.kv.batch_create([\n vars(i) for i in status_records], session_key, self.DEFAULT_OPTIONS)\n\n # Audit.\n audited_bool = self.create_audit_records(status_records, session_key)\n\n # Note: we DO NOT abort or raise an exception for failure to audit events,\n # to preserve the previous behavior of incident review's former CSV-backed\n # implementation.\n if not audited_bool:\n logger.error('Could not create some audit record for notable event status changes: changed_records=\"%s\"', content)\n\n # The content object contains a JSON list of the records that were updated,\n # in the format [ <rule_id>_<timestamp>, ... ]\n parsed_content = json.loads(content)\n return len(parsed_content)",
"def edit_ongoing_rule():\n rules = request.json['rules']\n now = datetime.datetime.now()\n\n for rule in rules:\n rule['line_id'] = int(rule['line_id'])\n rule['time'] = convert_to_datetime(rule['time'])\n rule['intervals'] = int(rule['intervals'])\n rule['time_wait'] = int(rule['time_wait'])\n rule['repeat_value'] = int(rule['repeat_value'])\n rule['date_start'] = convert_to_datetime(rule['date_start'])\n rule['time_start'] = convert_to_datetime(rule['time_start'])\n rule['date_time_start'] = datetime.datetime.combine(\n rule['date_start'], rule['time_start'].time())\n rule['end_date'] = convert_to_datetime(rule['end_date'])\n rule['rule_id'] = rule['rule_id']\n rule['days'] = -1\n\n if rule['date_start'].date() == rule['end_date'].date():\n date_delta = rule['end_date'].date() - now.date()\n if date_delta.days == 0:\n rule['days'] = 0\n if date_delta.days == 1:\n rule['days'] = 1\n\n # \"UPDATE ongoing_rules\n # SET line_id = {0}, time = {1}, intervals = {2}, time_wait = {3}, repeat_value={4}, date_time_start='{5}'\"\n # end_date = '{6}' WHERE rule_id = '{7}'\"\n database.update(database.QUERY[mn() + '_ongoing'].format(\n rule['line_id'], rule['time'], rule['intervals'], rule['time_wait'],\n rule['repeat_value'], rule['date_time_start'],\n rule['end_date'], rule['rule_id']))\n\n # update rules;\n update_rules_from_ongoing_rules(rule)\n # update_all_rules()\n logging.info(\"Ongoing rule modified. {0}\".format(str(rule)))\n\n send_ongoing_rule_message('edit_ongoing_rule', rule)\n\n return json.dumps({'status': 'OK'})",
"def set_device_rules(self, rules, rule_objs):\n self.logger.debug(\"set_device_rules: rules: {}\".format(rules))\n self._load_device_rules(rules, rule_objs=rule_objs)\n self._determine_cli_command_list()\n self._determine_get_method_list()",
"def set_event_status(self, new_event_status):\n self.event_status = new_event_status",
"def setStatusBySearchID(self, searchID, urgency, status, comment, newOwner, reviewTime, capabilities, session_key, currentUser=None, force_refresh=False, rule_ids_to_change=None, existing_statuses=None):\n\n # This class instance will record the number of events successfully changed\n status_change_meta = LogReviewStatusChanges()\n\n # Get the search job (this will throw a splunk.ResourceNotFound exception if the search cannot be found)\n try:\n dataset = self.getSearchResults(searchID, session_key)\n except splunk.ResourceNotFound:\n logger.warn(\"The search ID %s is no longer accessible, please refresh and try editing the events again\", searchID)\n status_change_meta.incrementFailureCountEx([\"The search is no longer accessible, please refresh and try editing the events again\"])\n return status_change_meta\n except NotEventSearchException:\n status_change_meta.incrementFailureCountEx([\"The search is not an event search; searches returning results (instead of events) cannot be used\"])\n return status_change_meta\n except SearchNotDoneException:\n status_change_meta.incrementFailureCountEx([\"The search is not done; the search must be completed before results can be processed\"])\n return status_change_meta\n\n # Get the existing statuses so that the entries can inherit items as necessary\n if existing_statuses is None:\n existing_statuses = self.getCurrentValues(session_key, rule_ids_to_change)\n\n # Make sure the comment is the minimum length (if defined)\n minimum_length = self.commentLengthRequired(session_key)\n if len(comment.strip()) < minimum_length:\n status_change_meta.incrementFailureCountEx([\"comment length does not meet minimum requirement (must be %d characters long or more)\" % (minimum_length)])\n return status_change_meta\n\n # Determine if urgency changes are allowed\n allowUrgencyChanges = self.isUrgencyOverrideAllowed(session_key)\n\n # If we are not allowed to change the urgency, then set it to none to indicate that it ought not be changed\n if allowUrgencyChanges is False:\n urgency = None\n\n # Make a copy of the rules IDs that we are planning to change so that we can exit early from looping through\n # the search results once we get done editing the entries\n rule_ids_to_change_left = None\n\n if rule_ids_to_change is not None:\n rule_ids_to_change_left = rule_ids_to_change[:] # Make a copy, we don't want to edit the original\n\n # Counters\n evaluated = 0\n\n # Notable events to be edited\n status_records = []\n\n # Create a status entry for each event\n for event in dataset:\n\n evaluated += 1\n\n # Stop processing the events if already handled all of the events we expected to handle\n if rule_ids_to_change_left is not None and len(rule_ids_to_change_left) == 0:\n break\n\n if 'rule_id' in event:\n rule_id = str(event['rule_id'])\n\n # Only change the given event if it is in the list to change\n if rule_ids_to_change is not None and rule_id not in rule_ids_to_change:\n continue\n\n if 'source' in event:\n correlation_search = str(event['source'])\n else:\n correlation_search = None\n\n rule_name = self.correlation_search_info.get(correlation_search, {}).get('rule_name')\n\n # Make sure that the user has the capability\n capability_issues = self.checkTransition(rule_id, correlation_search, status, capabilities,\n session_key, existing_statuses, force_refresh)\n\n # Stop if the permission check failed\n if capability_issues is not None and len(capability_issues) > 0:\n status_change_meta.incrementFailureCountEx(capability_issues)\n else:\n # Add the record to 
the list of records to be saved.\n status_records.append(LogReviewStatus(reviewTime, rule_id, newOwner, urgency, status, comment,\n currentUser, rule_name, rule_id + '_' + str(reviewTime)))\n if rule_ids_to_change_left is not None:\n rule_ids_to_change_left.remove(rule_id)\n else:\n status_change_meta.incrementFailureCount(\"rule_id field not found in the event\")\n\n logger.debug(\"Evaluated %i events for editing\", evaluated)\n\n success_count = 0\n # Perform the save in chunks and return status.\n for chunk in [status_records[i:i + self.BATCH_SAVE_LIMIT] for i in range(0, len(status_records), self.BATCH_SAVE_LIMIT)]:\n try:\n success_count += self.updateEvents(chunk, session_key, existing_statuses)\n except Exception as e:\n logger.exception('Exception when updating notable events: %s', e)\n\n # Update status change metadata.\n # Case 1: updating all events in the search\n # Case 2: updating only selected events\n if (not rule_ids_to_change and success_count == evaluated) or (rule_ids_to_change and len(rule_ids_to_change) == success_count):\n # All successful.\n status_change_meta.incrementSuccessCount(success_count)\n else:\n # Some failures.\n status_change_meta.incrementSuccessCount(success_count)\n status_change_meta.incrementFailureCount('some notable event(s) could not be updated', evaluated - success_count)\n\n return status_change_meta",
"def rule(self, rules):\n\n if not isinstance(rules, list):\n rules = [rules]\n\n for rule in rules:\n self.__addRule(rule)",
"def set_arc_status(self, nodeA_id, nodeB_id, status): \n try:\n arcs_set_A = self._inc[nodeA_id]\n for arc in arcs_set_A:\n if arc._head == nodeB_id:\n arc.status = status\n break\n except KeyError:\n return",
"def activate_ongoing_rule():\n rule_id = request.args.get('id')\n database.update(database.QUERY[mn() + '_ongoing'].format(rule_id))\n database.update(database.QUERY[mn() + '_life'].format(rule_id))\n update_all_rules()\n\n send_ongoing_rule_message('ongoing_rule_state', {'rule_id': rule_id, 'status': 1})\n return json.dumps({'status': 'OK'})",
"def id_status(self, id_status):\n self._id_status = id_status",
"def set_rules(rules, overwrite=True, use_conf=False):\n\n init(use_conf=False)\n _ENFORCER.set_rules(rules, overwrite, use_conf)",
"def update_rules_from_ongoing_rules(rule):\n database.update(database.QUERY[mn() + '_remove_from_life'].format(rule['rule_id']))\n\n _delta = rule['end_date'] - rule['date_time_start']\n _days = _delta.days + 1\n logging.info(\"number of days: {0}\".format(_days))\n\n ongoing_rule_id = rule['rule_id']\n\n for days_to_add in range(0, _days + 1, rule['repeat_value']):\n date_datetime = rule['date_time_start'] + datetime.timedelta(days=days_to_add)\n\n # start_time = rule['date_time_start']\n branch_id = int(rule['line_id'])\n time_min = int(rule['time'])\n time_wait = int(rule['time_wait'])\n num_of_intervals = int(rule['intervals'])\n interval_id = str(uuid.uuid4())\n\n stop_datetime = date_datetime + datetime.timedelta(minutes=time_min)\n\n database.update(database.QUERY[mn() + '_add_rule_to_life'].format(\n branch_id, START_RULE, ENABLED_RULE,\n date_datetime.date(), date_datetime,\n interval_id, time_min, ongoing_rule_id))\n database.update(database.QUERY[mn() + '_add_rule_to_life'].format(\n branch_id, STOP_RULE, ENABLED_RULE,\n date_datetime.date(), stop_datetime,\n interval_id, 0, ongoing_rule_id))\n\n logging.info(\"Start time: {0}. Stop time: {1} added to database\".format(str(date_datetime), str(stop_datetime)))\n\n # first interval is executed\n for x in range(2, num_of_intervals + 1):\n date_datetime = stop_datetime + datetime.timedelta(minutes=time_wait)\n stop_datetime = date_datetime + datetime.timedelta(minutes=time_min)\n\n database.update(database.QUERY[mn() + '_add_rule_to_life'].format(\n branch_id, START_RULE, ENABLED_RULE,\n date_datetime.date(), date_datetime,\n interval_id, time_min, ongoing_rule_id))\n database.update(database.QUERY[mn() + '_add_rule_to_life'].format(\n branch_id, STOP_RULE, ENABLED_RULE,\n date_datetime.date(), stop_datetime,\n interval_id, 0, ongoing_rule_id))\n\n logging.info(\"Start time: {0}. Stop time: {1} added to database\".format(str(date_datetime), str(stop_datetime)))",
"def test_multiple_rules(self):\n\n # Prepare.\n app = self.factory()\n request = self.getRequest(app)\n context = model.factory(initial_state=s.STARTED)\n\n # Create a dummy event and get it back.\n event_id = boilerplate.createEvent(context)\n event = repo.LookupActivityEvent()(event_id)\n\n # Complete -> completed.\n state_changer = request.state_changer\n # We have to use a transaction manager because perform creates\n # a new event on state change.\n with transaction.manager:\n bm.Session.add(event)\n bm.Session.add(context)\n state_changer.perform(context, a.COMPLETE, event)\n s1 = context.work_status.value\n context_id = context.id\n\n self.assertEqual(s1, s.COMPLETED)\n\n # Complete -> absolutely completed.\n # We have to use a transaction manager because perform creates\n # a new event on state change.\n with transaction.manager:\n bm.Session.add(event)\n bm.Session.add(context)\n state_changer.perform(context, a.COMPLETE, event)\n s2 = context.work_status.value\n self.assertEqual(s2, s.ABSOLUTELY_COMPLETED)\n\n # Complete -> ... the same state ...\n # We have to use a transaction manager because perform creates\n # a new event on state change.\n context = model.Model.query.get(context_id)\n with transaction.manager:\n bm.Session.add(event)\n state_changer.perform(context, a.COMPLETE, event)\n s3 = context.work_status.value\n self.assertEqual(s3, s.ABSOLUTELY_COMPLETED)",
"def set_arc_status(self, nodeA_id, nodeB_id, status): \n try:\n arcs_list = self._inc[nodeA_id]\n record = arcs_list.get_first_record()\n while record is not None:\n arc = record.element\n if arc._head is nodeB_id: \n arc.status = status \n break\n record = record._next \n except KeyError:\n return",
"def set_rules(rules, overwrite=True, use_conf=False): # pragma: no cover\n init(use_conf=False)\n _ENFORCER.set_rules(rules, overwrite, use_conf)",
"def set_status(self, scenario_id, status):\n self.cur.execute(\n \"UPDATE execute_list SET status = %s WHERE id = %s\",\n (status, scenario_id),\n )",
"def change_status(self, personId, newStatus):\n self._timeline.add_event({\n 'type': EventTimeLine.PERSON_STATUS_CHANGE,\n 'personId': personId,\n 'newStatus': newStatus\n })",
"def set_switches_from_rule_nbr(self):\n for rule_switch, enabled in zip(CA_World.bin_0_to_7, self.int_to_8_bit_binary(self.rule_nbr)):\n SimEngine.gui_set(rule_switch, value=(True if enabled=='1' else False))",
"def update_status(self, id, status):\n sql = f\"UPDATE incidences SET status = \\'{status}\\'\\\n WHERE incidences.id = {id}\"\n conn = Db().con\n curr = conn.cursor()\n curr.execute(sql)\n conn.commit()",
"def delete_rules(self, ids, **params):\n json = {\"delete\": {\"ids\": []}}\n if isinstance(ids, (int, str, StreamRule)):\n ids = (ids,)\n for id in ids:\n if isinstance(id, StreamRule):\n json[\"delete\"][\"ids\"].append(str(id.id))\n else:\n json[\"delete\"][\"ids\"].append(str(id))\n\n return self._make_request(\n \"POST\", f\"/2/tweets/search/stream/rules\", params=params,\n endpoint_parameters=(\"dry_run\",), json=json, data_type=StreamRule\n )",
"def _bulk_threat_update_status(self, threat_ids, status, remediation, comment):\n if not all(isinstance(t, str) for t in threat_ids):\n raise ApiError(\"One or more invalid threat ID values\")\n request = {\"state\": status, \"threat_id\": threat_ids}\n if remediation is not None:\n request[\"remediation_state\"] = remediation\n if comment is not None:\n request[\"comment\"] = comment\n url = \"/appservices/v6/orgs/{0}/threat/workflow/_criteria\".format(self.credentials.org_key)\n resp = self.post_object(url, body=request)\n output = resp.json()\n return output[\"request_id\"]"
] | [
"0.5960008",
"0.5552889",
"0.55509925",
"0.5498841",
"0.5445532",
"0.5359694",
"0.53434515",
"0.5335274",
"0.5285304",
"0.5277969",
"0.5243045",
"0.5196009",
"0.51921904",
"0.51836336",
"0.5173424",
"0.51620984",
"0.5158724",
"0.5099852",
"0.5083945",
"0.5045462",
"0.5029228",
"0.49826527",
"0.49735418",
"0.49512196",
"0.49443787",
"0.49411726",
"0.49372956",
"0.4914596",
"0.4886505",
"0.48731562"
] | 0.66258126 | 0 |
Calculates 1/ from fit data f and possibly stretch exponent s, with errors if given | def calculate_f(f, s = None, f_err = None, s_err = None, scale = 1000):
    if s is None:
        return f, f_err
    else:
        f0 = f * s / gamma(1./s)
        if (f_err is not None) and (s_err is not None):
            sigma = np.sqrt(f_err ** 2 + ((s + polygamma(0, 1/s))/s/gamma(1/s)* s_err)**2)
        else:
            sigma = None
        return f0, sigma | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ForceFitPowerlaw(p0, f, x, model='h'):\n hertz = ['h', 'H', 'hertz', 'Hertz']\n sneddon = ['s', 'S', 'sneddon', 'Sneddon']\n if model in hertz:\n model = 3./2\n def erf(p, f, x, model):\n return f - p[0]*np.power(x,model)\n elif model in sneddon:\n model = 2.\n def erf(p, f, x, model):\n return f - p[0]*np.power(x,model)\n else:\n def erf(p, f, x, model):\n return f - p[0]*np.power(x,model)\n\n fit = leastsq(erf, p0, args=(f,x,model))[0]\n return fit",
"def erf(x):\n return 0.0",
"def erfc(x):\n return 0.0",
"def _fit_func(en, a, b, c, d, e, f, g, h):\n return a*(1./en**b)*(1./(c+d/(en**e))) + f*exp(-g*(en-h)**2)",
"def biphasic_fit_function(x, a, b, c, d, e, f):\n term1 = 1 + (a + (1 - a)/(1 + (x * (10 ** b)) ** c))\n term2 = 1 + (d + (1 - d)/(1 + (x * (10 ** e)) ** f))\n\n biphasic_function = 2 ** (0.5 * (np.log2(term1) + np.log2(term2))) - 1\n return biphasic_function",
"def erf(data):\n return _make.erf(data)",
"def erf(F):\n def compute(value):\n if isinstance(value, Number):\n if sc is not None:\n return sc.erf(value)\n else:\n raise ValueError('Numbers are not supported as input if scipy is not installed')\n return F.npx.erf(value)\n return compute",
"def fit(self):\n\n fitdata = np.polyfit(self.v**(-2./3.), self.e, 3, full=True)\n ssr = fitdata[1]\n sst = np.sum((self.e - np.average(self.e))**2.)\n residuals0 = ssr/sst\n deriv0 = np.poly1d(fitdata[0])\n deriv1 = np.polyder(deriv0, 1)\n deriv2 = np.polyder(deriv1, 1)\n deriv3 = np.polyder(deriv2, 1)\n\n self.v0 = None\n for x in np.roots(deriv1):\n if x > 0 and deriv2(x) > 0:\n self.v0 = x**(-3./2.)\n break\n\n if self.v0 is None:\n raise ValueError('No minimum!')\n\n derivV2 = 4./9. * x**5. * deriv2(x)\n derivV3 = (-20./9. * x**(13./2.) * deriv2(x) -\n 8./27. * x**(15./2.) * deriv3(x))\n bulk_modulus0 = derivV2 / x**(3./2.)\n bulk_deriv0 = -1 - x**(-3./2.) * derivV3 / derivV2\n\n self.e0 = deriv0(x)\n self.B0 = bulk_modulus0\n self.B1 = bulk_deriv0\n\n return self.v0, self.e0, self.B0, self.B1, residuals0",
"def generic_s21(params, f):\n A = (params['A_mag'].value *\n np.exp(1j * params['A_phase'].value))\n f_0 = params['f_0'].value\n Q = params['Q'].value\n Q_e = (params['Q_e_real'].value +\n 1j * params['Q_e_imag'].value)\n return A * (1 - (Q * Q_e**-1 /\n (1 + 2j * Q * (f - f_0) / f_0)))",
"def F0(t):\n if (t < 1e-6):\n return 1.0 - t / 3.0\n else:\n return 0.5 * (np.pi / t) ** 0.5 * sp.erf(t ** 0.5)",
"def erfi(x):\n a = 0.147 # MAGIC!!!\n a1 = math.log(1 - x * x)\n a2 = (2.0 / (math.pi * a) + a1 / 2.0)\n\n return (sign(x) * math.sqrt( math.sqrt(a2 * a2 - a1 / a) - a2 ))",
"def steffensen ( fun , x , fx = None , args = () ) :\n \n if fx is None : fx = float ( fun ( x , *args ) ) ## reuse if already calculated\n if fx : \n gx = ( fun ( x + fx , *args ) - fx ) / fx\n if gx : return x - fx / gx",
"def f0(E, fermi, T):\n return 1. / (1. + np.exp((E - fermi) / (k_B * T)))",
"def fitSF_Sch10(jd,mag,errmag,nbin,bmin,bmax):\n\n tau,sf = SFSchmidt10(jd,mag,errmag,nbin,bmin,bmax)\n\n y=np.log10(sf)\n x=np.log10(tau)\n x=x[np.where((tau<=1) & (tau>0.01))]\n y=y[np.where((tau<=1) & (tau>0.01))]\n coefficients = np.polyfit(x, y, 1)\n\n A=10**(coefficients[1])\n gamma=coefficients[0]\n\n return(gamma, A)",
"def set_fitfunc(self):\n if self.amplitude != None:\n# print self.amplitude\n self.fitfunc = lambda p, x: (self.amplitude * exp(-x * p[0]))\n else: \n if self.offset:\n self.fitfunc = lambda p, x: (p[1] * exp(-x * p[0]) + p[2])\n else:\n self.fitfunc = lambda p, x: (p[1] * exp(-x * p[0]) + self.fixed_offset)",
"def test_genextreme_fit(self):\n p = generic.fit(self.genextreme, \"genextreme\")\n np.testing.assert_allclose(p, (0.20949, 297.954091, 75.7911863), 1e-5)",
"def F(x):\n return math.exp(-0.5 * (x ** 2))",
"def _residual_edp(self, params):\n data = self.F**2\n model = np.absolute(self._model())**2\n sigma = self.sigma\n return (data[self.mask]-model[self.mask]) / sigma[self.mask] \n \n # The following three lines do not reproduce Sun's results, which proves\n # that the fits were done through intensity, not form factor.\n #data = self.F\n #model = np.absolute(self._model())\n #return (data - model) ",
"def fitGammaErrFun(self, params, x, y, minLum, maxLum):\n if self.eq == 4:\n gamma, a, k = params\n _m = gammaFun(x, minLum, maxLum, gamma, eq=self.eq, a=a, k=k)\n model = numpy.asarray(_m)\n else:\n gamma = params[0]\n _m = gammaFun(x, minLum, maxLum, gamma, eq=self.eq)\n model = numpy.asarray(_m)\n SSQ = numpy.sum((model - y)**2)\n return SSQ",
"def _powerlaw(self, x: np.ndarray, y: np.ndarray) -> float:\n\n # regress\n def _regress(x, y):\n slope, intercept, rval, pval, err = linregress(x, y)\n return slope, rval\n\n # log of inputs\n logx = np.log(x)\n logy = np.log(y)\n\n # naive fit\n rmin = self.rmin\n if rmin is None:\n exponent, rval = _regress(logx, logy)\n return exponent\n\n # iteratively trim the fat tail\n for ymin in np.unique(y):\n\n # trim off the fat tail\n greater_than = y >= ymin\n logx_ = logx[greater_than]\n logy_ = logy[greater_than]\n exponent, rval = _regress(logx_, logy_)\n\n # check convergence\n if abs(rval) > rmin:\n return exponent\n\n # give up\n return np.nan",
"def df0dE(E, fermi, T):\n exponent = (E - fermi) / (k_B * T)\n if exponent > 40 or exponent < -40: # This is necessary so at too low numbers python doesn't return NaN\n return 1e-32\n else:\n return -1 / (k_B * T) * np.exp((E - fermi) / (k_B * T)) / (1 + np.exp((E - fermi) / (k_B * T))) ** 2",
"def g(F, fitting=False, use_lambda_fit=False, use_unwinding_fit=False):\n if fitting:\n return (S * C - C * F * (x/L_0 - 1 + 1/2\n * (k_B*T/(F*L_p))**(1/2))**(-1))**(1/2)\n if F <= 30e-12: # N\n return - 100e-21 # Nm\n else:\n g0 = - 590e-21 # Nm\n if use_lambda_fit:\n g0 = - 560e-21 # Nm\n if use_unwinding_fit:\n g0 = - 637e-21 # Nm\n return g0 + 17e-9 * F",
"def _evaluate(self, state):\n leading_power_error = self.get_leading_power_error(state)\n if np.isfinite(leading_power_error):\n return -float(leading_power_error)\n else:\n return self._default_value",
"def frexp(x):\n return 0.0, 0",
"def get_s( self ):\n\n # initialize scaling factor as unknown variable, assuming it's real and\n # greater than zero\n _s = Symbol( 's', real = True, positive = True )\n\n # solve for scaling factor (first argument is expression set equal to zero)\n s = solve( self.a * _s ** self.n + self.b * _s - 1, _s )\n\n # save result as float\n self.s = float( s[ 0 ] )",
"def _fit_function(self,x,a,b):\n return b + a*x",
"def d1(self):\n f = (self.rf + (self.sigma ** (2)) / 2 ) * self.t\n return (1/(self.sigma * (self.t ** (0.5)))) *(math.log(self.s/self.x) + f)",
"def sigma_xx_to_a_to_ff(self, Q, f):\n if f == \"e\":\n mf = me\n # gall = self.gaee\n elif f == \"mu\":\n mf = mmu\n # gall = self.gamumu\n mx = self.mx\n if Q >= 2.0 * mf and Q >= 2.0 * mx:\n # gaxx = self.gaxx\n # ma = self.ma\n # width_a = self.width_a\n ret_val = 0.0\n assert ret_val.imag == 0\n assert ret_val.real >= 0\n return ret_val.real\n else:\n return 0.0",
"def fdq1(f, x, h=1e-5):\n return (f(x+h) - f(x))/h\n \n raise NotImplementedError(\"Problem 2 Incomplete\")",
"def risefit(self, p, x, y, risepower, mode=0):\n assert mode in [-1, 0, 1]\n ix = np.argmin(np.fabs(x-p[2]))\n tm = np.zeros_like(x)\n expf = (x[ix:]-p[2])/p[1]\n pclip = 1.e3\n nclip = 0.\n expf[expf>pclip]= pclip\n expf[expf<-nclip] = -nclip\n tm[ix:] = p[0] * (1.0 - np.exp(-expf))**risepower\n if mode == 0:\n return tm - y\n elif mode == 1:\n return np.linalg.norm(tm-y)\n elif mode == -1:\n return tm\n else:\n raise ValueError('doubleexp: Mode must be 0 (diff), 1 (linalg.norm) or -1 (just value)')"
] | [
"0.6359497",
"0.630396",
"0.62164336",
"0.61789817",
"0.6004726",
"0.5962202",
"0.58648753",
"0.58410764",
"0.58384585",
"0.5783175",
"0.57613504",
"0.57225925",
"0.57216847",
"0.56861985",
"0.5686081",
"0.56694084",
"0.5659702",
"0.5622807",
"0.55963826",
"0.55789584",
"0.556907",
"0.55415297",
"0.552438",
"0.5497645",
"0.5493027",
"0.54883146",
"0.5477738",
"0.5477114",
"0.5450888",
"0.5450211"
] | 0.6419055 | 0 |
Process detectors from a stream of text data | def readTextStream(
    self, stream, sourcename=None, postcheck=True, strict=True
):
    if not isinstance(stream, io.TextIOBase):
        raise TypeError("Stream is not a source of text data")
    elif not stream.readable():
        raise AttributeError("Stream is not readable")
    detectors = self._read(stream, sourcename)
    if postcheck and not detectors:
        msg = "No detectors found in {}, named {}".format(
            repr(stream), sourcename
        )
        if self.names:
            msg += ". Processing detectors with names: {}".format(
                ", ".join(self.names)
            )
        if strict:
            raise SerpentToolsException(msg)
        warn(msg)
    return detectors | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def data_preprocessing():\n lineid_content = get_lineid_content()\n print('Read movie_lines.txt file complete...')\n convos = get_convos()\n print('Read movie_conversations.txt file complete...')\n print('Building dataset')\n get_data(lineid_content, convos)",
"def infer():\n\n # Create StreamManagerApi object\n stream_manager_api = StreamManagerApi()\n # Use InitManager method init StreamManagerApi\n ret = stream_manager_api.InitManager()\n if ret != 0:\n print(\"Failed to init Stream manager, ret=%s\" % str(ret))\n exit()\n\n # create streams by pipeline config file\n with open(args.pipeline_path, \"rb\") as f:\n pipeline_str = f.read()\n\n # Configuring a stream\n ret = stream_manager_api.CreateMultipleStreams(pipeline_str)\n if ret != 0:\n print(\"Failed to create Stream, ret=%s\" % str(ret))\n exit()\n\n # Construct the input of the stream\n data_input = MxDataInput()\n # Stream_name encoded in UTF-8\n stream_name = args.stream_name.encode()\n print(stream_name)\n predictions = []\n with open(args.label_path, 'rt') as f:\n val_cls = f.read().rstrip(\"\\n\").split(\"\\n\")\n val_cls_dict = {}\n for i, cls in enumerate(val_cls):\n val_cls_dict[i] = cls\n coco_gt = COCO(args.instances_path)\n classs_dict = {}\n cat_ids = coco_gt.loadCats(coco_gt.getCatIds())\n for cat in cat_ids:\n classs_dict[cat[\"name\"]] = cat[\"id\"]\n\n for file_name in os.listdir(args.img_path):\n pred_data = []\n # Gets the Address of each image\n img_id = int(file_name.split('.')[0])\n file_path = args.img_path + file_name\n size = (cv2.imread(file_path)).shape\n\n # Read each photo in turn\n with open(file_path, \"rb\") as f:\n img_data = f.read()\n if not img_data:\n print(f\"read empty data from img:{file_name}\")\n continue\n # The element value img_data\n data_input.data = img_data\n boxes_output, scores_output = send_data_get_output(stream_name, data_input, stream_manager_api)\n pred_data.append({\"boxes\": boxes_output,\n \"box_scores\": scores_output,\n \"img_id\": img_id,\n \"image_shape\": size})\n\n parse_img_infer_result(pred_data[0], predictions, val_cls_dict, classs_dict)\n print(f\"Inferred image:{file_name} success!\")\n\n # Save the result in JSON format\n if not os.path.exists(args.res_path):\n os.makedirs(args.res_path)\n with open(args.res_path + 'predictions_test.json', 'w') as f:\n json.dump(predictions, f)\n stream_manager_api.DestroyAllStreams()",
"def _preprocess(self):\n self.data['sentences'] = self.data['text'].apply(self._tokenize_sent)\n self.data['nouns'] = self.data['sentences'].apply(self._get_nouns)\n # self._get_frequent_features()\n # self._compactness_pruning()\n # self._redundancy_pruning()\n # self._get_features()\n self._extract_opinions()",
"def process_text(self):\n prp1 = preprocessor.Preprocess()\n processed_text = prp1.clean_data(self.text)\n self.vec1 = self.vec.transform(pd.Series(processed_text))",
"def __read_data__(self):\n with open(self.file, 'r') as data:\n sentence = []\n tags = []\n for line in data:\n terms = line.rstrip().split(WHITESPACE)\n for term in terms:\n word_tag = tuple(term.split(TAGCHAR))\n word = word_tag[0]\n tag = word_tag[1]\n self.word_tag_dict[word_tag] += 1\n self.tag_dict[tag] += 1\n self.__add_to_word_dict__(word, tag)\n if self.isNumberWord(word):\n self.numbers += 1\n if word[0].isupper() and len(sentence) > 0:\n self.cap_no_start += 1\n sentence.append(word)\n tags.append(tag)\n if tag == ENDOFSENTENCE:\n self.sentences.append(tuple(sentence))\n self.tags.append(tuple(tags))\n sentence = []\n tags = []",
"def preprocess(self, data):\n logger.info(str(data))\n text = data[0].get(\"data\")\n if text is None:\n text = data[0].get(\"body\") # with txt file\n if isinstance(text, dict):\n logger.info(\" ############## Got Dict !! ##########################\")\n input_text = text['text']\n else:\n input_text = text.decode('utf-8')\n max_length = int(self.setup_config[\"max_length\"])\n logger.info(\"Received text: '%s'\", input_text)\n\n logger.info(input_text)\n # input_text = \"안녕하세요? 반갑습니다. 오늘 날씨가 정말 끝내줘요. 너 너무 사랑스러워요\"\n inputs = self.tokenizer.encode(input_text, max_char_length=max_length, return_attention_mask=True)\n return inputs",
"def parser(sent_list): #input: list of sentences",
"def parse_stream(self, fd):\n count_all = count_match = 0\n for line in fd:\n count_all += 1\n if not re.search(self.filter, line):\n continue\n\n count_match += 1\n if self.str_replace:\n self.kafka_publish_message(re.sub(self.str_repl_src, self.str_repl_dst, line))\n else:\n self.kafka_publish_message(line)\n\n #u_print(\" Processor.parse_stream() - Lines: processed=[{}] matched=[{}]\".format(count_all, count_match))\n self.stats_update('lines', count_all)\n self.stats_update('lines_match', count_match)",
"def process(self):\n self.extract()\n self.transform()\n self.load()",
"def extract_detections(self):\n self.rescue_model.setInput(self.human_blob)\n self.predictions = self.rescue_model.forward()",
"def process_lines(self, lines):\n line_index = 0\n n_lines = len(lines)\n while line_index < n_lines:\n if lines[line_index].startswith(\"HIERARCHY\"):\n line_index = self._read_skeleton(lines, line_index, n_lines)\n if lines[line_index].startswith(\"MOTION\"):\n self._read_frametime(lines, line_index+2)\n line_index = self._read_frames(lines, line_index+3, n_lines)\n else:\n line_index += 1",
"def process(\n self,\n makeGlyphs=True,\n makeKerning=True,\n makeInfo=True,\n bendLocations=False,\n ):\n if self.logger:\n self.logger.info(\"Reading %s\", self.path)\n self.readInstances(\n makeGlyphs=makeGlyphs,\n makeKerning=makeKerning,\n makeInfo=makeInfo,\n bendLocations=bendLocations,\n )\n self.reportProgress(\"done\", 'stop')",
"def pipeline(file):\n # special processing is performed to avoid sentence boundaries after abbrevs\n doc = nlp(text_processing.preprocess_text_ents(file))\n grid = get_grid(doc)\n distrib = get_distrib(grid, doc)\n return get_feats(distrib)",
"def process_corpus(self):\n sentences = []\n sentence = []\n with open(str(self.file), encoding=self.encoding) as f:\n\n line = f.readline()\n\n while line:\n\n if line.startswith(\"#\"):\n line = f.readline()\n continue\n\n if line.strip().replace(\"\", \"\") == \"\":\n if len(sentence) > 0:\n self.infer_space_after(sentence)\n if self.tagging_scheme is not None:\n self.convert_tag_scheme(\n sentence, target_scheme=\"iobes\"\n )\n\n sentences.append(sentence)\n sentence = []\n\n else:\n fields = re.split(r\"\\s+\", line)\n token = fields[0] # text column\n token_tags = {\n v: fields[k]\n for k, v in self.columns.items()\n if v != \"text\"\n }\n sentence.append({\"name\": token, \"tags\": token_tags})\n\n line = f.readline()\n\n return sentences",
"def init_detector(config):\n\n crf_list = config[\"detection\"][\"crf_ner_list\"].split(\",\")\n crf_model_list = [load(crf) for crf in crf_list]\n\n crf_ner_classic = None\n if \"crf_ner_classic\" in config[\"detection\"]:\n crf_ner_classic_list = config[\"detection\"][\n \"crf_ner_classic\"].split(\",\")\n crf_ner_classic = [load(crf) for crf in crf_ner_classic_list]\n\n # search for mail list\n corp_mail_list = []\n if config[\"detection\"][\"corp_mail_list\"]:\n with open(config[\"detection\"][\"corp_mail_list\"], \"r\") as f_in:\n for line in f_in:\n line = line.rstrip(\"\\n\")\n corp_mail_list.append(line)\n\n # build the system here\n nlp = None\n if \"nlp_model\" in config[\"detection\"]:\n nlp = spacy.load(config[\"detection\"][\"nlp_model\"])\n\n custom_word_list = []\n\n if \"custom_word_list\" in config:\n with open(config[\"custom_word_list\"], \"r\") as f_in:\n custom_word_list = [line.rstrip(\"\\n\") for line in f_in]\n\n # configuration of the proximity regexp\n regexp_config_dict = OrderedDict()\n if \"proximity_regexp_config\" in config:\n for key in config[\"proximity_regexp_config\"]:\n regexp_config_dict[key] = OrderedDict()\n regexp_config_dict[key][\"left_span_len\"] = int(\n config[\"proximity_regexp_config\"][key][\"left_span_len\"])\n\n regexp_config_dict[key][\"right_span_len\"] = int(\n config[\"proximity_regexp_config\"][key][\"right_span_len\"])\n\n with open(config[\n \"proximity_regexp_config\"][key][\"word_file\"], \"r\") as f_in:\n word_list = [normalize_text_proximity(\n line.rstrip(\"\\n\").strip()) for line in f_in]\n\n regexp_config_dict[key][\"word_list\"] = word_list\n\n low_priority_list = None\n if \"low_priority_list\" in config:\n low_priority_list = config[\"low_priority_list\"]\n\n my_detector = Detector(nlp,\n crf_model_list,\n load(config[\n \"detection\"][\"personal_email_detection\"]),\n crf_ner_classic,\n corp_mail_list=corp_mail_list,\n custom_word_list=custom_word_list,\n regexp_config_dict=regexp_config_dict,\n signature_max_distance=config[\"signature_max_distance\"],\n low_priority_list=low_priority_list)\n\n return my_detector",
"def dissect(self, text):",
"def process_image(self):\n\n detect.main(self.nn_args)",
"def _read_data_taskA(data_path: str=\"path\", tokenizer=None, \n bert: bool=False, \n mode: str=\"raw\", \n tagger=None, \n test: bool=False, \n test_samples=None\n ):\n print(f\"\\n[dataset]: Loading data from '{data_path}'...\")\n sentences = []\n labels = []\n tok_list = []\n words_list = []\n targets_list = []\n target_final = []\n\n data_dict = read_json_data(data_path) if not test else test_samples\n #print(\"data_dict:\", len(data_dict))\n\n for entry in data_dict:\n # tokenize data sentences\n if bert:\n tokens = tokenizer.tokenize(entry[\"text\"])\n tokens.insert(0, \"[CLS]\") # RoBERTa \"<s>\" <-> BERT \"[CLS]\" \n tokens.append(\"[SEP]\") # RoBERTa \"</s>\" <-> BERT \"[SEP]\"\n else:\n tokens = tokenizer(entry[\"text\"])\n \n words_list.extend(tokens)\n tok_list.append(tokens)\n\n if mode == \"tokenize\":\n sentences.append(tokens)\n elif mode == \"raw\":\n sentences.append(entry[\"text\"])\n\n # count target words\n t_list = []\n if not test:\n targets = entry[\"targets\"]\n tgt_list = []\n if len(targets) > 0:\n t_list.append(targets)\n for tgt in targets:\n targets_list.append(tgt[1])\n tgt_list.append(tgt[1])\n else:\n t_list.append([])\n\n # tag input tokens\n b_tok = tokenizer if bert else None\n tags = tagger(targets, tokens, bert_tokenizer=b_tok)\n #print(tags)\n\n labels.append(tags)\n target_final.append(tgt_list)\n\n else:\n labels.append(\"dummy\")\n target_final.append(0)\n \n if not test: \n assert len(sentences) == len(labels)\n print(\"sentences:\",len(sentences))\n print(\"labels:\",len(labels))\n\n # count words occurency and frequency \n word_counter = collections.Counter(words_list)\n distinct_words = len(word_counter)\n print(f\"Number of distinct words: {distinct_words}\")\n \n # count target words occurency and frequency\n tgts_counter = collections.Counter(targets_list)\n distinct_tgts = len(tgts_counter)\n print(f\"Number of distinct targets: {distinct_tgts}\")\n\n return sentences, labels, targets_list, word_counter\n else:\n return list(zip(sentences, labels, target_final, tok_list))",
"def extract_text_recognition_dataset(self, path):\n\n os.makedirs(os.path.join(path, 'images'))\n\n annotation = []\n\n for frame in tqdm(self.annotation['images']):\n image = cv2.imread(frame['file_name'], cv2.IMREAD_IGNORE_ORIENTATION | cv2.IMREAD_COLOR)\n for ann_id in self.img_id_2_ann_id[frame['id']]:\n obj = self.annotation['annotations'][ann_id]\n if obj['attributes']['legible']:\n bbox = obj['bbox']\n try:\n transcription = obj['attributes']['transcription']\n if transcription.isalnum():\n coord_x1, coord_y1, coord_x2, coord_y2 = bbox[0], bbox[1], bbox[0] + \\\n bbox[2], bbox[1] + bbox[3]\n coord_x1 = max(0, coord_x1)\n coord_x2 = min(image.shape[1] - 1, coord_x2)\n coord_y1 = max(0, coord_y1)\n coord_y2 = min(image.shape[0] - 1, coord_y2)\n crop_path = os.path.join(path, 'images', f'image{len(annotation)}.jpg')\n annotation.append(f'{crop_path} {transcription}')\n cv2.imwrite(crop_path, image[coord_y1:coord_y2, coord_x1:coord_x2])\n except KeyError:\n print('Missing transcription in ', frame['file_name'])\n break\n except IndexError:\n print('Error in image processing ', frame['file_name'])\n break\n\n with open(os.path.join(path, 'annotation.txt'), 'w') as file:\n file.write('\\n'.join(annotation))",
"def extract_face_detections(self):\n self.detector.setInput(self.image_blob)\n self.detections = self.detector.forward()",
"def tokenize(self, path):\n # Convert class 1,2 to 0,1\n # print(\"Convert class 1,2 to 0,1\")\n # Convert class 1,2 to 0,1\n dropped = cropped = 0\n oov_count = 0.\n word_count = 0.\n with open(path, 'r') as f:\n linecount = 0\n lines = []\n tags = []\n for line in f:\n linecount += 1\n if self.max_lines > 1 and linecount >= self.max_lines:\n break\n if self.lowercase:\n words = line.lower().strip().split()\n else:\n words = line.strip().split()\n tag, words = int(words[0]), words[1:]\n\n # if applying BPE\n if self.apply_bpe:\n words = [bp for word in words\n for bp in self.bpemb_en.encode(word)]\n\n if len(words) > self.maxlen:\n cropped += 1\n words = words[:self.maxlen]\n# try:\n# crop_words = words[:maxlen]\n# last_period = max(rindex(crop_words, '.'), rindex(crop_words, '!'), rindex(crop_words, ','))\n# except:\n# last_period = self.maxlen\n# if last_period < 10:\n# print(\"Sentence too short! {}\".format(words))\n# words = words[:last_period]\n if len(words) < 3:\n dropped += 1\n# print(words)\n continue\n words = ['<sos>'] + words\n words += ['<eos>']\n\n # vectorize\n vocab = self.dictionary.word2idx\n unk_idx = vocab['<oov>']\n indices = [vocab[w] if w in vocab else unk_idx for w in words]\n word_count += len(indices)\n oov_count += sum([1 if ii==unk_idx else 0 for ii in indices])\n # add to output list\n lines.append(indices)\n # Convert class 1,2 to 0,1\n # tag = tag - 1\n tags.append(tag)\n # tags = to_class_id(tags)\n print(\"Number of sentences cropped from {}: {} out of {} total, dropped {}. OOV rate {:.3f}\".\n format(path, cropped, linecount, dropped, oov_count/word_count))\n\n return list(zip(tags, lines))",
"def run():\n logger.info(f\"Process started:\")\n logger.info(f\"Converting Glove file to Word2Vec format\")\n convert_to_word2vec.convert(\n \"./data/source/glove.6B.50d.txt\", \"./data/source/glove.6B.50d.w2vformat.txt\"\n )\n\n logger.info(f\"Extracting Click Stream data\")\n extract_click_stream_data()\n\n logger.info(\"Extracting Wiki articles\")\n extract_wiki_articles()\n\n logger.info(f\"Generating Clickstream dataset\")\n generate_datasets()\n\n logger.info(\"Tokenizing articles\")\n WikiArticlesTokenizer().process()\n\n logger.info(\"Creating dataset with Wiki Articles\")\n create_wiki_articles_dataset()",
"def _preprocess(self, txt_seq):\n input = []\n label = []\n punc = \" \"\n for token in txt_seq.split():\n if token in self.punc2id:\n punc = token\n else:\n input.append(self.word2id.get(token, self.word2id[\"<UNK>\"]))\n label.append(self.punc2id[punc])\n punc = \" \"\n input.append(self.word2id[\"<END>\"])\n label.append(self.punc2id[punc])\n input = torch.LongTensor(input)\n label = torch.LongTensor(label)\n # input = np.array(input)\n # label = np.array(label)\n return input, label",
"def process(self, message, **kwargs):\n if self.classifier is None:\n self.train()\n\n if message.get(\"text\") is not None:\n sid = SentimentIntensityAnalyzer()\n res = sid.polarity_scores(message.get(\"text\"))\n key, value = max(res.items(), key=lambda x: x[1])\n\n if key == \"pos\":\n key = \"Positive\"\n elif key == \"neg\":\n key = \"Negative\"\n else:\n key = \"Neutral\"\n\n custom_tokens = self.remove_noise(word_tokenize(message.get(\"text\")))\n t = self.classifier.prob_classify(dict([token, True] for token in custom_tokens))\n\n sentiment = 'Positive' if t.prob('Positive') > t.prob('Negative') else 'Negative'\n confidence = max(t.prob('Positive'), t.prob('Negative'))\n\n found, entry = self.manager.getMovieName(message.get(\"text\"))\n movie = str(entry['original_title'].item())\n \n genre_entry, aux_found_genre = self.manager.fuzzy_find_genre(message.get(\"text\"), with_ratio=True)[0]\n genre = genre_entry\n \n\n if len(message.get(\"text\")) > 20:\n entity = self.convert_to_rasa(sentiment, confidence, name=\"our_sentiment_extractor\")\n else:\n entity = self.convert_to_rasa(key, value, name=\"builtin_sentiment_extractor\")\n\n message.set(\"sentiment\", [entity], add_to_output=True)\n\n entity = self.convert_movie_to_rasa(movie, found)\n message.set(\"movies\", [entity], add_to_output=True)\n\n if message.get(\"text\").strip() == \"no\":\n found_genre = False\n else:\n found_genre = True if aux_found_genre > 80 else False\n\n entity = self.convert_movie_to_rasa(genre, found_genre, entity=\"genres_detected\")\n print(entity)\n message.set(\"genres\", [entity], add_to_output=True)",
"def process(self, tweet):\n\n #identify the applicable event keywords for this text\n text = self.cleanup_data(tweet.text)\n tokens = [str(t.lower()).translate(None, string.punctuation) for t in tweet.text.split()]\n applicable_tokens = []\n for phrase in self.match_event_tree.root.keywords:\n if phrase in \" \".join(tokens):\n applicable_tokens.append(phrase)\n\n self.match_event_tree.propogate_tweet(applicable_tokens, tweet)",
"def parse_text(self, source):\r\n\r\n global word_set\r\n line_count = 0\r\n word_count = 0\r\n self.vowels = self.analyse_vowels(source)\r\n\r\n with open(source) as f:\r\n for line in f:\r\n # Detect end of paragraph\r\n if line_count and not line.strip() or line.startswith(\"\\t\"):\r\n self.paragraph_sizes.add(line_count)\r\n line_count = 0\r\n \r\n words = line.split()\r\n for word in words:\r\n if not word:\r\n continue\r\n self.word_sizes.add(len(word))\r\n construction = self.calculate_construction(word)\r\n self.word_constructions.add(construction)\r\n word_count += 1\r\n\r\n # Check if this is the end of a line.\r\n if word[-1] in self.ENDING_PUNCTUATION:\r\n line_count += 1\r\n self.sentence_sizes.add(word_count)\r\n word_count = 0\r\n\r\n \r\n if not self.paragraph_sizes.is_empty():\r\n # Liable to not parse in certain sources.\r\n self.paragraph_sizes = probabilities.PARAGRAPH_SIZES",
"def processText(text):\n print(type(text))\n for line in text:\n print(line)\n return text",
"def main(path):\n logger.info(f'Processing video file {path}')\n # Extract audio\n audio_file = extract_audio(path, pipeline_config.audio_target_dir)\n\n # Generate sound classification results and speech recogniser results\n sound_results = SoundRecogniser().process_file(audio_file)\n sound_results = process_overlap(sound_results)\n speech_results = SpeechRecogniser().process_file(audio_file)\n\n # NLP\n wrds = get_words(speech_results)\n nlp = SpaCyNaturalLanguageProcessor(pipeline_config.spacy_model)\n custom_nlp = SpaCyNaturalLanguageProcessor(pipeline_config.custom_spacy_model)\n processor = nlp.get_spacy_results_processor(wrds, speech_results)\n custom_processor = custom_nlp.get_spacy_results_processor(wrds, speech_results)\n chunk_results = processor.process_speech_results_chunk()\n ner_results = processor.process_speech_results_ner()\n ner_results.extend(custom_processor.process_speech_results_ner())\n match_results = processor.process_speech_results_match()\n speech_results = nlp.process_spurious_words(speech_results, chunk_results)\n\n # Add Speech recogniser results, sound classification results and NLP results to a subtitle file\n subs_1 = save_to_subtitles(speech_results,\n lambda speech_result: speech_result['word'])\n subs_1 = compress_subs(subs_1)\n subs_2 = save_to_subtitles(sound_results,\n lambda sound_result: sound_result['class'])\n subs_2 = flatten_subs(subs_2)\n subs_3 = save_to_subtitles(chunk_results,\n lambda chunk_result: f'{chunk_result[\"word\"]} ({chunk_result[\"head\"]})')\n subs_4 = save_to_subtitles(ner_results,\n lambda ner_result: f'{ner_result[\"type\"]} {ner_result[\"word\"]}')\n subs_5 = save_to_subtitles(match_results,\n lambda match_result: match_result[\"word\"])\n\n combined_subs = append_subs(None, subs_1, style='bottom')\n combined_subs = append_subs(combined_subs, subs_2, exclude=['bottom'], style='top', formatter=lambda x: f'({x})')\n combined_subs = append_subs(combined_subs, subs_3, style='left')\n combined_subs = append_subs(combined_subs, subs_4, style='right')\n combined_subs = append_subs(combined_subs, subs_5, style='bottom_left_pred')\n combined_subs = remove_tiny_subs(combined_subs, duration_millis=1000, left_millis=None,\n right_millis=None, style='top')\n subtitle_file_name = os.path.splitext(path)[0] + '.ass'\n create_styles(combined_subs)\n combined_subs.save(subtitle_file_name)\n\n # Burn to a video\n burn_subtitles_into_video(path, subtitle_file_name, pipeline_config.audio_target_dir)\n logger.info(f'Done processing {audio_file}')",
"def process_raw_phrases(file_path):",
"def process_tags(self):\n nolf = self.unixtext.replace(\"\\n\", \" \")\n res = EMERGENCY_RE.findall(nolf)\n if res:\n # TODO: this can be based off the IBW Tags too\n self.is_emergency = True\n match = WINDHAIL.match(nolf)\n if match:\n gdict = match.groupdict()\n self.windtag = gdict['wind']\n self.windtagunits = gdict['windunits']\n self.haildirtag = gdict['haildir']\n self.winddirtag = gdict['winddir']\n self.hailtag = gdict['hail']\n\n match = WINDTAG.match(nolf)\n if match:\n gdict = match.groupdict()\n self.winddirtag = gdict['winddir']\n self.windtag = gdict['wind']\n self.windtagunits = gdict['windunits']\n\n match = HAILTAG.match(nolf)\n if match:\n gdict = match.groupdict()\n self.haildirtag = gdict['haildir']\n self.hailtag = gdict['hail']\n\n match = TORNADOTAG.match(nolf)\n if match:\n gdict = match.groupdict()\n self.tornadotag = gdict['tornado']\n\n match = TORNADODAMAGETAG.match(nolf)\n if match:\n gdict = match.groupdict()\n self.tornadodamagetag = gdict['damage']\n\n match = WATERSPOUTTAG.match(nolf)\n if match:\n gdict = match.groupdict()\n self.waterspouttag = gdict['waterspout']\n\n for token in FLOOD_TAGS.findall(self.unixtext):\n self.flood_tags[token[0]] = token[1]"
] | [
"0.59176165",
"0.5905736",
"0.56918573",
"0.5629905",
"0.5600295",
"0.55916744",
"0.558541",
"0.5556407",
"0.55540735",
"0.5542036",
"0.5529023",
"0.54705584",
"0.5440531",
"0.543348",
"0.54040974",
"0.53689724",
"0.53610826",
"0.5352985",
"0.5328044",
"0.53233397",
"0.5319539",
"0.5318711",
"0.52999985",
"0.52706593",
"0.52669364",
"0.5238363",
"0.52366275",
"0.5230943",
"0.5226656",
"0.5209931"
] | 0.6518016 | 0 |
Return workflow history of this context, for all workflows in its chain. Taken from plone_scripts/getWorkflowHistory.py | def workflowHistory(self, complete=True):
context = aq_inner(self.context)
# Since switching to DCWorkflow's getInfoFor, we rely on its
# permission checks.
#if not (_checkPermission('Request review', context) or
# _checkPermission('Review portal content', context)):
# return []
wf_tool = getToolByName(context, 'portal_workflow')
membership = getToolByName(context, 'portal_membership')
workflows = wf_tool.getWorkflowsFor(self.context)
review_history = []
try:
# get total history
for wf in workflows:
wf_review_history = wf.getInfoFor(context,
'review_history', [])
# Add in the state_var, to find the title and use in template
for item in wf_review_history:
item['state_var'] = wf.state_var
review_history.extend(wf_review_history)
if not complete:
# filter out automatic transitions.
review_history = [r for r in review_history if r['action']]
else:
review_history = list(review_history)
portal_type = context.portal_type
anon = _(u'label_anonymous_user', default=u'Anonymous User')
for r in review_history:
r['type'] = 'workflow'
r['transition_title'] = wf_tool.getTitleForTransitionOnType(
r['action'], portal_type) or _("Create")
r['state_title'] = wf_tool.getTitleForStateOnType(
r[r['state_var']], portal_type)
actorid = r['actor']
r['actorid'] = actorid
if actorid is None:
# action performed by an anonymous user
r['actor'] = {'username': anon, 'fullname': anon}
r['actor_home'] = ''
else:
r['actor'] = membership.getMemberInfo(actorid)
if r['actor'] is not None:
r['actor_home'] = self.navigation_root_url + '/author/' + actorid
else:
# member info is not available
# the user was probably deleted
r['actor_home'] = ''
review_history.reverse()
except WorkflowException:
log('plone.app.layout.viewlets.content: '
'%s has no associated workflow' % context.absolute_url(),
severity=logging.DEBUG)
return review_history | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getProcessingHistoryList(context):\n projectDir = context.projectDir\n steps = []\n history = GenericMetadata._readEntriesForSection(projectDir, GenericMetadata.HISTORY_SECTION)\n try:\n idx = int(history['numsteps']) + 1\n for i in xrange(1, idx):\n key = GenericMetadata.HISTORY_PROTO + str(i)\n steps.append(history[key])\n except KeyError:\n pass\n \n return steps",
"def get_history(self):\n return self.history",
"def get_workflows(self):\n return self._data_dict[self.KEY_BI_WORKFLOWS]",
"def history(self):\n return self.info['history']",
"def history(self):\n return self._history",
"def history(self):\n return self._history",
"def get_history(self):\n return self.__history[:]",
"def get_order_history(self):\n return self.__call__('orders', 'getorderhistory')",
"def history(self):\n return self.board.history",
"def stack(self):\n return self.history",
"def task_history(self):\n return self._task_history",
"def get_workflow_steps(self):\n return self._data_dict[self.KEY_WF_STEPS]",
"def getWorkflowSteps(self):\n\n return self.dbase.getProcessSteps(self.scene)",
"def getOrderHistory(self):\n return self.__orderhistory",
"def get_workflows(self):\n try:\n result = self._session.query(WorkflowEntity).all()\n result_dict = self.result_dict(result)\n except SQLAlchemyError as err:\n Log.an().error('sql exception [%s]', str(err))\n return False\n\n return result_dict",
"def history(self):\n return _spacegrant_swig.hdlc_framer_sptr_history(self)",
"def history(self):\n alembic.command.history(self.alembic_config(), verbose=True)",
"def getModelHistory(self, *args):\n return _libsbml.SBase_getModelHistory(self, *args)",
"def get_history(self):\r\n\r\n return self.board_history",
"def get_action_history(self):\n\t\treturn self._action_history",
"def history(self):\n raise NotImplementedError\n # from domonic.webapi.history import History\n # return History()",
"def History(self):\n return self.historydict.get('history', [])",
"def workflow_tests(self):\n return self._workflows.copy()",
"def get_history(cls, **filters) -> List[dict]:\n return cls.get_all(**filters)",
"def get_history(self, taxlot_view):\n history = []\n\n def record_dict(log):\n filename = None if not log.import_filename else path.basename(log.import_filename)\n if filename:\n # Attempt to remove NamedTemporaryFile suffix\n name, ext = path.splitext(filename)\n pattern = re.compile('(.*?)(_[a-zA-Z0-9]{7})$')\n match = pattern.match(name)\n if match:\n filename = match.groups()[0] + ext\n return {\n 'state': TaxLotStateSerializer(log.state).data,\n 'date_edited': convert_to_js_timestamp(log.created),\n 'source': log.get_record_type_display(),\n 'filename': filename,\n # 'changed_fields': json.loads(log.description) if log.record_type == AUDIT_USER_EDIT else None\n }\n\n log = TaxLotAuditLog.objects.select_related('state', 'parent1', 'parent2').filter(\n state_id=taxlot_view.state_id\n ).order_by('-id').first()\n master = {\n 'state': TaxLotStateSerializer(log.state).data,\n 'date_edited': convert_to_js_timestamp(log.created),\n }\n\n # Traverse parents and add to history\n if log.name in ['Manual Match', 'System Match', 'Merge current state in migration']:\n done_searching = False\n while not done_searching:\n if (log.parent1_id is None and log.parent2_id is None) or log.name == 'Manual Edit':\n done_searching = True\n elif log.name == 'Merge current state in migration':\n record = record_dict(log.parent1)\n history.append(record)\n if log.parent1.name == 'Import Creation':\n done_searching = True\n else:\n tree = log.parent1\n log = tree\n else:\n tree = None\n if log.parent2:\n if log.parent2.name in ['Import Creation', 'Manual Edit']:\n record = record_dict(log.parent2)\n history.append(record)\n elif log.parent2.name == 'System Match' and log.parent2.parent1.name == 'Import Creation' and \\\n log.parent2.parent2.name == 'Import Creation':\n # Handle case where an import file matches within itself, and proceeds to match with\n # existing records\n record = record_dict(log.parent2.parent2)\n history.append(record)\n record = record_dict(log.parent2.parent1)\n history.append(record)\n else:\n tree = log.parent2\n if log.parent1.name in ['Import Creation', 'Manual Edit']:\n record = record_dict(log.parent1)\n history.append(record)\n else:\n tree = log.parent1\n\n if not tree:\n done_searching = True\n else:\n log = tree\n elif log.name == 'Manual Edit':\n record = record_dict(log.parent1)\n history.append(record)\n elif log.name == 'Import Creation':\n record = record_dict(log)\n history.append(record)\n\n return history, master",
"def history_orders(self, **params):\n return self._get('historyOrders', signed=True, params=params)",
"def history(self):\n return _spacegrant_swig.NRZI_sptr_history(self)",
"def history(self) -> \"unsigned int\":\n return _beamforming_swig.beamformer_sptr_history(self)",
"def history(self, maxresults=None, mindate=None):\n hist = []\n for server in self.servers:\n hist.extend(server.history(maxresults=maxresults, mindate=mindate))\n return hist",
"def get_state(self):\n return self.history"
] | [
"0.6612526",
"0.6584411",
"0.65203327",
"0.6433324",
"0.639691",
"0.639691",
"0.6346261",
"0.62524766",
"0.6220365",
"0.618586",
"0.61093587",
"0.60434985",
"0.60136586",
"0.5986631",
"0.59849703",
"0.5923301",
"0.5921552",
"0.59123814",
"0.58308953",
"0.57951367",
"0.57912314",
"0.57712036",
"0.5750684",
"0.57463926",
"0.5725021",
"0.5715015",
"0.5710659",
"0.5695353",
"0.5689704",
"0.56629765"
] | 0.668929 | 0 |
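The workflowHistory row above builds a list of dicts and tags each entry with keys such as 'transition_title', 'state_title', 'actor', and 'actor_home'. As a hedged, framework-free sketch of consuming such a list, the entries below are invented stand-ins shaped only by the keys that code assigns; nothing here calls Plone itself:

```python
# Illustration only: fake review-history entries that mimic the keys the
# workflowHistory() helper above assigns. Real entries would come from
# DCWorkflow's getInfoFor().
sample_history = [
    {
        "transition_title": "Publish",
        "state_title": "Published",
        "actor": {"username": "editor1", "fullname": "Example Editor"},
    },
    {
        "transition_title": "Create",
        "state_title": "Private",
        "actor": {"username": "author1", "fullname": "Example Author"},
    },
]

def format_history(entries):
    """Render each workflow-history entry as a one-line summary."""
    lines = []
    for entry in entries:
        actor = entry.get("actor") or {}
        lines.append(
            "{} -> {} by {}".format(
                entry.get("transition_title", "?"),
                entry.get("state_title", "?"),
                actor.get("fullname", "unknown"),
            )
        )
    return lines

if __name__ == "__main__":
    for line in format_history(sample_history):
        print(line)
```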
Build a URI template using the url_key and constants from the API definition found in const.py | def build_uri_template(url_key: str) -> URITemplate:
_skeleton = ''.join([API_PATH['base'], API_PATH[url_key]])
_template = URITemplate(_skeleton)
return _template | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def uri_template(app, **kwargs):\n assert len(kwargs) == 1\n\n endpoint = kwargs.keys()[0]\n parameters = kwargs.values()[0]\n\n for url in app.url_map.iter_rules():\n if url.endpoint == endpoint:\n break\n else:\n return ''\n\n ut = url.rule\n\n for param, replacement in parameters.items():\n ut = ut.replace(\n '<{}>'.format(param), '{' + replacement + '}')\n\n return urljoin(request.url_root, ut)",
"def _url_builder(url_root,api_key,path,params):\n params['api_key'] = api_key\n url_end = urlencode(params)\n url = \"%s%s%s\" % (url_root,path,url_end)\n return url",
"def build_api_url(project, method, base_url):\n return API_URL_TEMPLATE.format(\n api_base=base_url, api_version=API_VERSION, project=project, method=method\n )",
"def new_url(module):\n # To create the URL, we need to take the format string and return it with no params\n url_base = \"/axapi/v3/ddos/template/tcp\"\n\n f_dict = {}\n f_dict[\"name\"] = \"\"\n\n return url_base.format(**f_dict)",
"def new_url(module):\n # To create the URL, we need to take the format string and return it with no params\n url_base = \"/axapi/v3/fw/template/logging/{name}\"\n f_dict = {}\n \n f_dict[\"name\"] = \"\"\n\n return url_base.format(**f_dict)",
"def _generate_url(self, endpoint:str, params:Dict[str, str]=None) -> str:\n if params:\n return f\"{self.BASE_URL}/{self._api_version}{endpoint}?{urlencode(params)}\"\n return f\"{self.BASE_URL}/{self._api_version}{endpoint}\"",
"def _build_uri(self, **kwargs):\n target_uri, version = str(), None\n\n if kwargs.get('category') not in ['performance', 'common']:\n version = self._build_uri_get_version(kwargs.get('version'),\n kwargs.get('no_version'))\n if version:\n target_uri += '/{version}'.format(version=version)\n\n target_uri += '/{category}'.format(\n category=kwargs.get('category'))\n\n if kwargs.get('resource_level'):\n target_uri += '/{resource_level}'.format(\n resource_level=kwargs.get('resource_level'))\n\n if kwargs.get('resource_level_id'):\n target_uri += '/{resource_level_id}'.format(\n resource_level_id=kwargs.get('resource_level_id'))\n\n if kwargs.get('resource_type'):\n target_uri += '/{resource_type}'.format(\n resource_type=kwargs.get('resource_type'))\n if kwargs.get('resource_type_id'):\n target_uri += '/{resource_type_id}'.format(\n resource_type_id=kwargs.get('resource_type_id'))\n\n if kwargs.get('resource'):\n target_uri += '/{resource}'.format(\n resource=kwargs.get('resource'))\n if kwargs.get('resource_id'):\n target_uri += '/{resource_id}'.format(\n resource_id=kwargs.get('resource_id'))\n\n if kwargs.get('object_type'):\n target_uri += '/{object_type}'.format(\n object_type=kwargs.get('object_type'))\n if kwargs.get('object_type_id'):\n target_uri += '/{object_type_id}'.format(\n object_type_id=kwargs.get('object_type_id'))\n\n return target_uri",
"def __build_url(self, api_call, **kwargs):\n kwargs['key'] = self.api_key\n if 'language' not in kwargs:\n kwargs['language'] = self.language\n if 'format' not in kwargs:\n kwargs['format'] = self.__format\n api_query = urlencode(kwargs)\n\n return \"{0}{1}?{2}\".format(urls.BASE_URL,\n api_call,\n api_query)",
"def build_url(base_url, service, major_version, resource_type,\n parameters=None, service_mappings=None, subpath='fdsnws'):\n # Avoid mutable kwargs.\n if parameters is None:\n parameters = {}\n if service_mappings is None:\n service_mappings = {}\n\n # Only allow certain resource types.\n if service not in [\"dataselect\", \"station\"]:\n msg = \"Resource type '%s' not allowed. Allowed types: \\n%s\" % \\\n (service, \",\".join((\"dataselect\", \"station\")))\n raise ValueError(msg)\n\n # Special location handling.\n if \"location\" in parameters:\n loc = parameters[\"location\"].replace(\" \", \"\")\n # Empty location.\n if not loc:\n loc = \"--\"\n # Empty location at start of list.\n if loc.startswith(','):\n loc = \"--\" + loc\n # Empty location at end of list.\n if loc.endswith(','):\n loc += \"--\"\n # Empty location in middle of list.\n loc = loc.replace(\",,\", \",--,\")\n parameters[\"location\"] = loc\n\n # Apply per-service mappings if any.\n if service in service_mappings:\n url = \"/\".join((service_mappings[service], resource_type))\n else:\n if subpath is None:\n parts = (base_url, service, str(major_version),\n resource_type)\n else:\n parts = (base_url, subpath.lstrip('/'), service,\n str(major_version), resource_type)\n url = \"/\".join(parts)\n\n if parameters:\n # Strip parameters.\n for key, value in parameters.items():\n try:\n parameters[key] = value.strip()\n except Exception:\n pass\n url = \"?\".join((url, urlencode(parameters, safe=':,*')))\n \n return url",
"def test_generate_url_with_api_key():\n config = core.Config(api_key='FAKE')\n expected = \"{}?{}\".format(ENTREZ_URL, \"retmode=text&id=FAKE&db=nucleotide&api_key=FAKE&rettype=gbwithparts\")\n assert expected == core.generate_url(\"FAKE\", config)\n\n config.format = 'gff3'\n expected = \"{}?{}\".format(SVIEWER_URL, \"retmode=text&id=FAKE&db=nucleotide&api_key=FAKE&report=gff3\")\n assert expected == core.generate_url(\"FAKE\", config)",
"def _make_url(self):\n ...",
"def _build_api_request_uri(self, http_method=\"GET\"):\n return self.urlobject_single.format(self._cb.credentials.org_key, self._model_unique_id)",
"def _build_api_request_uri(self, http_method=\"GET\"):\n return self.urlobject_single.format(self._cb.credentials.org_key, self._model_unique_id)",
"def existing_url(module):\n # Build the format dictionary\n url_base = \"/axapi/v3/ddos/template/tcp/{name}\"\n\n f_dict = {}\n if '/' in str(module.params[\"name\"]):\n f_dict[\"name\"] = module.params[\"name\"].replace(\"/\", \"%2F\")\n else:\n f_dict[\"name\"] = module.params[\"name\"]\n\n return url_base.format(**f_dict)",
"def build_url(self, dict_args_in_out=None):\n if dict_args_in_out is None:\n dict_args_in_out = {}\n\n url = dict_args_in_out.pop('base_url', None) or ''\n url += '/%s' % self.collection_key\n\n # do we have a specific entity?\n entity_id = dict_args_in_out.pop('%s_id' % self.key, None)\n if entity_id is not None:\n url += '/%s' % entity_id\n\n return url",
"def make_url(api_key, url, args=None):\n if args is None:\n args = []\n argsep = '&'\n if '?' not in url:\n argsep = '?'\n if '?apiKey=' not in url and '&apiKey=' not in url:\n args.insert(0, ('apiKey', api_key))\n return url + argsep + '&'.join(['='.join(t) for t in args])",
"def _generate_url(action, query_params=None):\r\n if query_params:\r\n query_params = urllib.parse.urlencode(query_params)\r\n action = f\"{action}?{query_params}\"\r\n \r\n\r\n url = urllib.parse.urljoin(api_url, action)\r\n\r\n return url",
"def existing_url(module):\n # Build the format dictionary\n url_base = \"/axapi/v3/fw/template/logging/{name}\"\n f_dict = {}\n \n f_dict[\"name\"] = module.params[\"name\"]\n\n return url_base.format(**f_dict)",
"def aci_create_url(self, cfg, urltmpl):\n\n # url = urltmpl.render(cfg)\n j2varlist = []\n\n if not isinstance(urltmpl, list): # urltmpl is not a list\n urlTemplate = Template(urltmpl)\n else: # urltempl is a list\n for item in urltmpl: # find the proprer url template for the current cfg\n j2varlist = re.findall(r\"{{([a-zA-Z0-9]+)\\.\", item)\n j2itemfound = True\n for j2varitem in j2varlist: # are all j2 variables in cfg as keys ?\n if j2varitem not in cfg:\n j2itemfound = False # ... no, wrong URL template\n break\n if j2itemfound: # ... yes, the proper URL template was found\n urlTemplate = Template(item)\n break\n try:\n url = urlTemplate.render(cfg)\n except (UnboundLocalError, UndefinedError):\n print(\"Proper URL template was not found\", cfg)\n exit(1)\n return url",
"def generate_call_string(self):\n if(self.api_key is None):\n raise error(\"API Key is not defined\");#Should base class do this? \n \n self.call_url=self.baseurl;\n if hasattr(self,'search_str'):\n self.call_url+=self.search_str;\n if hasattr(self,'filter_field_str'):\n self.call_url=self.call_url+'&'+self.filter_field_str;\n \n #loop over the parameters dict\n for key in self.input_params:\n self.call_url+=self.input_params[key];\n \n #finally add api key. at this point already checked it exists\n self.call_url=self.call_url+'&'+\"api-key=\"+str(self.api_key);\n return;",
"def GenerateUrl():\n params = {}\n params['client_id'] = Constants.USER['CLIENT_ID']\n params['redirect_uri'] = Constants.AUTH['REDIRECT']\n params['scope'] = Constants.AUTH['SCOPE']\n params['response_type'] = 'code'\n return '%s?%s' % (Constants.OAUTH, FormatUrl(params))",
"def build_url(self, endpoint_url: str) -> str:\n return self.base_url + endpoint_url % self.instance_id",
"def _construct_url(self, endpoint):\n return self.base_url + self.api_path + endpoint.strip('/')",
"def __build_url(path, api_site_parameter, **params):\n \n query = [\"%s=%s\" % (key, params[key]) for key in params if (params[key] or key == 'pagesize') ]\n query_string = \"&\".join(query)\n url = \"%s/%s/%s?\" % (__api_endpoint, __api_version, path)\n url += query_string\n return url",
"def build_url(self, config, query):\n if(not os.environ['FLICKR_API_KEY']):\n raise ValueError('Environement variable \"FLICKR_API_KEY\" is empty')\n \n current_provider = [provider for provider in config['providers'] if provider['name'] == self.provider_name][0]\n current_provider['query']['text'] = str(query)\n current_provider['query']['api_key'] = os.environ['FLICKR_API_KEY']\n\n query_strings = helper.build_query_strings(current_provider['query'])\n\n return current_provider['base_url'] + query_strings",
"def new_url(**kwargs):\n # To create the URL, we need to take the format string and return it with no params\n url_base = \"/axapi/v3/export\"\n f_dict = {}\n\n return url_base.format(**f_dict)",
"def new_url(module):\n # To create the URL, we need to take the format string and return it with no params\n url_base = \"/axapi/v3/web-category/category-list\"\n\n f_dict = {}\n f_dict[\"name\"] = \"\"\n\n return url_base.format(**f_dict)",
"def _build_uri(self, uri_base, params):\n if not params:\n return uri_base\n else:\n uri_extension = \"?\"\n for param in params:\n uri_extension = uri_extension + param + \"&\"\n uri_extension = uri_extension[:-1] # clip off the final & \n uri = uri_base + uri_extension\n return uri",
"def _make_url(self, url_part, blueprint_prefix):\n parts = (blueprint_prefix, self.prefix, url_part)\n return ''.join(_ for _ in parts if _)",
"def existing_url(**kwargs):\n # Build the format dictionary\n url_base = \"/axapi/v3/export\"\n f_dict = {}\n\n return url_base.format(**f_dict)"
] | [
"0.709884",
"0.6977214",
"0.67404443",
"0.66753805",
"0.662737",
"0.6527455",
"0.64493567",
"0.642962",
"0.6384027",
"0.63768953",
"0.63635385",
"0.63031346",
"0.63031346",
"0.6255533",
"0.6241023",
"0.62348884",
"0.6225104",
"0.6216448",
"0.6147686",
"0.6134802",
"0.6097601",
"0.6078842",
"0.60750705",
"0.6073324",
"0.606907",
"0.60375667",
"0.60368234",
"0.60301054",
"0.6001283",
"0.5975404"
] | 0.8198724 | 0 |
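For the build_uri_template row above, a minimal standalone sketch of the same pattern using the uritemplate package; the API_PATH dictionary here is a hypothetical stand-in for the const.py constants the query mentions, and the base URL is made up:

```python
from uritemplate import URITemplate

# Hypothetical stand-in for const.py's API_PATH; the real keys and paths
# belong to the project's own API definition.
API_PATH = {
    "base": "https://api.example.com/v1",
    "get_supersets": "/items/{type}/{no}/supersets",
    "get_subsets": "/items/{type}/{no}/subsets",
}

def build_uri_template(url_key):
    """Join the base path with the endpoint skeleton and wrap it in a URITemplate."""
    skeleton = "".join([API_PATH["base"], API_PATH[url_key]])
    return URITemplate(skeleton)

if __name__ == "__main__":
    template = build_uri_template("get_subsets")
    # expand() substitutes the {type} and {no} variables.
    print(template.expand(type="SET", no="75192-1"))
    # -> https://api.example.com/v1/items/SET/75192-1/subsets
```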
/items/{type}/{no}/supersets (see Bricklink API) | def get_supersets(self, itemid: str, itemtypeid: str)->dict:
self.__validate(itemid=itemid, itemtype=itemtypeid)
url = build_uri_template('get_supersets').expand(type=itemtypeid, no=itemid)
logger.info("Getting supersets: {}".format(url))
data = self._get_data(url)
return data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_subsets(self, itemid: str, itemtypeid: str)->dict:\n self.__validate(itemid=itemid, itemtype=itemtypeid)\n url = build_uri_template('get_subsets').expand(type=itemtypeid, no=itemid)\n logger.info(\"Getting subsets: {}\".format(url))\n data = self._get_data(url)\n return data",
"def resource_bundle(resource_type, methods=[\"GET\"]):\n token = validate_auth()\n url = current_app.config.get('MAP_API') + resource_type\n params = {'_count': 1000}\n params.update(request.args)\n resp = requests.get(url, auth=BearerAuth(token), params=params)\n try:\n resp.raise_for_status()\n except requests.exceptions.HTTPError as err:\n abort(err.response.status_code, err)\n\n return jsonify(resp.json())",
"def item_from_browse(request):\n\n result = item( request.user, request.POST['sku'] )\n\n return JSONHttpResponse(result)",
"def specialist_list(self, request, **dict):\n\t\tdata = self.get_serializer(self.get_queryset(), many=True).data\n\t\treturn Response(data, status.HTTP_200_OK)",
"def list_bundles():\n response = houston.get(\"/zipline/bundles\")\n\n houston.raise_for_status_with_json(response)\n return response.json()",
"def test_collection_viewset_list_superuser(logged_in_apiclient, settings):\n client, user = logged_in_apiclient\n user.is_superuser = True\n user.save()\n url = reverse(\"models-api:collection-list\")\n collections = [CollectionFactory(owner=user).hexkey for _ in range(5)]\n other_user = UserFactory()\n collections += [CollectionFactory(owner=other_user).hexkey]\n\n result = client.get(url)\n assert result.status_code == status.HTTP_200_OK\n assert len(result.data[\"results\"]) == 6\n for coll_data in result.data[\"results\"]:\n assert coll_data[\"key\"] in collections",
"def get_items():\n return requester.perform_request(Uri.items)",
"def showOwnedBundles(userId):\n url = f\"https://catalog.roblox.com/v1/users/{userId}/bundles?limit=100&sortOrder=Asc\"\n r = requests.get(url)\n j = json.loads(r.text)\n return j['data']",
"def item_from_request(request):\n\n result = item( request.user, request.POST['sku'] )\n\n return JSONHttpResponse(result)",
"def api_asset_list():\n return jsonify(app.bank.to_list()), 200",
"def edit_items(request):\n token = getToken(request)\n superUser = isSuperUser(token)\n if superUser == True:\n id = request.data['id']\n try:\n items = Items.objects.get(id=id)\n except:\n return Response(status=status.HTTP_404_NOT_FOUND)\n serializer = ItemsSerializer(items, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n else:\n return Response(status=status.HTTP_401_UNAUTHORIZED)",
"def request_subset_edit(self, request):\n user_id = request['user_id']\n workspace_uuid = request['workspace']['uuid']\n request_list = None\n if 'subsets' in request.keys():\n request_list = request['subsets']\n response = self.list_subsets(workspace_unique_id=workspace_uuid, user_id=user_id, request=request_list)\n \n return response",
"def create_subsets(subsets):\n index = 0\n docs = []\n for name,_,stype in subsets:\n record = {\n \"_id\": name,\n \"type\": stype\n }\n docs.append(record)\n index+=1\n return docs",
"def find_bundles_for_url(request):\n\n # get/create link for given url\n url = request.query_params.get('url', None)\n\n # validate url is a url\n v = URLValidator()\n\n try:\n v(url)\n except ValidationError as exc:\n # the user must be joking\n return Response({'error': True, 'msg': 'Invalid URL'}, status=400)\n\n # normalize url for tidyness\n url = urltools.normalize(url)\n\n try:\n link = Link.objects.get(url=url)\n except Link.DoesNotExist:\n return Response([])\n\n # find all bundle memberships for this link\n memberships = (BundleLink.objects\n .filter(link=link)\n .only('bundle_id')\n .distinct())\n\n # fetch all bundle-link pairs for bundles containing this link\n bundle_ids = [m.bundle_id for m in memberships]\n all_links = (BundleLink.objects\n .filter(bundle_id__in=bundle_ids)\n .select_related('bundle', 'link', 'curator'))\n\n # group bundlelinks by bundle - <bundle: [bundlelink, ...]>\n grouped = itertools.groupby(all_links, key=operator.attrgetter('bundle'))\n\n output = []\n\n for bundle, link_list in grouped:\n setattr(bundle, 'link_list', link_list)\n serialized = BundleSerializer(bundle)\n output.append(serialized.data)\n\n return Response(output)",
"def test_resource_collection_get_bundles(self):\n bundle = {\n 'resourceType': 'Bundle',\n 'entry': [\n {\n 'resource': {\n 'resourceType': 'ValueSet',\n 'id': 'example-extensional',\n 'url': 'http://value-in-a-bundle',\n 'status': 'draft',\n }\n }\n ],\n }\n\n collection = fhir_package.ResourceCollection(\n self._valueset_cls, self._primitive_handler, 'Z'\n )\n collection.put(bundle['entry'][0]['resource'], bundle)\n resource = collection.get('http://value-in-a-bundle')\n\n self.assertIsNotNone(resource)\n self.assertTrue(proto_utils.is_message_type(resource, self._valueset_cls))\n self.assertEqual(resource.id.value, 'example-extensional')\n self.assertEqual(resource.url.value, 'http://value-in-a-bundle')",
"def request_subset_list(self, request):\n user_id = request['user_id']\n workspace_uuid = request['workspace_uuid'] \n \n # Initiate structure \n response = {'workspace': {}, \n 'subsets': []}\n \n # Add workspace info\n response['workspace'] = self.dict_workspace(unique_id=workspace_uuid, user_id=user_id)\n \n subset_list = self.list_subsets(workspace_unique_id=workspace_uuid, user_id=user_id)\n \n # Add subset info \n response['subsets'] = subset_list\n \n return response",
"def list_subsets(self, workspace_unique_id=None, user_id=None, request=None):\n# print('list_subsets_request', request)\n subset_list = []\n# subset_uuid_list = [] \n# sub_request_list = []\n request_for_subset_uuid = self._get_mapping_for_name_in_dict('uuid', request)\n# subset_uuid_list.append(sub['uuid'])\n# sub_request_list.append(sub)\n# else: \n# subset_uuid_list = self.get_subset_list(workspace_unique_id=workspace_unique_id, user_id=user_id)\n# sub_request_list = [None]*len(subset_uuid_list)\n \n# for subset_uuid, sub_request in zip(subset_uuid_list, sub_request_list): \n# print('=====SUBSET_UUID=====')\n# print(workspace_unique_id)\n# print(user_id)\n# print(self.workspaces)\n# print('=====================')\n for subset_uuid in self.get_subset_list(workspace_unique_id=workspace_unique_id, user_id=user_id):\n print('=====SUBSET_UUID', '\"{}\"'.format(subset_uuid))\n sub_request = request_for_subset_uuid.get(subset_uuid, {})\n \n # Check uuid for subset in request (if given) \n# if request:\n# for sub in request:\n# # print(sub)\n# if sub['uuid'] == subset_uuid:\n# break\n \n # Get subset dict\n subset_dict = self.dict_subset(workspace_unique_id=workspace_unique_id, \n subset_unique_id=subset_uuid, \n request=sub_request)\n \n \n \n # Add subset dict to subset list\n subset_list.append(subset_dict)\n \n return subset_list",
"def getItems(self): \n items = []\n if self.itemCount > 0:\n \n site = getSite()\n \n \n # Make string path relative to the site root\n # E.g. string path \"news\" becomes \"/yoursiteid/news\"\n site_path = site.getPhysicalPath();\n \n path = \"/\".join(site_path) + \"/\" + self.path \n \n #if self.itemPortalType2 != None:\n # types.append(self.itemPortalType2) \n \n #print \"Querying by:\" + type + \" \" + path\n content_by_type = self.context.portal_catalog(path={ \"query\": path, \"depth\" :9 }, \n sort_on=\"created\", \n sort_order=\"reverse\")[0:self.itemCount]\n\n \n items += [ brain.getObject() for brain in content_by_type ]\n\n return items",
"def get_rest_list(request):\n if request.method == \"GET\":\n rest_list = Package.objects.order_by('-location')\n serializer = PackageSerializer(rest_list, many=True)\n return JsonResponse(serializer.data, safe=False)",
"def api_subset_lookup(cur):\n if 'api_subset' not in _tables:\n rclass = rclass_id_lookup(cur)\n _tables['api_subset'] = dict((k, rclass[v]) for k, v in API_SUBSETS.items())\n return _tables['api_subset']",
"def list(self, request):\n urls = {\n 'msg': 'Must use bulk_by_sample to get SCCmec Subtype hits',\n }\n\n return Response(urls)",
"def test_json(testapp, item_type):\n res = testapp.get('/' + item_type).follow(status=200)\n assert (item_type + 'Collection') in res.json['@type']",
"def item_subadres_adapter(obj, request):\n return {\n 'id': obj.id,\n 'subadres': obj.subadres,\n 'postadres': obj.postadres,\n 'status': {\n 'id': obj.status.id,\n 'naam': obj.status.naam,\n 'definitie': obj.status.definitie\n },\n 'aard': {\n 'id': obj.aard.id,\n 'naam': obj.aard.naam,\n 'definitie': obj.aard.definitie\n },\n 'metadata': {\n 'begin_tijd': obj.metadata.begin_tijd,\n 'begin_datum': obj.metadata.begin_datum,\n 'begin_bewerking': {\n 'id': obj.metadata.begin_bewerking.id,\n 'naam': obj.metadata.begin_bewerking.naam,\n 'definitie': obj.metadata.begin_bewerking.definitie\n },\n 'begin_organisatie': {\n 'id': obj.metadata.begin_organisatie.id,\n 'naam': obj.metadata.begin_organisatie.naam,\n 'definitie': obj.metadata.begin_organisatie.definitie\n }\n }\n }",
"def item_from_feed(request):\n\n result = item( request.user, request.POST['sku'] )\n\n return JSONHttpResponse(result)",
"def preset_items(self):\r\n\r\n raise NotImplementedError",
"def test_get_software_asset_bundle_expanded(self):\n pass",
"def item_from_party(request):\n\n result = item( request.user, request.POST['sku'] )\n\n return JSONHttpResponse(result)",
"def test_metadata_subsets_key_list(self):\n self.assertEqual(type(self.metadata.get('subsets', '')), type([]))",
"def basket(request):\n return {'basket': Basket(request)}",
"def test_archive_subset(bbapi):\n subset_name = \"productsSoftware\"\n file_format = \"json\"\n _ = bbapi.bulk.archive_subset(subset_name, file_format)"
] | [
"0.5300971",
"0.5246067",
"0.5177095",
"0.49986303",
"0.49967286",
"0.49637622",
"0.49469367",
"0.49088362",
"0.48998055",
"0.4856261",
"0.48412782",
"0.48360786",
"0.48124474",
"0.47905272",
"0.47800264",
"0.47636992",
"0.46328634",
"0.4622482",
"0.4617957",
"0.4617109",
"0.45983374",
"0.45945174",
"0.45941168",
"0.45938694",
"0.45577595",
"0.45030493",
"0.44955853",
"0.44946077",
"0.44943354",
"0.44844186"
] | 0.70193607 | 0 |
This is used to _get a set inventory /items/{type}/{no}/subsets (see Bricklink API) | def get_subsets(self, itemid: str, itemtypeid: str)->dict:
self.__validate(itemid=itemid, itemtype=itemtypeid)
url = build_uri_template('get_subsets').expand(type=itemtypeid, no=itemid)
logger.info("Getting subsets: {}".format(url))
data = self._get_data(url)
return data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_subsets(self, project):\n serializer = SubsetSerializer(project.subsets.all(), many=True)\n return serializer.data",
"def request_subset_edit(self, request):\n user_id = request['user_id']\n workspace_uuid = request['workspace']['uuid']\n request_list = None\n if 'subsets' in request.keys():\n request_list = request['subsets']\n response = self.list_subsets(workspace_unique_id=workspace_uuid, user_id=user_id, request=request_list)\n \n return response",
"def request_subset_list(self, request):\n user_id = request['user_id']\n workspace_uuid = request['workspace_uuid'] \n \n # Initiate structure \n response = {'workspace': {}, \n 'subsets': []}\n \n # Add workspace info\n response['workspace'] = self.dict_workspace(unique_id=workspace_uuid, user_id=user_id)\n \n subset_list = self.list_subsets(workspace_unique_id=workspace_uuid, user_id=user_id)\n \n # Add subset info \n response['subsets'] = subset_list\n \n return response",
"def get_set_inventory(self, itemid: str)->pd.DataFrame:\n json_inv = self.rc.get_subsets(itemid, ItemType.SET)\n if self.validate_json_set(json_inv):\n inv_list = self._json_inv_to_dict_list(json_inv)\n df = self._inv_dict_list_to_dataframe(inv_list)\n else:\n raise TypeError(\"Bricklink inventory must be a set\")\n return df",
"def list_subsets(self, workspace_unique_id=None, user_id=None, request=None):\n# print('list_subsets_request', request)\n subset_list = []\n# subset_uuid_list = [] \n# sub_request_list = []\n request_for_subset_uuid = self._get_mapping_for_name_in_dict('uuid', request)\n# subset_uuid_list.append(sub['uuid'])\n# sub_request_list.append(sub)\n# else: \n# subset_uuid_list = self.get_subset_list(workspace_unique_id=workspace_unique_id, user_id=user_id)\n# sub_request_list = [None]*len(subset_uuid_list)\n \n# for subset_uuid, sub_request in zip(subset_uuid_list, sub_request_list): \n# print('=====SUBSET_UUID=====')\n# print(workspace_unique_id)\n# print(user_id)\n# print(self.workspaces)\n# print('=====================')\n for subset_uuid in self.get_subset_list(workspace_unique_id=workspace_unique_id, user_id=user_id):\n print('=====SUBSET_UUID', '\"{}\"'.format(subset_uuid))\n sub_request = request_for_subset_uuid.get(subset_uuid, {})\n \n # Check uuid for subset in request (if given) \n# if request:\n# for sub in request:\n# # print(sub)\n# if sub['uuid'] == subset_uuid:\n# break\n \n # Get subset dict\n subset_dict = self.dict_subset(workspace_unique_id=workspace_unique_id, \n subset_unique_id=subset_uuid, \n request=sub_request)\n \n \n \n # Add subset dict to subset list\n subset_list.append(subset_dict)\n \n return subset_list",
"def test_metadata_subsets_key_list(self):\n self.assertEqual(type(self.metadata.get('subsets', '')), type([]))",
"def get_supersets(self, itemid: str, itemtypeid: str)->dict:\n self.__validate(itemid=itemid, itemtype=itemtypeid)\n url = build_uri_template('get_supersets').expand(type=itemtypeid, no=itemid)\n logger.info(\"Getting supersets: {}\".format(url))\n data = self._get_data(url)\n return data",
"def create_subsets(subsets):\n index = 0\n docs = []\n for name,_,stype in subsets:\n record = {\n \"_id\": name,\n \"type\": stype\n }\n docs.append(record)\n index+=1\n return docs",
"def subsets(self) -> list[str]:\n return list(self._subsets.keys())",
"def subsets(self):\n \n # note subsets have an unusual encoding\n query = \"\"\"\n prefix oboInOwl: <http://www.geneontology.org/formats/oboInOwl#>\n SELECT DISTINCT ?s WHERE {{\n GRAPH <{g}> {{\n ?c oboInOwl:inSubset ?s \n }}\n }}\n \"\"\".format(g=self.graph_name)\n bindings = run_sparql(query)\n return [r['s']['value'] for r in bindings]",
"def ifc_subsets(me, ifc):\n return me._ifc_subs.get(ifc, frozenset())",
"def get_subset(self, name):\n assert name in ['train', 'valid', 'test'], f'Invalid dataset name! {name} has been provided but must be one of [train, valid, test]'\n return self.subsets[name]",
"def test_archive_subset(bbapi):\n subset_name = \"productsSoftware\"\n file_format = \"json\"\n _ = bbapi.bulk.archive_subset(subset_name, file_format)",
"def build_subsets(self):\n self.all = nrn.SectionList()\n self.all.wholetree(sec=self.soma)",
"def subset_gen(itemSet):\n subsets = []\n for i in range(1, len(itemSet)):\n c = combinations(itemSet, r=i)\n for cc in c:\n subsets.append(set(cc))\n return subsets",
"def internalSubset(self, name, externalID, systemID):\n pass",
"def request_subset_by_siteid(prod, band, network, siteid, start_date, end_date, \n qcfiltered = False):\n \n modis_start = modis_to_from_pydatetime(start_date)\n modis_end = modis_to_from_pydatetime(end_date)\n subset_str = '/subsetFiltered?' if qcfiltered else '/subset?'\n url_str = (''.join([api_base_url, prod, '/', network, '/', siteid, \n subset_str, band, '&startDate=', modis_start,\n '&endDate=', modis_end]))\n header = {'Accept': 'application/json'}\n response = requests.get(url_str, headers = header)\n subset = (json.loads(response.text))\n return _process_data([subset], band)",
"def request_subset_create(self, request):\n user_id = request['user_id']\n workspace_uuid = request['workspace_uuid']\n subset_uuid = request['subset_uuid']\n new_alias = request['alias']\n \n return_dict = self.copy_subset(user_id, \n workspace_uuid=workspace_uuid, \n subset_source_uuid=subset_uuid, \n subset_target_alias=new_alias)\n if return_dict:\n subset_uuid = return_dict['uuid']\n else:\n uuid_mapping = self._get_uuid_mapping_object(user_id)\n subset_uuid = uuid_mapping.get_uuid(alias=new_alias, user_id=user_id)\n response = self.dict_subset(workspace_unique_id=workspace_uuid, \n subset_unique_id=subset_uuid)\n \n return response",
"def externalSubset(self, name, externalID, systemID):\n pass",
"def get_item_sets_name():\n with open(SETS_NAME_FILE) as sets_file:\n sets = sets_file.read()\n return json.loads(sets)",
"def k_subsets(set_, k):\n ensure_countable(set_)\n\n if not isinstance(k, Integral):\n raise TypeError(\"subset cardinality must be a number\")\n if not (k >= 0):\n raise ValueError(\"subset cardinality must be positive\")\n if not (k <= len(set_)):\n raise ValueError(\"subset cardinality must not exceed set cardinality\")\n\n result = combinations(set_, k)\n return _harmonize_subset_types(set_, result)",
"def test_subset_imask_non_iterable_subset(self, model_data, imask_subset_config):\n foreach = [\"nodes\", \"techs\"]\n imask = _imask_foreach(model_data, foreach)\n\n with pytest.raises(TypeError) as excinfo:\n _subset_imask(\n \"foo\", AttrDict({\"foreach\": foreach, \"subset.nodes\": \"bar\"}), imask\n )\n assert check_error_or_warning(\n excinfo,\n \"set `foo` must subset over an iterable, instead got non-iterable `bar` for subset `nodes`\",\n )",
"def subsets(self):\n return set(self.subset_map.values())",
"def _get_package_items(self):\r\n mask = \"mask[description,capacity,prices.id,categories[name,id]]\"\r\n package = self.client['Product_Package']\r\n return package.getItems(id=46, mask=mask)",
"def inventory(self):\n data = self.client.inventory(self.creds, self.transaction, self.environment)\n return list(data) if isinstance(data, set) else data",
"def _validate_subsets(self, subsets: Sequence[str]) -> Sequence[str]:\n if not subsets:\n raise ValueError(\"no subsets specified\")\n for subset in subsets:\n if subset not in self.data_files.keys():\n raise ValueError(f\"{subset} is not valid\")\n return subsets",
"def set_prodmaterials(_craftable):\n partial_resources = []\n partial_craftables = []\n _craftable.resources_list = []\n _craftable.craftables_list = []\n\n # Checking for Production Materials\n for material, quantity in _craftable.craft_materials.items():\n # Checking for Resources or Workbench Need\n if 'Minimum Bench Cost' in material or material in resources.keys():\n _craftable.resources_list.append([material, int(quantity)])\n _craftable.res_totalcost = float(_craftable.res_totalcost + resources[material].unit_price * int(quantity))\n\n # Check for sub-craftables as a need\n elif material not in resources.keys():\n _craftable.craftables_list.append([material, int(quantity)])\n\n return partial_resources, partial_craftables",
"def _getSubset(self, label):\n\n if self._flags is None:\n self._flags = {}\n self._subsets = {}\n elif flags.TIMESTAMP != self._flagsts:\n self._resetFlags()\n self._flagsts = flags.TIMESTAMP\n\n try:\n return self._subsets[label]\n except KeyError:\n flgs = self._getFlags(label)\n try:\n return self._subsets[label]\n except KeyError:\n indices = flgs.nonzero()[0]\n self._setSubset(label, indices)\n return indices.copy()",
"def list_subset_contents(set_id):\n #TODO: why don't we just return a list of IDs in both cases?\n #TODO: why do we need to return the ID?\n\n contents = {}\n requested_set = Set.query.get(set_id)\n\n contents[\"text\"] = requested_set.name\n contents[\"id\"] = requested_set.id\n contents[\"date\"] = requested_set.date\n contents[\"type\"] = requested_set.type\n\n if requested_set.type == \"sequenceset\":\n contents[\"phrases\"] = [sequence.sequence for sequence in\n requested_set.sequences]\n\n else:\n contents[\"ids\"] = [item.id for item in requested_set.get_items()]\n\n return jsonify(contents)",
"def test_get_sets_by_category():\n\tgroup_categories = get_sets_by_category(mb, \"Group\")\n\tassert len(group_categories) == 5"
] | [
"0.66228426",
"0.658018",
"0.6361112",
"0.621814",
"0.61558557",
"0.6032727",
"0.58454335",
"0.5843885",
"0.5832506",
"0.5706368",
"0.5574686",
"0.55048376",
"0.54442704",
"0.54374105",
"0.5397158",
"0.5358623",
"0.5331448",
"0.5311054",
"0.52144516",
"0.5145197",
"0.5139706",
"0.51380366",
"0.5100597",
"0.50630414",
"0.5023523",
"0.5002388",
"0.49929154",
"0.49668872",
"0.49652922",
"0.4961342"
] | 0.7219529 | 0 |
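For the two Bricklink wrapper rows above (get_supersets / get_subsets), a hedged usage sketch: the StubClient and the payload layout (groups of 'entries' carrying 'item' and 'quantity') are assumptions made for illustration, not taken from the Bricklink API documentation:

```python
# Illustration only: a stub standing in for the REST client whose
# get_subsets()/get_supersets() wrappers appear above.
class StubClient:
    def get_subsets(self, itemid, itemtypeid):
        # Canned response; the real client performs an authenticated HTTP call.
        return [
            {"entries": [
                {"item": {"no": "3001", "type": "PART"}, "quantity": 4},
                {"item": {"no": "3003", "type": "PART"}, "quantity": 2},
            ]},
            {"entries": [
                {"item": {"no": "3622", "type": "PART"}, "quantity": 1},
            ]},
        ]

def total_part_count(client, set_no):
    """Sum the quantities of every entry returned for a set's subsets."""
    total = 0
    for group in client.get_subsets(set_no, "SET"):
        for entry in group.get("entries", []):
            total += entry.get("quantity", 0)
    return total

if __name__ == "__main__":
    print(total_part_count(StubClient(), "75192-1"))  # -> 7
```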
Generic function to extract a list from a binary file. | def read_list_bin(file_name):
try:
extracted_list = []
with open(file_name, "rb") as binary_file:
extracted_list = pickle.load(binary_file)
return extracted_list
except FileNotFoundError:
print("File not found: ",file_name)
except Exception as e:
print(type(e), e)
sys.exit() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _decode_list(fp):\n tag_id = _decode_byte(fp)\n size = _decode_int(fp)\n return [_MAP[tag_id](fp) for _ in range(size)]",
"def read_file_into_list(source_file):\n\twith open(source_file, 'r') as source:\n\t\tdata = base64.b64encode(source.read())\n\t\treturn [data[i:i+SPLIT_LENGTH] for i in range(0, len(data), SPLIT_LENGTH)]",
"def read_bytes_to_list(path):\n vstup = []\n index = 0\n with open(path, \"rb\") as f:\n\n byte = f.read(1)\n while byte != '':\n index = index + 1\n vstup.append(struct.unpack('b', byte)[0])\n byte = f.read(1)\n\n if not byte:\n break\n return vstup",
"def _to_list(self, file_str):\n data_list = file_str.split()\n return data_list",
"def read_file_into_list(source_file):\n with open(source_file, 'r') as source:\n data = base64.b64encode(source.read())\n return [data[i:i+SPLIT_LENGTH] for i in range(0, len(data), SPLIT_LENGTH)]",
"def read_list(fname):\n with open(fname) as handle:\n items = [line.strip() for line in handle]\n return items",
"def FileList(file):\n with open(file,\"r\") as f:\n list1 = [r.split()[1] for r in f]\n list1 = [int(i) for i in list1]\n return list1",
"def file_to_list(path):\n fd = open(path)\n t = list()\n for line in fd:\n t += process_line(line)\n\n return t",
"def get_file_as_list(filename):\n try:\n with open(filename, 'r') as f:\n output = f.read().split(\"\\n\")\n cprint(\"Loaded file: %s\" % filename)\n except IOError:\n log(\"Cannot open file: %s (%s)\" % (filename, str(sys.exc_info())))\n output = []\n return output",
"def get_listfile(self, datadir):\n return []",
"def convert_input_to_list():\n\n f = open('pizza_source.txt', 'r')\n file_to_list = f.read().split('\\n')\n\n return file_to_list",
"def read_list(self, register, length):\n raise NotImplementedError",
"def parse_file(self, file_path) -> list:\n data = []\n with open(file_path, 'rb') as f:\n lines = pickle.load(f)\n for line in lines:\n input, output = line\n if input.strip() == \"\" or output.strip() == \"\":\n continue\n input_len = len(input.split())\n output_len = len(output.split())\n if input_len > 50 or output_len > 50:\n continue\n data_item = Text2TextDataItem(input_text=input, output_text=output, tokenizer=self.tokenizer,\n share_vocab=self.share_vocab)\n data.append(data_item)\n return data",
"def convert_file_to_list(file_path, separator=\"\\n\"):\n # gets the content of the file\n contents = read_file(file_path)\n # splits the content by a separator\n return contents.split(separator)",
"def test_deserialize_list():\n input = bytes([\n *UnsignedInt.to_bytes(5),\n *UnsignedInt.to_bytes(1),\n *UnsignedInt.to_bytes(2),\n *UnsignedInt.to_bytes(3),\n *UnsignedInt.to_bytes(4),\n *UnsignedInt.to_bytes(5),\n ])\n assert [1, 2, 3, 4, 5] == List(UnsignedInt).read(input)",
"def load_file(file, split_value=\"\\n\", cast_int=False):\n try:\n input_file = open(file, \"r\")\n output_list = input_file.read().split(split_value)\n input_file.close()\n\n if cast_int:\n return [int(i) for i in output_list]\n\n return output_list\n except IOError:\n sys.exit(\"ERROR: Cannot load file: %s\" % file)",
"def listfromfilelines(file):\r\n with open(file, 'r') as f:\r\n list = [line.strip().decode('utf-8') for line in f]\r\n return list",
"def getlistfromtext(self,filename):\n l=[]\n\n if self.encoding:\n f = codecs.open(filename,\"r\",encoding=self.encoding)\n for line in f:\n l.append(line.rstrip())\n f.close()\n\n else:\n f = open(filename,\"r\")\n for line in f:\n l.append(line.rstrip())\n f.close()\n return l",
"def file_to_list(file_name):\r\n fr = open(file_name, encoding = 'utf-8')\r\n l = [line.strip() for line in fr]\r\n fr.close()\r\n return l",
"def read_file_data_to_list(file_name):\r\n file = open(file_name, \"r\")\r\n data = file.readlines() # reads rows of data into a list object\r\n file.close()\r\n return data",
"def Read2000256List(self):\n items = []\n for i in range(0, 2000):\n data = self.ReadBytes(64)\n ba = bytearray(binascii.unhexlify(data))\n ba.reverse()\n items.append(ba.hex().encode('utf-8'))\n return items",
"def LoadListFile(file):\n\tlst = []\n\ttry:\n\t\twith open(file,'r') as f:\n\t\t\tfor line in f:\n\t\t\t\tline = line.rstrip()\n\t\t\t\tlst.append(line)\n\texcept:\n\t\treturn []\n\treturn lst",
"def loadList(file_name):\n with open(file_name) as f:\n l = [line.strip() for line in f]\n return l",
"def blob_to_list(blob):\r\n splits = blob.split(blob_delimiter)\r\n items = []\r\n for item in splits:\r\n items.append(item.replace(blob_delimiter_replacement, blob_delimiter))\r\n return items",
"def make_list(list_file):\r\n return [line.strip('\\n').strip('\\r') for line in open(list_file)]",
"def load_pickle_to_list(filename, squeeze=True):\n f = open(filename,'r')\n data = []\n while True:\n try:\n data.append(pickle.load(f))\n except EOFError:\n break\n assert len(data) >= 1\n f.close()\n \n # if length is one, might as well just 'squeeze' it...\n if len(data) == 1 and squeeze:\n return data[0]\n else:\n return data",
"def get_file_contents_as_list(file_name):\n with open(file_name) as file:\n data = file.read().splitlines()\n return data",
"def load_list(name, path, offset, chunk_length=0):\n ret = []\n with open(os.path.join(path, name) + '.pkl', 'rb') as f:\n f.seek(offset)\n if chunk_length == 0:\n while True:\n try:\n ret.append(pickle.load(f))\n except:\n return ret\n for i in range(chunk_length):\n try:\n ret.append(pickle.load(f))\n except:\n return ret, f.tell()\n return ret, f.tell()",
"def load_list(filename):\n # Open the file\n with open(filename, 'r', newline='') as f:\n # Use the CSV library to load the file\n reader = csv.reader(f)\n # Return the full list to the caller of the function. The 'list' in this line converts the 'reader' object to a list type\n # using a process called 'casting'. https://www.w3schools.com/python/python_casting.asp\n return(list(reader))\n #endwith",
"def read_file_into_list(filename):\n with open(filename) as file:\n return file.readlines()"
] | [
"0.70250535",
"0.66882926",
"0.6643177",
"0.65935946",
"0.655588",
"0.6189796",
"0.61198866",
"0.6115346",
"0.609496",
"0.6092333",
"0.6032443",
"0.6011349",
"0.6009787",
"0.60039115",
"0.5989868",
"0.5981291",
"0.5955054",
"0.59242487",
"0.5908565",
"0.5890663",
"0.58747673",
"0.5864051",
"0.5861675",
"0.5860981",
"0.5860416",
"0.5857758",
"0.58530486",
"0.5852783",
"0.5847733",
"0.583489"
] | 0.7575874 | 0 |
Generic function to write a list to a binary file (replace content). | def write_list_bin(inserted_list, file_name):
try:
with open(file_name, "wb") as binary_file:
pickle.dump(inserted_list, binary_file)
except Exception as e:
print(type(e), e)
sys.exit() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write(lst):\n # TODO",
"def save_list_to_file(content: list, dst_path: str, append=False) -> None:\n with io.open(file=dst_path, mode=\"a\" if append else \"w\", encoding='utf-8') as destination_file:\n for element in content:\n destination_file.write(element + \"\\n\")",
"def save_list_to_file(the_list, filepath):\n with open(filepath, 'w') as file_handler:\n for item in the_list:\n file_handler.write(\"{}\\n\".format(item))",
"def write_file(file_path, payload_list):\n\n with open(file_path, \"wb\") as f:\n count = 0\n while count < len(payload_list):\n f.write(payload_list[count])\n count = count + 1",
"def __writeToFile(self, filePath, lst): \n \n if not self.outDir is None: \n filePath = os.path.join(self.outDir, filePath) \n \n open(filePath,'a').writelines(lst)",
"def list_to_file(l, file_name):\r\n fw = open(file_name, 'w', encoding = 'utf-8')\r\n fw.write('\\n'.join(l))\r\n fw.close()",
"def save_list(list_data, path, lineterminator='\\n', encoding=None, mode='w'):\n with open(path, mode) as f:\n list_data = [item + lineterminator for item in list_data]\n if encoding is not None:\n list_data = [item.encode(encoding) for item in list_data]\n\n f.writelines(list_data)",
"def create_data_file_from_list(lst, out_filename, dtype, shape):\n with open(out_filename, 'wb+') as out_file:\n out_file = open(out_filename, 'wb+')\n dat_file = np.memmap(out_file, dtype=dtype, shape=shape)\n dat_file[:] = lst[:]\n dat_file.flush()\n size = float(dat_file.nbytes) / (1024 ** 2)\n print('written %s : %.3f MB' % (out_filename, size))",
"def write_list_to_file(file_name: str, list_name: List[str]):\n # Write to a file, overwriting the old contents\n file = open(file_name, 'w')\n\n # Loop through the list, append a newline character to each line\n for item in list_name:\n file.writelines(item + '\\n')\n\n # Close the file\n file.close()",
"def write_list_to_file(myList, filename):\r\n\r\n with open(filename, \"w\") as outfile:\r\n for entries in myList:\r\n outfile.write(entries)\r\n\t\t\t# add a return after each line\r\n outfile.write(\"\\n\")",
"def SaveListFile(file,lst):\n\tlst = [str(i) +\"\\n\" for i in lst]\n\tif len(lst) == 0:\n\t\treturn\n\twith open(file,'w') as f:\n\t\tf.writelines(lst)\n\treturn lst",
"def write_into_file(name, liste):\n file = open(name, \"w\")\n for item in liste:\n file.write(item)\n file.write('\\n')\n file.close()",
"def write_list(l, fname):\n thefile = open(fname, \"w\")\n for line in l:\n thefile.write(\"%s\\n\" % line)\n thefile.close()",
"def save_to_file(cls, list_objs):\n the_list = []\n if list_objs is not None:\n for stuff in list_objs:\n new_stuff = stuff.to_dictionary()\n the_list.append(new_stuff)\n the_list = Base.to_json_string(the_list)\n with open(\"{}.json\".format(cls.__name__), mode='w') as f:\n f.write(str(the_list))",
"def bin_writer(fpath, fname, data):\n path = fpath + fname + '.dat'\n with open(path, 'ab') as file:\n for row in data:\n file.write(row.encode('utf-8'))\n return None",
"def write_list(self, register, data):\n raise NotImplementedError",
"def write_list_to_file(program, list_to_write):\n with open(program.split('.')[0] + \".output.json\", 'a+') as output_file:\n output_file.write(json.dumps(list_to_write, indent=3, sort_keys=False))",
"def write_list(args, file_list):\n if not args.listfile.endswith(\".txt\"):\n args.listfile += \".txt\"\n outputfile = open(args.listfile, 'w')\n for name in file_list:\n outputfile.write(name)\n outputfile.write(\"\\n\")\n outputfile.close()",
"def save_list(lines, filename):\n data = '\\n'.join(lines)\n file = open(filename, 'w')\n file.write(data)\n file.close()",
"def write_lines(list_of_lines, file):\r\n for i in range(0, len(list_of_lines)):\r\n file.write(list_of_lines[i] + b\"\\n\")",
"def write_list_to_file(ls, save_path):\n # Open in appendation mode given that this function may be called multiple\n # times on the same file (positive and negative sentiment are in separate\n # directories).\n out_file = open(save_path, \"w+\")\n for example in ls:\n out_file.write(example)\n out_file.write('\\n')",
"def csv_save_list(list_data, path, lineterminator='\\n', encoding=None):\n with open(path, 'w') as f:\n writer = csv.writer(f, lineterminator=lineterminator)\n for item in list_data:\n if encoding is not None:\n writer.writerow([item.encode(encoding)])\n else:\n writer.writerow([item])",
"def write_list(self):\n with open(self.path, 'w') as file:\n for i in map(self.addziros, range(1, int(str(1) + self.number_length * '0') + 1)):\n file.write(i + '\\n')\n file.close()",
"def lst_to_file(comment_lst, fullpathname):\n with open(fullpathname, 'w') as writer:\n for comment in comment_lst:\n clean_raw_comment = repr(comment).lstrip('\"\\'').rstrip('\"\\'')\n writer.write(clean_raw_comment + '\\n')",
"def save_list_of_list(data, path, lineterminator='\\n', encoding=None):\n with open(path, 'w') as f:\n writer = csv.writer(f, lineterminator=lineterminator)\n if encoding is not None:\n data = [[item.encoding(encoding) for item in items]\n for items in data]\n writer.writerows(data)",
"def writeList2File(filename, array, overwrite=False, separator=';'):\n mode = 'a'\n if overwrite:\n mode = 'w'\n file = open(filename, mode)\n file.write(separator.join(map(str,array)) + '\\n')",
"def save_to_file(cls, list_objs):\n filename = cls.__name__ + \".json\"\n new_list = []\n with open(filename, \"w\") as fp:\n if list_objs is None:\n fp.write(\"[]\")\n else:\n for objs in list_objs:\n new_list.append(cls.to_dictionary(objs))\n fp.write(cls.to_json_string(new_list))",
"def write_string_list_to_file(string_list, filename):\n with open(filename, 'w') as f:\n for element in string_list:\n f.write(element+'\\n')",
"def writeStrListToFile(ldata, filePath, delem=\",\"):\n\twith open(filePath, \"w\") as fh:\n\t\tfor r in ldata:\n\t\t\tif type(r) == list:\n\t\t\t\tr = delem.join(r)\n\t\t\tfh.write(r + \"\\n\")",
"def save_to_file(cls, list_objs):\n namefile = cls.__name__ + \".json\"\n rep_list = []\n if list_objs is not None and list_objs != []:\n for item in list_objs:\n repre = cls.to_dictionary(item)\n # rep_list.append(cls.to_json_string(repre))\n rep_list.append(repre)\n\n with open(namefile, \"w\", encoding=\"UTF-8\") as f:\n # json.dump(rep_list, f)\n f.write(cls.to_json_string(rep_list))"
] | [
"0.7168718",
"0.684559",
"0.680929",
"0.6803841",
"0.6670353",
"0.66512245",
"0.6639571",
"0.6587721",
"0.6537934",
"0.6437597",
"0.63995486",
"0.63601077",
"0.6337326",
"0.6311502",
"0.6271015",
"0.6202361",
"0.61834747",
"0.6147879",
"0.61425006",
"0.6129202",
"0.6047813",
"0.60407335",
"0.60124695",
"0.60088664",
"0.5999961",
"0.5997965",
"0.5977509",
"0.5977293",
"0.59599864",
"0.59490585"
] | 0.7379721 | 0 |
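The write_list_bin / read_list_bin rows above amount to a plain pickle round-trip; below is a self-contained standard-library equivalent (using a temporary file rather than a caller-supplied path, and returning instead of calling sys.exit):

```python
import os
import pickle
import tempfile

def demo_round_trip(items):
    """Write a list with pickle.dump and read it back with pickle.load,
    mirroring what write_list_bin/read_list_bin above do."""
    fd, path = tempfile.mkstemp(suffix=".bin")
    os.close(fd)
    try:
        with open(path, "wb") as binary_file:
            pickle.dump(items, binary_file)
        with open(path, "rb") as binary_file:
            return pickle.load(binary_file)
    finally:
        os.remove(path)

if __name__ == "__main__":
    original = ["alpha", 2, {"nested": True}]
    assert demo_round_trip(original) == original
    print("round trip ok")
```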
Generic function to check if an index already exists in a list of AutoBaseObject. | def index_already_there(index, given_list):
# check if ID already exists
already_there = False
if len(given_list)>0:
for item in given_list:
if isinstance(item, AutoBaseObject):
if item.ID == index:
already_there = True
break
else:
print("Issue with list: item is not AutoBaseObject")
print(" index=\n",index)
sys.exit()
return already_there | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_object_repeated(lists, obj):\n for any_obj in lists:\n if check_tuples(any_obj['indexes'], obj['indexes']):\n return None\n return obj",
"def __contains__(self, item):\n return item in self._index_map",
"def __contains__(self, item: Any) -> bool:\n return item in self.item_to_index",
"def __contains__(self, idx):\n return idx in self._data",
"def has(self, index):\n raise NotImplementedError()",
"def _check_indexes(cls, document: dict) -> bool:\n criteria = [\n field_name\n for field_name in cls._get_index_fields(IndexType.Other, document, \"\")\n ]\n unique_criteria = [\n field_name\n for field_name in cls._get_index_fields(IndexType.Unique, document, \"\")\n ]\n index_name = f\"idx{cls.__collection_name__}\"\n unique_index_name = f\"uidx{cls.__collection_name__}\"\n indexes = cls.__collection__.list_indexes()\n cls.logger.debug(f\"Checking existing indexes: {indexes}\")\n indexes = {\n index[\"name\"]: index[\"key\"].keys()\n for index in indexes\n if \"name\" in index and \"key\" in index\n }\n return (\n (criteria and index_name not in indexes)\n or (not criteria and index_name in indexes)\n or (criteria and index_name in indexes and criteria != indexes[index_name])\n or (unique_criteria and unique_index_name not in indexes)\n or (not unique_criteria and unique_index_name in indexes)\n or (\n unique_criteria\n and unique_index_name in indexes\n and unique_criteria != indexes[unique_index_name]\n )\n )",
"def does_exist(self, index):\n if index in self.map:\n return True\n return False",
"def get_indexed_item_from_list(index, given_list):\n\n returned_item = None\n\n if len(given_list)>0:\n for item in given_list:\n if isinstance(item, AutoBaseObject):\n if item.ID == index:\n returned_item = item\n break\n else:\n print(\"Issue with list: item is not AutoBaseObject\")\n print(\" index=\\n\",index)\n sys.exit()\n return returned_item",
"def has_index(self, index):\n return index in [s[0] for s in self.get_index_list()]",
"def __contains__(self, item):\n # return item in self._items\n # leverage improved performance index() function\n try:\n self.index(item)\n return True\n except ValueError:\n return False",
"def __contains__(self, index):\r\n\r\n return index in self._contents",
"def _idxs_are_present(self, *args):\n return set(args).issubset(set(range(self.n_atoms)))",
"def __contains__(self, key):\n return (key in self.index)",
"def check_indexes(check_func, value):\n\tfor set_name, index_path in zip(SET_NAMES, INDEX_PATHS):\n\t\tcheck_func(set_name, index_path, value)",
"def __contains__(self, index):\n\n return index in self._contents",
"def __contains__(self, atom_idx):\n if isinstance(atom_idx, Atom):\n return self.atom_list.__contains__(atom_idx)\n elif isinstance(atom_idx, str):\n return self.atom_dict.__contains__(atom_idx)\n raise TypeError, atom_idx",
"def __contains__(self, key):\n return key in self._index",
"def __contains__(self, i):\n return i in self._ar",
"def objExists(*args, **kwargs)->bool:\n pass",
"def _checkIndex(self, index):\n # OPT: lets not reuse isKnown, to don't incure 1 more function\n # call\n if not self._items.has_key(index):\n raise KeyError, \\\n \"%s of %s has no key '%s' registered\" \\\n % (self.__class__.__name__,\n self.__owner.__class__.__name__,\n index)",
"def exist(self,list,a):\r\n\t\ti = 0\r\n\t\tfor elem in list:\r\n\t\t\tif (elem == a):\r\n\t\t\t\ti=i+1\r\n\t\tif (i>0):\r\n\t\t\treturn True\r\n\t\telse:\r\n\t\t\treturn False",
"def index_exists(self, index):\n req = requests.head(\n urljoin(self.base_url, '{0}'.format(index)),\n verify=self.verify_certs)\n return req.status_code == 200",
"def __contains__(self, record):\n with self.session as session:\n query = session.query(IndexRecord)\n query = query.filter(IndexRecord.did == record)\n\n return query.exists()",
"def index_object(idxs=None):",
"def exists(self, obj):\n return False",
"def index_is_in_list(the_list, index):\n return bool(0 <= index < len(the_list))",
"def __contains__(self, obj):\n pass",
"def IndexExists(self, arg0: 'unsigned long long') -> \"bool\":\n return _itkQuadEdgeCellTraitsInfoPython.itkMapContainerULLQEMPF3GQEULLULLBBT_IndexExists(self, arg0)",
"def IndexExists(self, arg0: 'unsigned long long') -> \"bool\":\n return _itkQuadEdgeCellTraitsInfoPython.itkMapContainerULLQEMPF2GQEULLULLBBT_IndexExists(self, arg0)",
"def id_in_list(obj_list, sb_object):\n if __debug__:\n print(\"Checking if sb_object in list...\")\n for sb_objects in obj_list:\n if sb_object.ID == sb_objects.ID:\n if __debug__:\n print(\"Object in list.\")\n return True\n if __debug__:\n print(\"Object not in list\")\n return False"
] | [
"0.6840094",
"0.6636097",
"0.65430546",
"0.6385054",
"0.6328272",
"0.6273341",
"0.6229249",
"0.62248135",
"0.6148913",
"0.61416686",
"0.6109743",
"0.6041912",
"0.5994398",
"0.59898144",
"0.59320223",
"0.5921886",
"0.59082884",
"0.5905032",
"0.5897877",
"0.58703214",
"0.58546203",
"0.58528864",
"0.5812772",
"0.5807456",
"0.5788474",
"0.57650214",
"0.5761512",
"0.573809",
"0.57271045",
"0.57146674"
] | 0.80496514 | 0 |
Generic function to get an indexed entry from a list of AutoBaseObject. | def get_indexed_item_from_list(index, given_list):
returned_item = None
if len(given_list)>0:
for item in given_list:
if isinstance(item, AutoBaseObject):
if item.ID == index:
returned_item = item
break
else:
print("Issue with list: item is not AutoBaseObject")
print(" index=\n",index)
sys.exit()
return returned_item | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def index_object(idxs=None):",
"def get_entry(obj, *path):\n\n try:\n for elem in path:\n is_index = isinstance(elem, int)\n is_list = isinstance(obj, list)\n if is_index != is_list:\n raise UpdateException('index given for non-list or vice versa')\n obj = obj[elem]\n return obj\n except Exception as ex:\n path_str = '/'.join(map(str, path))\n msg = f'unable to access object path \"/{path_str}\"'\n raise UpdateException(msg) from ex",
"def find_index(self, obj):\n return self.model.indexlist[obj]",
"def __getitem__(self, item):\n if isinstance(item, str):\n item = [i for i, v in enumerate(self.list) if item == v.name]\n if len(item) > 0:\n item = item[0]\n return self.list[item]",
"def __getitem__(self, index):\n try:\n if isinstance(index, int):\n # the only reliable way is to iterate up to the index:\n return next(islice(self, index, None))\n if isinstance(index, slice):\n return list(islice(self, index.start, index.stop, index.step))\n else:\n key_return = list(self._dictitem_gen(index))\n if self.KEY_ACCESS_REDUCE_SINGLETONS and len(key_return) == 1:\n return key_return[0]\n else:\n return key_return\n except StopIteration:\n raise IndexError(\"list index out of range\")",
"def __getitem__(self, index: int) -> object:\n return self.get_at_index(index)",
"def __getitem__( self, index ) :\n\n return( self.__entries[index] )",
"def __getitem__(self, key):\n return self.list[key]",
"def get_object_by_id(self, object_list, object_id):\n obj = None\n for i in object_list:\n if i.get_id() == object_id:\n obj = i\n break\n return obj",
"def get_from_list(self,list_,index):\r\n\r\n\r\n try:\r\n return list_[self._index_to_int(index)]\r\n except IndexError:\r\n self._index_error(list_,index)",
"def __getitem__(self, index):\n return self._record_list[index]",
"def get(self, idx):\n if idx in self._objects:\n return self._objects[idx]\n else:\n warning(\"%s not found\" % idx)\n return None",
"def __getitem__(self, item):\n return self.getList()",
"def __getitem__(self, index):\n return self.to_list()[index]",
"def get(*, list : Union[List[Any], ConduitVariable], index : int) -> Any:\n return list[index]",
"def __index__(self, ???):",
"def __getitem__(self,idx):\n try:\n return self._cache[idx]\n except:\n pass\n\n try:\n # return full data entry as list\n out = self._data[idx]\n self._cache[idx] = out\n return out\n except:\n try:\n # return data entry with specified key word\n out = self._data[idx[0]][self._header[self._alias[idx[1]]]]\n self._cache[idx] = out\n return out\n except:\n pass",
"def _get_item(self, cont, index):\n # make sure the given object is a container:\n if not isinstance(cont, collections.Container):\n raise Exception(\"'%s': not a container: cannot index '%s' in '%s'\"\n % (self.name, index, cont))\n\n # try and return the element. Even an exception may or may\n # not be specified (ex: CFN's GetAtt: AvailabilityZone):\n try:\n # NOTE: we can't just test with 'in' here as we may\n # be trying to index a list:\n return cont[index]\n except (IndexError, KeyError):\n # if not found; make sure it's not an exception:\n if index in self._exceptions:\n # just log the event and return the arg directly:\n LOG.warn(\"'%s': get exception applied for '%s'. Defaulting to\"\n \" '%s'.\", self.name, index, self._exceptions[index])\n return self._exceptions[index]\n else:\n # rock bottom:\n raise FunctionApplicationException(\n \"'%s': index '%s' missing from :'%s'\" % (\n self.name, index, cont\n )\n )",
"def get_obj(self, idx):\n if idx >= self.object_dataset.get_nb_obj():\n raise ValueError(\"idx is greater than the number of objects\")\n return self.object_dataset.get_obj(idx)",
"def get(self, index):\n self.__validate_index(index)\n return self.__list[index]",
"def find_object(field, list):\n for item in list:\n if item.name == field:\n return item\n return None",
"def __getitem__(self, idx):\n return self.items[idx]",
"def select_object_at_index(self, index):\n\t\treturn self.object_list[index]",
"def findByIndex(self, obj_index):\n return self.registry.findByIndex(obj_index)",
"def GetEntityByItem(self,i):\n\t\treturn self.Space.Item(i)",
"def __getitem__(self, item: str) -> Account:\n return self.accounts[item]",
"def _get_tracklet(tracks: dict, idx: int) -> list:\n target = [t for t in tracks.values() if t[0] == idx]\n if target:\n return target[0]\n else:\n raise ValueError(\"Object ID not found.\")",
"def __getitem__(self, x):\n return self.query(x)",
"def index(self, item, **kwargs):\n # type: (Any, dict) -> int\n return list.index(self, self.ref(item), **kwargs)",
"def __getitem__(self, index):\r\n return self._items[index]"
] | [
"0.611796",
"0.5894193",
"0.5874042",
"0.58262813",
"0.57515997",
"0.573731",
"0.57367045",
"0.56926435",
"0.5678464",
"0.5667245",
"0.5613185",
"0.558198",
"0.55727726",
"0.55678636",
"0.5559255",
"0.55462104",
"0.5529028",
"0.5526751",
"0.552012",
"0.5509918",
"0.5496633",
"0.5495248",
"0.548764",
"0.5470534",
"0.54562676",
"0.5448505",
"0.54435307",
"0.54116243",
"0.5406097",
"0.5399106"
] | 0.75683933 | 0 |
Generic function to get an indexed entry from a list of AutoBaseObject stored in a binary file. | def get_indexed_item_from_file(index, file_name):
list_in_file = read_list_bin(file_name)
return get_indexed_item_from_list(index, list_in_file) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_indexed_item_from_list(index, given_list):\n\n returned_item = None\n\n if len(given_list)>0:\n for item in given_list:\n if isinstance(item, AutoBaseObject):\n if item.ID == index:\n returned_item = item\n break\n else:\n print(\"Issue with list: item is not AutoBaseObject\")\n print(\" index=\\n\",index)\n sys.exit()\n return returned_item",
"def get_entry(obj, *path):\n\n try:\n for elem in path:\n is_index = isinstance(elem, int)\n is_list = isinstance(obj, list)\n if is_index != is_list:\n raise UpdateException('index given for non-list or vice versa')\n obj = obj[elem]\n return obj\n except Exception as ex:\n path_str = '/'.join(map(str, path))\n msg = f'unable to access object path \"/{path_str}\"'\n raise UpdateException(msg) from ex",
"def index_object(idxs=None):",
"def index_fobj(fobj):\n doc = fileobject_to_dict(fobj)\n if doc is not None:\n #print doc\n SOLR.add(doc)\n else:\n pass",
"def load_index(self, fn):\n name = fn.split('.pkl')[0]\n return utils.load_obj(name)",
"def fetch(index, outfile):\n populate_index(index, outfile=outfile)",
"def get_object_by_id(self, object_list, object_id):\n obj = None\n for i in object_list:\n if i.get_id() == object_id:\n obj = i\n break\n return obj",
"def ReadIndex_binary(indexfile, isPrintWarning = False):#{{{\n# return (indexList, headerinfo, dbfileindexList)\n indexList = []\n indexFileHeaderText = []\n size_indexfile = os.path.getsize(indexfile)\n cntReadByte = 0\n try:\n fpin=open(indexfile, \"rb\")\n vI = array('I')\n vI.fromfile(fpin,1)\n cntReadByte += vI.itemsize\n dumpedtext = fpin.read(vI[0])\n cntReadByte += vI[0]\n\n strs = dumpedtext.split(\"\\n\")\n origdbname = \"\"\n origversion = \"\"\n origext = \"\"\n origprefix = \"\"\n for line in strs:\n if not line or line[0] == \"#\":\n continue\n ss=line.split()\n if ss[0] == \"DEF_DBNAME\":\n if len(ss)>=2:\n origdbname=ss[1]\n elif ss[0] == \"DEF_VERSION\":\n if len(ss)>=2:\n origversion=ss[1]\n elif ss[0] == \"DEF_EXTENSION\":\n if len(ss)>=2:\n origext=ss[1]\n elif ss[0] == \"DEF_PREFIX\":\n if len(ss)>=2:\n origprefix=ss[1]\n if isPrintWarning:\n if origversion == \"\": \n msg = \"{}: Warning! No version info in the index file {}\"\n print(msg.format(sys.argv[0],indexfile), file=sys.stderr)\n elif origversion != version:\n msg = \"{}: Warning! Version conflicts. \"\\\n \"Version of the index file {} ({}) \"\\\n \"!= version of the program ({})\"\n print(msg.format(sys.argv[0], indexfile,\n origversion, version), file=sys.stderr)\n\n headerinfo = (origdbname, origversion, origext, origprefix)\n #read in other information\n vI = array('I')\n vI.fromfile(fpin,1)\n cntReadByte += vI.itemsize\n\n dumpedidlist=fpin.read(vI[0])\n cntReadByte += vI[0]\n\n idlist = dumpedidlist.split(\"\\n\")\n vI=array('I')\n vI.fromfile(fpin,1)\n cntReadByte += vI.itemsize\n\n numRecord = vI[0]\n if numRecord != len(idlist):\n msg = \"{}: numID ({}) != numRecord ({}) for indexfile {} \"\n print(msg.format(sys.argv[0], len(idlist),\n numRecord, indexfile), file=sys.stderr)\n\n sizeRecord_I = (array('B').itemsize + array('I').itemsize +\n array('I').itemsize)\n sizeRecord_L = (array('B').itemsize + array('L').itemsize +\n array('I').itemsize)\n sizeRecord = int(mybase.FloatDivision(size_indexfile - cntReadByte, numRecord))\n if abs(sizeRecord - sizeRecord_I) < abs(sizeRecord - sizeRecord_L):\n vIarray=[array('B'), array('I'), array('I')]\n else:\n vIarray=[array('B'), array('L'), array('I')]\n for i in range(3):\n vIarray[i].fromfile(fpin,numRecord)\n\n lastDBFileIndex = vIarray[0][numRecord-1]\n dbfileindexList = list(range(lastDBFileIndex+1))\n\n indexList.append(idlist)\n for i in range(3):\n indexList.append(vIarray[i])\n fpin.close()\n return (indexList, headerinfo, dbfileindexList)\n except IOError:\n msg = \"Failed to read index file {} in function {}\"\n print(msg.format(indexfile, sys._getframe().f_code.co_name), file=sys.stderr)\n return (None, None, None)",
"def import_equipment_object_in_array(path):\n conn = sqlite3.connect(path)\n c = conn.cursor()\n to_return = []\n for row in c.execute('SELECT * FROM '+\"Equipment\").fetchall():\n to_return.append(Equipment(row[0],row[1],row[2],row[3],row[4],row[5]))\n conn.close()\n return to_return",
"def load_byte_index(fp):\n data = json.load(fp)\n index = xml.ByteEncodingOrderedDict()\n for key, value in sorted(data.items(), key=lambda x: x[1]):\n index[key] = value\n return index",
"def find_index(self, obj):\n return self.model.indexlist[obj]",
"def __getitem__(self, index):\n return self._record_list[index]",
"def reader(list, index_list):\r\n\tnewlist = []\r\n\tfor i in index_list:\r\n\t\tnewlist.append(list[i])\r\n\treturn newlist",
"def get_item(filename, uuid):\n with open(os.fsencode(str(filename)), \"r\") as f:\n data = json.load(f)\n results = [i for i in data if i[\"uuid\"] == str(uuid)]\n if results:\n return results\n return None",
"def address_shop_index_in_list(filename):\r\n data = sort_by_address(filename)\r\n address_list = []\r\n for i in data:\r\n address_list.append([i[2],i[4],i[-1]])\r\n return address_list",
"def get_obj(self, idx):\n if idx >= self.object_dataset.get_nb_obj():\n raise ValueError(\"idx is greater than the number of objects\")\n return self.object_dataset.get_obj(idx)",
"def __getitem__(self, t):\n collection = {}\n\n # read raw data and unpack (if necessary)\n for typ in self.files.keys():\n scan_data = None\n if typ == \"label\":\n scan_data = np.fromfile(self.files[typ][t], dtype=np.uint16)\n else:\n scan_data = unpack(np.fromfile(self.files[typ][t], dtype=np.uint8))\n\n # turn in actual voxel grid representation.\n collection[typ] = scan_data.reshape(VOXEL_DIMS)\n\n return self.filenames[t], collection",
"def __getitem__(self,idx):\n try:\n return self._cache[idx]\n except:\n pass\n\n try:\n # return full data entry as list\n out = self._data[idx]\n self._cache[idx] = out\n return out\n except:\n try:\n # return data entry with specified key word\n out = self._data[idx[0]][self._header[self._alias[idx[1]]]]\n self._cache[idx] = out\n return out\n except:\n pass",
"def _getRecordBatch(idList) :\n handle = Entrez.efetch(db = \"nuccore\", rettype = \"gbwithparts\",\n retmode = \"text\", id = \",\".join(idList))\n r = handle.read()\n handle.close()\n return r",
"def __getitem__( self, index ) :\n\n return( self.__entries[index] )",
"def _get_object(data, position, obj_end, opts, dummy):\n obj_size = bson._UNPACK_INT(data[position:position + 4])[0]\n end = position + obj_size - 1\n if data[end:position + obj_size] != b\"\\x00\":\n raise bson.InvalidBSON(\"bad eoo\")\n if end >= obj_end:\n raise bson.InvalidBSON(\"invalid object length\")\n if _raw_document_class(opts.document_class):\n return (opts.document_class(data[position:end + 1], opts),\n position + obj_size)\n\n obj = _elements_to_dict(data, position + 4, end, opts, subdocument=True)\n position += obj_size\n if \"$ref\" in obj:\n return (bson.DBRef(obj.pop(\"$ref\"), obj.pop(\"$id\", None),\n obj.pop(\"$db\", None), obj), position)\n return obj, position",
"def __getitem__(self, index):\n out = super(ImageFromListDataset, self).__getitem__(index)\n out[\"id\"] = self._ids[index]\n return out",
"def fetchindexed(ad):\n\n # Add the macro to the list of recognized macros.\n ad.AddMacro('.fetchindexed', 3, [ ['','symbol'] ]);\n\n # Define the macro functionality.\n def emitFunction(ad,fp,argument):\n (addr,ixBank,bankName) = ad.Emit_GetAddrAndBank(argument[0]);\n ad.EmitPush(fp,addr,ad.Emit_String(argument[0]['value']),argument[0]['loc']);\n ad.EmitOpcode(fp,ad.InstructionOpcode('+'),'+');\n ad.EmitOpcode(fp,ad.specialInstructions['fetch'] | ixBank,'fetch '+bankName);\n\n ad.EmitFunction['.fetchindexed'] = emitFunction;",
"def __getitem__(self, index: Any) -> Any:\n return self.contents[index]",
"def __getitem__(self, item):\n if isinstance(item, str):\n item = [i for i, v in enumerate(self.list) if item == v.name]\n if len(item) > 0:\n item = item[0]\n return self.list[item]",
"def get_index_content(obj, **kw):\n path = obj\n\n # NOTE: It's already a read content list\n if isinstance(obj, list):\n return obj\n elif isinstance(obj, dict):\n # NOTE: It's a resource with content inside...\n if 'content' in obj:\n return obj['content']\n # NOTE: It's a resource with a path that should be read.\n elif 'path' in obj:\n path = obj['path']\n # NOTE: It's just a path, read it\n return utils._read(path, **kw)",
"def index(self, bytes_gen: Iterator[bytes] = None, **kwargs):\n self._call_client(bytes_gen, mode='index', **kwargs)",
"def _load_image_set_index(self, anno_filepath):\n # Check\n assert os.path.exists(anno_filepath), \\\n 'Path does not exist: {}'.format(anno_filepath)\n # Open and read\n with open(anno_filepath) as f:\n # format: imgidx x1 y1 x2 y2 label_list\n # whre label list look like this: 0 0 0 0 1 0 0 (assume here has six action classes)\n image_index = [x.strip().split()[0] for x in f.readlines()]\n # \n return image_index",
"def load(self, file, index_in_file):\n try:\n arr = self.get(file)\n except KeyError:\n arr = np.load(file)\n\n # Need to call set each time so we know what the LRU item is.\n self.set(file, arr)\n\n return arr[index_in_file]",
"def find_object(field, list):\n for item in list:\n if item.name == field:\n return item\n return None"
] | [
"0.6527637",
"0.57819504",
"0.56898844",
"0.56373376",
"0.5492448",
"0.53973085",
"0.5301742",
"0.5224329",
"0.52219355",
"0.52150476",
"0.51943016",
"0.51909566",
"0.51579016",
"0.51561844",
"0.5147235",
"0.51413095",
"0.51280373",
"0.50871134",
"0.5073669",
"0.5071198",
"0.5059776",
"0.5048264",
"0.50387573",
"0.5029411",
"0.50290895",
"0.5026386",
"0.5024855",
"0.50204617",
"0.5018296",
"0.5016931"
] | 0.64558613 | 1 |
Run currently selected test code. Common code runs here, specific code is invoked through test_code_list and test_code_ID. Optional parameters can be passed if needed (unnamed or named), interpreted accordingly by selected test code. | def run_test_code(self, *test_code_args, **test_code_kwargs):
try:
# here, trigger start code from challenge def (to simulate VM failure), manage Recovery time measurement,
# specific monitoring of VNF, trigger stop code from challenge def
time1 = datetime.now() # get time as soon as execution starts
# create challenge execution instance
chall_exec_ID = 1 # ideally, would be incremented, but need to maintain a number of challenge executions somewhere. or could be random.
chall_exec_name = 'challenge execution' # challenge def ID is already passed
chall_exec_challDefID = self.challenge_def_ID
chall_exec = ChallengeExecution(chall_exec_ID, chall_exec_name, chall_exec_challDefID)
chall_exec.log.append_to_list('challenge execution created')
# create test execution instance
            test_exec_ID = 1 # ideally, would be incremented, but need to maintain a number of test executions somewhere. or could be random.
test_exec_name = 'test execution' # test def ID is already passed
test_exec_testDefID = self.ID
test_exec_userID = '' # or get user name from getpass module: import getpass and test_exec_userID = getpass.getuser()
test_exec = TestExecution(test_exec_ID, test_exec_name, test_exec_testDefID, chall_exec_ID, test_exec_userID)
test_exec.log.append_to_list('test execution created')
# get time1 before anything else, so the setup time is counted
test_exec.start_time = time1
# get challenge definition instance, and start challenge
challenge_def = get_indexed_item_from_list(self.challenge_def_ID, AutoResilGlobal.challenge_definition_list)
challenge_def.run_start_challenge_code()
# memorize challenge start time
chall_exec.start_time = datetime.now()
test_exec.challenge_start_time = chall_exec.start_time
# call specific test definition code, via table of functions; this code should monitor a VNF and return when restoration is observed
test_code_index = self.test_code_ID - 1 # lists are indexed from 0 to N-1
# invoke corresponding method, via index; could check for return code
self.test_code_list[test_code_index](*test_code_args, **test_code_kwargs)
# memorize restoration detection time and compute recovery time
test_exec.restoration_detection_time = datetime.now()
recovery_time_metric_def = get_indexed_item_from_file(1,FILE_METRIC_DEFINITIONS) # get Recovery Time metric definition: ID=1
test_exec.recovery_time = recovery_time_metric_def.compute(test_exec.challenge_start_time, test_exec.restoration_detection_time)
# stop challenge
challenge_def.run_stop_challenge_code()
# memorize challenge stop time
chall_exec.stop_time = datetime.now()
chall_exec.log.append_to_list('challenge execution finished')
# write results to CSV files, memorize test finish time
chall_exec.write_to_csv()
test_exec.finish_time = datetime.now()
test_exec.log.append_to_list('test execution finished')
test_exec.write_to_csv()
except Exception as e:
print(type(e), e)
sys.exit() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_code001(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code001 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_code010(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code010 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def run_code(self, test):\n for action in test:\n self.assertEquals(1, len(action))\n action_type, action = list(action.items())[0]\n\n if hasattr(self, \"run_\" + action_type):\n getattr(self, \"run_\" + action_type)(action)\n else:\n raise InvalidActionType(action_type)",
"def _run_test(self, test_cases):\n # type: (List[TestCaseInterface]) -> None\n if not test_cases:\n return\n self._test_names_to_test_states.update({\n test_cases[0].get_name(): TestCaseState(test_cases[0], test_cases[1:])})\n self._test_names_to_processes.update(\n {test_cases[0].get_name(): subprocess.Popen(\n test_cases[0].run_test_command, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)})\n print('Started executing: {}'.format(test_cases[0].get_name()))",
"def test_code007(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code007 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def doTestCase(self, testSM, desc=\"Nondescript TestCase!\",\n dir=None, smList=[],\n spinSteps=500,\n preserveImpl=False,\n testAutocodeFailure=False,\n expectFile=None, autocodeOpts=\"\"):\n # configure a few optional parameters\n if dir is None: # set dir same as testSM if not set\n dir = testSM\n\n # call parent doTestCase\n if not verifier.Verifier.doTestCase(self, testSM, desc,\n os.sep.join([self.TESTDIR, dir]),\n smList, None, preserveImpl,False, testAutocodeFailure):\n return\n print\n #\n if not self._rerunOnly: # don't regen if re-running last build\n print \"Autocoding \" + self._testSM\n # if 'smList' supplied, construct a list of StateMachine options\n opts=autocodeOpts\n for sm in smList:\n opts += \" -sm %s\" % sm\n self._autocoder(ext=\".zip\", opts=opts, target=\"-promela\")\n #\n result = self.RESULT_FAIL\n if testAutocodeFailure: # Only check if autocode failed\n file = os.sep.join([self._dir, self.AUTOCODE_DIR, self._testSM+'.pml'])\n if not os.path.exists(file): # not finding it is a PASS\n result = self.RESULT_PASS\n else: # Run the autocoded product\n if self._buildAndStop: # don't run test\n self._endTestCase(verifier.TestReporter.TEST_SKIP)\n return\n print\n print \"Starting test: \" + self.COLORS['bold'] + desc + self.COLORS['default']\n\n os.chdir(self._dir + os.sep + \"autocode\")\n if spinSteps is None or spinSteps == 0:\n cmd = \"spin -n100 Main.pml\"\n else:\n cmd = \"spin -n100 -u500 Main.pml\"\n\n self._startApp(cmdStr=cmd)\n result = self._checkResults(expectFile=expectFile)\n\n #\n self._endTestCase(result)",
"def execute(self, code: int = 0, args: Optional[List[str]] = None, fail: str = \"\", verbose: bool = False):\n assert fail in AssemblyTest._can_fail, f\"Invalid fail={fail}. Can only fail: {list(AssemblyTest._can_fail)}\"\n\n \"\"\" As soon as this function is called, the AssemblyTest is considered \"executed\" for the duration of the life cycle of this test and should be treated as such. \"\"\"\n self._has_executed = True\n\n # turn function to fail into a define\n if len(fail) == 0:\n defines = []\n else:\n ret = 0 if fail == 'malloc' else -1\n defines = [\"--def\", f\"#{fail.upper()}_RETURN_HOOK=li a0 {ret}\"]\n\n # check arguments\n if args is not None:\n # TODO: check to see if any args clash with venus arguments\n assert len(args) > 0, \"use None if you don't want to pass any arguments\"\n for a in args:\n assert not a.startswith('-'), f\"argument '{a}' starting with '-' is not allowed\"\n # all arguments could potentially be filenames that we write to, so let's just add them\n self._write_files |= set(args)\n else:\n # ensure that args is always a list\n args = []\n\n lines = []\n\n lines += [f\".import ../../src/{i}\" for i in self._imports]\n lines += [\"\", \".data\"] + self.data\n lines += [\"\", \".globl main_test\", \".text\", \"# main_test function for testing\", \"main_test:\"]\n\n # prologue\n if len(self._output_regs) > 0:\n assert len(self._output_regs) < 13, f\"Too many output registers: {len(self._output_regs)}!\"\n p = [\"# Prologue\", f\"addi sp, sp, -{4 * (len(self._output_regs) + 1)}\", \"sw ra, 0(sp)\"]\n p += [f\"sw s{i}, {(i+1) * 4}(sp)\" for i in range(len(self._output_regs))]\n lines += _indent(p + [\"\"])\n\n\n lines += _indent(self._args)\n\n assert self._call is not None, \"No function was called!\"\n foo_call = [\"\", f\"# call {self._call} function\", f\"jal ra {self._call}\"]\n lines += _indent(foo_call)\n\n if len(self._output_regs) > 0:\n lines += _indent([\"\", \"# save all return values in the save registers\"])\n lines += _indent([f\"mv s{i} a{i}\" for i in self._output_regs] + [\"\"])\n\n lines += _indent(self._checks)\n if code != 0:\n lines += _indent([f\"# we expect {self._call} to exit early with code {code}\"])\n\n lines += _indent([\"\", \"# exit normally\"])\n # epilogue\n if len(self._output_regs) > 0:\n p = [\"# Epilogue\", \"lw ra, 0(sp)\"]\n p += [f\"lw s{i}, {(i + 1) * 4}(sp)\" for i in range(len(self._output_regs))]\n p += [f\"addi sp, sp, {4 * (len(self._output_regs) + 1)}\"]\n lines += _indent(p + [\"\"])\n # lines += _indent([\"mv a0, zero\", \"ret\"])\n lines += _indent([\"jal exit\"])\n lines += [\"\"]\n\n if verbose: print()\n filename = save_assembly(self.name, '\\n'.join(lines), verbose=verbose)\n r, coverage = run_venus(filename, self.check_calling_convention, defines, args, verbose=verbose)\n _process_coverage(coverage, self._assembly)\n self._program_executed = True\n self._std_out = r.stdout.decode('UTF-8')\n if r.returncode != code:\n self._print_failure(r, code)",
"def test_code009(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code009 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def run(self):\n if self.all:\n cmd = self.apply_options(self.test_all_cmd)\n self.call_and_exit(cmd)\n else:\n cmds = (self.apply_options(self.unit_test_cmd, (\"coverage\",)),)\n if self.coverage:\n cmds += (self.apply_options(self.coverage_cmd),)\n self.call_in_sequence(cmds)",
"def test_code008(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code008 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_code006(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code006 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_code003(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code003 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_code002(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code002 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def run_test(test_name):\n\n print 'Running %s_test...' % test_name\n os.system('./test_%s.py' % test_name)\n print",
"def test_code004(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code004 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def doTestCase(self, testSM, desc=\"Nondescript TestCase!\",\n dir=None, smList=[], script=None,\n preserveImpl=False, useSimState=False,\n testAutocodeFailure=False,\n expectFile=None, autocodeOpts=\"\"):\n # configure a few optional parameters\n if dir is None: # set dir same as testSM if not set\n dir = testSM\n # call parent doTestCase\n if not verifier.Verifier.doTestCase(self, testSM, desc,\n os.sep.join([self.BASEDIR, \"test\", dir]),\n smList, script, preserveImpl,\n useSimState, testAutocodeFailure):\n return\n #\n # Check if C test suite can proceed, checking for QF-C lib\n self.assertTrue(os.path.exists(os.sep.join([self.BASEDIR, \"linux\", \"libqf.a\"])),\\\n \"*** QF_C++ library not compiled, could not proceed!\\n\" +\\\n \"==> Please go to QF_Cpp/ and execute 'make clean all' first\")\n print\n #\n if not self._rerunOnly: # don't regen if re-running last build\n print \"Autocoding \" + self._testSM + \" and fetching its signals...\"\n # if 'smList' supplied, construct a list of StateMachine options\n opts = autocodeOpts + \" -cppsignals\"\n for sm in smList:\n opts += \" -sm %s\" % sm\n self._autocoder(ext=\".mdxml\", target=\"-cpp\",\n opts=opts, javaOpts=\"-DDEFINE_MAIN\")\n #\n result = self.RESULT_FAIL\n if testAutocodeFailure: # Only check if autocode failed\n file = os.sep.join([self._dir, self.AUTOCODE_DIR, self._testSM+'.cpp'])\n if not os.path.exists(file): # not finding it is a PASS\n result = self.RESULT_PASS\n else: # Compile and run the autocoded product\n if not self._rerunOnly: # don't recompile if re-running last build\n print \"Compiling State Machine application...\"\n self._compileSM(smList)\n # This if should be executed even for re-run, to prevent bad feature interaction\n if self._buildAndStop: # don't run test\n self._endTestCase(verifier.TestReporter.TEST_SKIP)\n return\n #\n # This needs to be done even if re-running\n print \"Process StateChartSignals...\"\n self._processStatechartSignals()\n #\n print\n print \"Starting test: \" + self.COLORS['bold'] + desc + self.COLORS['default']\n if os.path.exists(os.sep.join([self._dir, self.AUTOCODE_DIR, self._testSM])):\n self._startApp(targetApp=self._testSM)\n result = self._checkResults(expectFile=expectFile)\n elif os.path.exists(os.sep.join([self._dir, self.AUTOCODE_DIR, \"linux\", \"active\"])):\n self._startApp(targetApp=\"linux/active\")\n result = self._checkResults(expectFile=expectFile)\n else:\n print \"ERROR! No executable exists, please autocode/build first!\"\n result = verifier.TestReporter.RESULT_UNKNOWN\n #\n self._endTestCase(result)",
"def run_tests(self):\n raise NotImplementedError",
"def run_test(self):\n raise NotImplementedError",
"def _execute_test_step(self):\n\n test_step_command = '{0}'.format(self._test_exec) if self._interpreter == '' else \\\n '{0} {1}'.format(self._interpreter, self._test_exec)\n\n # TODO: Expand user and environment variables.\n test_params = ['{0} {1}'.format(x,y) if y != '' else '{0}'.format(x)\n for x,y in self._test_params.items()]\n\n exit_code, out = self._staf_start_proc(test_step_command,\n self._remote_target_path,\n self._timeout,\n test_params,\n location=self._sut.network_address)\n\n if exit_code != 0:\n raise CoreError('Test step \"{0}\" failed: {1}'.format(self._description, out))",
"def runTests(self):\n \n pass",
"def run(self):\n if self.verbose:\n print(f'Running {self.name} tests...')\n\n # try running setup if there is one\n if self.setup:\n self.__process_setup()\n\n final_report = [None] * len(self.tests)\n\n for test_in, test_out in sorted(self.tests.items()):\n # increment total num of tests\n self.total += 1\n\n if self.verbose:\n print(f'#{self.total}')\n\n # evaluate test input w/ setup vars, if any\n try:\n inp = eval(test_in, self.vars)\n except Exception as err:\n print(f'Issue during evaluation of test input: {err}')\n final_report[self.total - 1] = 'input eval error'\n if self.verbose:\n print(f'Test input was: {test_in}')\n print('Vars from execution: {}'.format({k : v for k, v in self.vars.items() if k != '__builtins__'}))\n continue\n\n \n # checking if function input has more than one arg\n if type(inp) in (list, tuple):\n try:\n student_out = self.student_function(*inp)\n except Exception as err:\n print(f'Issue while running student code: {err}')\n final_report[self.total - 1] = f'student code error: {err}; input: {inp}; func_name: {self.name}'\n if self.verbose:\n print(f'Function being run was: {self.name}')\n print(f'Inputs were: {inp}')\n continue\n else:\n try:\n student_out = self.student_function(inp)\n except Exception as err:\n print(f'Issue while running student code: {err}')\n final_report[self.total - 1] = f'student code error: {err}; input: {inp}; func_name: {self.name}'\n if self.verbose:\n print(f'Function being run was: {self.name}')\n print(f'Input was: {inp}')\n continue\n\n # ans alias for ease of answer checking\n self.vars['ans'] = student_out\n\n if self.schema:\n format_vals = eval(test_out, self.vars)\n results, maybe_failed_schema = self.__process_schema(format_vals)\n if all(results):\n self.correct += 1\n final_report[self.total - 1] = 'PASSED'\n else:\n # failed at least one of the tests\n failed_str = \" and \".join([\", \".join(maybe_failed_schema[:-1]),maybe_failed_schema[-1]] if len(maybe_failed_schema) > 2 else maybe_failed_schema)\n final_report[self.total - 1] = f'FAILED; failed following assertion(s): {failed_str}'\n else:\n expected_ans = eval(test_out, self.vars)\n if student_out == expected_ans:\n self.correct += 1\n final_report[self.total - 1] = 'PASSED'\n else:\n # failed the only test\n final_report[self.total - 1] = f'FAILED; got {repr(student_out)} but expected {repr(expected_ans)}'\n\n # run callback function, if there is one\n if self.callback:\n if self.verbose:\n print('Running callback...')\n print('call back is:', self.callback)\n\n # once done, put the final report on the queue\n self.queue.put((self.student_username, self.name, f'{self.correct}/{self.total}', final_report))",
"def run(self, cmd, code):\n # It has to start with `\"syntax_test\"`\n # and has a specific first line, technically.\n # We only require one of each\n # (to also lint unsaved views).\n basename = os.path.basename(self.filename)\n if not basename or not basename.startswith(\"syntax_test\"):\n # This actually gets reported by the test runner,\n # so we only check for an additionally qualifying file\n # if the filename check fails.\n first_line = code[:code.find(\"\\n\")]\n match = re.match(r'^(\\S*) SYNTAX TEST \"([^\"]*)\"', first_line)\n if not match:\n return\n\n # The syntax test runner only operates on resource files that the resource loader can load,\n # which must reside in a \"Packages\" folder\n # and has the restriction of only working on saved files.\n # Instead, we create a temporary file somewhere in the packages folder\n # and pass that.\n with _temporary_resource_file(code, prefix=\"syntax_test_\") as resource_path:\n assertions, test_output_lines = sublime_api.run_syntax_test(resource_path)\n\n output = \"\\n\".join(test_output_lines)\n if persist.debug_mode():\n persist.printf('{}: \"{}\" assertions: {}'.format(p_name, basename, assertions))\n # SublimeLinter internally already prints the output we return\n # persist.printf('{}: \"{}\" output: \\n {}'.format(p_name, basename,\n # \"\\n \".join(test_output_lines)))\n\n return output",
"def runcode(self, code):\n if not self.locals.get('autocommit', None):\n return self.locals['db'].transact(code.InteractiveConsole.runcode, self, code)\n return code.InteractiveConsole.runcode(self, code)",
"def main():\n parser = ArgumentParser(description=__doc__,\n formatter_class=RawTextHelpFormatter)\n parser.add_argument('code', help='Python code to execute')\n group = parser.add_mutually_exclusive_group()\n group.add_argument('-3', action='store_const', dest='python',\n const='python3', help='Explicitly use Python 3')\n group.add_argument('-2', action='store_const', dest='python',\n const='python2', help='Explicitly use Python 2')\n group.add_argument('-p', '--python', help='Specify python interpreter')\n args = parser.parse_args()\n if args.python is not None:\n call([args.python, __file__, args.code])\n else:\n InteractiveInterpreter(LocalsImportDict()).runsource(args.code)",
"def run(path):\n config = conf.get_yaml_field(gl.configFile)\n exe_con = config['ENABLE_EXECUTION']\n exe_num = config['EXECUTION_NUM']\n rerun = config['ENABLE_RERUN']\n reruns_nums = config['RERUN_NUM']\n repeat = config['ENABLE_REPEAT']\n repeat_num = config['REPEAT_NUM']\n exec_mode = config['ENABLE_EXEC_MODE']\n debug_mode = config['ENABLE_DEBUG_MODE']\n last_failed = config['ENABLE_LAST_FAILED']\n failed_first = config['ENABLE_FAILED_FIRST']\n\n # custom function\n RunTestCase.copy_custom_function()\n\n # failed first\n failed_first_args = (' --ff ' if failed_first else '') if not last_failed else ''\n\n # last failed\n last_failed_args = (' --lf ' if last_failed else '') if not failed_first else ''\n\n # Enable repeat case.\n repeat_args = ' --count={} '.format(repeat_num) if repeat else ''\n\n # Enable CPU concurrency\n py_args = ' -n {} '.format(exe_num) if exe_con else ''\n\n # Enable failed retry\n reruns_args = ' --reruns {} '.format(reruns_nums) if rerun else ''\n\n # debug mode print debug info.\n debug = '' if debug_mode else '--tb=no'\n\n \"\"\"\n Load the pytest framework,\n which must be written here or DDT will be loaded first.\n from httptesting.case import test_load_case\n \"\"\"\n case_path = gl.loadcasePath\n # Output mode console or report.\n if exec_mode:\n cmd = 'cd {} && py.test -q -s {} {} {} {}'.format(\n case_path, reruns_args, 'test_load_case.py',\n repeat_args, debug\n )\n else:\n cmd = 'cd {} && py.test {} {} {} {} {} {} --html={} {} --self-contained-html'.format(\n case_path,\n py_args,\n reruns_args,\n last_failed_args,\n failed_first_args,\n 'test_load_case.py',\n repeat_args,\n path,\n debug\n )\n try:\n os.system(cmd)\n except (KeyboardInterrupt, SystemExit):\n print('已终止执行.')",
"def run(context, path=\"\"):\n common.success(f\"Tests {path} running \")\n return start.run_python(\n context,\n f\"-m pytest {path}\"\n )",
"def do_test(self):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tif not self.build['dotest']:\n\t\t\tself.log('Tests configured off, not running',level=logging.DEBUG)\n\t\t\treturn\n\t\t# Test in reverse order\n\t\tself.log('PHASE: test', level=logging.DEBUG)\n\t\tself.stop_all()\n\t\tself.start_all()\n\t\tfor module_id in self.module_ids(rev=True):\n\t\t\t# Only test if it's installed.\n\t\t\tif self.is_installed(self.shutit_map[module_id]):\n\t\t\t\tself.log('RUNNING TEST ON: ' + module_id, level=logging.DEBUG)\n\t\t\t\tself.login(prompt_prefix=module_id,command=shutit_global.shutit_global_object.bash_startup_command,echo=False)\n\t\t\t\tif not self.shutit_map[module_id].test(self):\n\t\t\t\t\tself.fail(module_id + ' failed on test', shutit_pexpect_child=self.get_shutit_pexpect_session_from_id('target_child').pexpect_child) # pragma: no cover\n\t\t\t\tself.logout(echo=False)",
"def main():\n\n parser = argparse.ArgumentParser(prog=\"run_test.py\",\n formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument('id', help=\"Id of a test\")\n args = parser.parse_args()\n\n configure_logger()\n\n test_info = TESTS.get(args.id, None)\n if not test_info:\n test_info.log.error(f'{args.id} does not exist')\n exit(ErrorCode.CRITICAL)\n os.environ['DISPLAY'] = \":0.0\"\n\n test = Test(args.id, test_info)\n result = test.run()\n\n test.log.info('#' * 80)\n if not result:\n test.log.error('TEST FAILED')\n else:\n test.log.info('TEST PASSED')\n test.log.info('#' * 80)\n exit(not result)",
"def run(self):\n list_test_scenarios = self.__get_list_scenarios_in_folder()\n\n if not list_test_scenarios:\n utils.print_error(\n \"\\n{}\\n\".format(constant.ERR_CANNOT_FIND_ANY_TEST_SCENARIOS))\n exit(1)\n\n (tests_pass, tests_fail) = self.__execute_tests(list_test_scenarios)\n\n complete_message = constant.INFO_TEST_PASS_FAIL.format(\n tests_pass, tests_fail)\n\n print(complete_message)\n\n self.__execute_reporter()",
"def run_tests(event, context):\n try:\n jobId = event['CodePipeline.job']['id']\n user_parameters = json.loads(event['CodePipeline.job']['data']['actionConfiguration']['configuration']['UserParameters'])\n runscope_trigger_url = user_parameters['runscopeTriggerUrl']\n runscope_access_token = user_parameters['runscopeAccessToken']\n\n tests = start_tests(runscope_trigger_url)\n aggregate_status = wait_for_tests_to_complete(tests, runscope_access_token)\n if aggregate_status == \"pass\":\n code_pipeline.put_job_success_result(jobId=jobId)\n else:\n code_pipeline.put_job_failure_result(jobId=jobId, failureDetails={\n 'type': 'JobFailed',\n 'message': 'One or more tests failed'\n })\n except:\n code_pipeline.put_job_failure_result(jobId=jobId, failureDetails={\n 'type': 'JobFailed',\n 'message': 'Unhandled exception during Runscope tests execution'\n })"
] | [
"0.677512",
"0.67487895",
"0.6626339",
"0.6552723",
"0.6492401",
"0.64204943",
"0.64045966",
"0.6393247",
"0.6352505",
"0.6287673",
"0.62475413",
"0.62339044",
"0.62095606",
"0.6190819",
"0.6190053",
"0.6165832",
"0.60683763",
"0.60556114",
"0.6042067",
"0.60127425",
"0.5986645",
"0.5980521",
"0.5966012",
"0.59649193",
"0.59277606",
"0.5926554",
"0.5911577",
"0.588517",
"0.58811635",
"0.58793706"
] | 0.68803483 | 0 |
Test case code number 001. | def test_code001(self, *test_code_args, **test_code_kwargs):
print("This is test_code001 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_code010(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code010 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_code009(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code009 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_code008(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code008 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_code007(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code007 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_code006(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code006 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_code002(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code002 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_code003(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code003 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_code004(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code004 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def start_challenge_code001(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code001 from ChallengeDefinition #\",self.ID, sep='')",
"def test_generate_barcode_ean8(self):\n pass",
"def start_challenge_code010(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code010 from ChallengeDefinition #\",self.ID, sep='')",
"def test_generate_barcode_ean13(self):\n pass",
"def test_zero_case(self):\n self.assertEqual(factorial(0), 1)",
"def test_price_code_0(self):\n start_date = datetime(2016, 11, 13)\n # Note this is a Sunday, and must have timedelta added\n\n for x in range(1, 7):\n with self.subTest(x=x):\n date = start_date + timedelta(x)\n result = star_barcode.barcode_filename(date, x)\n self.assertEqual(\n result.split('.')[0][-2:],\n f'{x:02}'\n )",
"def test_T01():",
"def testIntcodeProgram():\n\n testData = [\n {\n \"input\": [1, 0, 0, 0, 99],\n \"output\": [2, 0, 0, 0, 99]\n },\n {\n \"input\": [2, 3, 0, 3, 99],\n \"output\": [2, 3, 0, 6, 99]\n },\n {\n \"input\": [2, 4, 4, 5, 99, 0],\n \"output\": [2, 4, 4, 5, 99, 9801]\n },\n {\n \"input\": [1, 1, 1, 4, 99, 5, 6, 0, 99],\n \"output\": [30, 1, 1, 4, 2, 5, 6, 0, 99]\n },\n ]\n\n overallSuccess = True\n\n for test in testData:\n input = test['input']\n expectedResult = test['output']\n\n result = runIntcode(input.copy())\n\n if result == expectedResult:\n print (\"Testing\", input, \"... ok\")\n else:\n print (\"Testing\", input, \"... fail, got \", result)\n overallSuccess = False\n\n return overallSuccess",
"def start_challenge_code009(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code009 from ChallengeDefinition #\",self.ID, sep='')",
"def test_counter_start_at_zero(self):\n pass",
"def start_challenge_code008(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code008 from ChallengeDefinition #\",self.ID, sep='')",
"def create_code():\n\n code = [0, 0, 0, 0]\n\n for i in range(4):\n value = random.randint(1, 8) # 8 possible digits\n while value in code:\n value = random.randint(1, 8) # 8 possible digits\n code[i] = value\n \n #print(code)\n return code",
"def test_hello_failed_code_value(self):\n\n value = 0\n\n iter_given_code = self.test_hello_failed_code.__iter__()\n length = self.test_hello_failed_code.__len__()\n\n while value < self.MAX_HELLO_FAILED_CODE_VALUE or length > 0:\n\n self.assertEqual(value, iter_given_code.__next__())\n\n if value < self.MAX_HELLO_FAILED_CODE_VALUE:\n value += 1\n\n length -= 1",
"def test_generic_failed_code_value(self):\n value = 0\n\n for elem in self.test_generic_failed_code:\n self.assertEqual(value, elem)",
"def test_agent_code_sql(self):\n self._db(self._agent.agent_code_sql(code='N031'))\n received = list(self._db.rows())\n expected = [(1, )]\n msg = 'Agent code \"N031\" code not as expected'\n self.assertEqual(received, expected, msg)",
"def test_fix_code_typical_code():\r\n\r\n pass",
"def checkdigit(code):\n check = sum((i+1)*int(code[i]) for i in range(9)) % 11\n return 'X' if check == 10 else str(check)",
"def test_bad_action_code_value(self):\n\n value = 0\n iter_given_code = self.test_bad_action_code.__iter__()\n length = self.test_bad_action_code.__len__()\n\n while value < self.MAX_BAD_ACTION_CODE_VALUE or length > 0:\n\n self.assertEqual(value, iter_given_code.__next__())\n\n if value < self.MAX_BAD_ACTION_CODE_VALUE:\n value += 1\n\n length -= 1",
"def test_01_basic(self):\n self.assertTrue(True)\n self.assertEqual(0, 0)",
"def test_0001(self):\n assert self.vca.token",
"def test_0001(self):\n assert self.vca.token",
"def test_0001(self):\n assert self.vca.token"
] | [
"0.6850627",
"0.66204584",
"0.646728",
"0.6306222",
"0.6301501",
"0.60464233",
"0.6036792",
"0.5991052",
"0.5896127",
"0.5882364",
"0.5882116",
"0.58683854",
"0.5861891",
"0.58524764",
"0.58049977",
"0.57959384",
"0.5779963",
"0.5769177",
"0.5752419",
"0.5743817",
"0.5742925",
"0.57409036",
"0.5727042",
"0.5717984",
"0.5683047",
"0.567582",
"0.566751",
"0.56587386",
"0.56587386",
"0.56587386"
] | 0.6795745 | 1 |
Test case code number 003. | def test_code003(self, *test_code_args, **test_code_kwargs):
print("This is test_code003 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_code008(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code008 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_code009(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code009 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_code010(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code010 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_code001(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code001 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_code006(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code006 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_T01():",
"def test_code007(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code007 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_code002(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code002 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_3():",
"def test_code004(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code004 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_T3():",
"def test_T3():",
"def exercise_b2_113():\r\n pass",
"def test_4():",
"def test_fix_code_typical_code():\r\n\r\n pass",
"def test_count_361_080(self):\n value: int = 361_080\n result: int = 188_067\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')",
"def exercise_b2_106():\r\n pass",
"def start_challenge_code008(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code008 from ChallengeDefinition #\",self.ID, sep='')",
"def test_T0():",
"def test_5():",
"def test_0001(self):\n assert self.vca.token",
"def test_0001(self):\n assert self.vca.token",
"def test_0001(self):\n assert self.vca.token",
"def start_challenge_code003(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code003 from ChallengeDefinition #\",self.ID, sep='')",
"def exercise_b2_107():\r\n pass",
"def test_generate_barcode_ean13(self):\n pass",
"def test(self):\n # -- Test --\n\n # (1)\n\n # (2)\n\n # (3)\n\n # (4)\n # -- Test --",
"def test_T4():",
"def test_T4():",
"def test_count_361_087(self):\n value: int = 361_087\n result: int = 188_067\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')"
] | [
"0.65716606",
"0.6559461",
"0.6456624",
"0.63518673",
"0.6318272",
"0.62671703",
"0.62133527",
"0.6069862",
"0.6031914",
"0.6030209",
"0.5982141",
"0.5982141",
"0.5874074",
"0.58359516",
"0.5817474",
"0.5689604",
"0.5688705",
"0.56851476",
"0.56775486",
"0.56764436",
"0.56737846",
"0.56737846",
"0.56737846",
"0.5672124",
"0.5664465",
"0.56613207",
"0.56435245",
"0.563862",
"0.563862",
"0.56349146"
] | 0.672484 | 0 |
Test case code number 004. | def test_code004(self, *test_code_args, **test_code_kwargs):
print("This is test_code004 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_code010(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code010 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_code009(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code009 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_code001(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code001 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_code008(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code008 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_code007(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code007 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_code006(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code006 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_4():",
"def test_code002(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code002 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_code003(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code003 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_generate_barcode_ean13(self):\n pass",
"def test_T01():",
"def test_T4():",
"def test_T4():",
"def test_generate_barcode_ean8(self):\n pass",
"def test_4_4_1_1(self):\n pass",
"def test_fix_code_typical_code():\r\n\r\n pass",
"def test(self):\n # -- Test --\n\n # (1)\n\n # (2)\n\n # (3)\n\n # (4)\n # -- Test --",
"def exercise_b2_113():\r\n pass",
"def test_result_code(self):\n self._compare_avp(\n avp.ResultCodeAVP(268, avp.ResultCode.DIAMETER_SUCCESS),\n memoryview(b'\\x00\\x00\\x01\\x0c\\x00\\x00\\x00\\x0c\\x00\\x00\\x07\\xd1'),\n )\n\n # Test a value we haven't defined\n self._compare_avp(\n avp.ResultCodeAVP(268, 1337),\n memoryview(b'\\x00\\x00\\x01\\x0c\\x00\\x00\\x00\\x0c\\x00\\x00\\x059'),\n )",
"def test_09(self, test):\r\n return test.MANUAL()",
"def test_T0():",
"def test_0001(self):\n assert self.vca.token",
"def test_0001(self):\n assert self.vca.token",
"def test_0001(self):\n assert self.vca.token",
"def test_calculate_cipher_step():\n given_value = d.calculate_cipher_step()\n assert type(given_value) == int\n assert given_value == 1016\n new_decoder = Decoder(filename, \"HELLO THERE!\")\n new_value = new_decoder.calculate_cipher_step()\n assert new_value != given_value\n random_number = random.Random()\n assert given_value != random_number",
"def exercise_b2_107():\r\n pass",
"def exercise_b2_106():\r\n pass",
"def event_m20_11_4000000():\n \"\"\"State 0,2: [Lib] Character: Petrified: Key Guide_SubState\"\"\"\n assert event_m20_11_x37(z94=5300, z95=0, z96=15, z97=211000030, z98=0, z99=1600, z100=6, z101=4000010)\n \"\"\"State 1: Finish\"\"\"\n EndMachine()",
"def test_table_feature_failed_code_value(self):\n\n value = 0\n\n iter_given_code = self.test_table_feature_failed_code.__iter__()\n length = self.test_table_feature_failed_code.__len__()\n\n while value < self.MAX_TABLE_FEATURE_FAILED_CODE_VALUE or length > 0:\n\n if value == 2:\n value = 5\n\n self.assertEqual(value, iter_given_code.__next__())\n\n if value < self.MAX_TABLE_FEATURE_FAILED_CODE_VALUE:\n value += 1\n\n length -= 1",
"def test_price_code_0(self):\n start_date = datetime(2016, 11, 13)\n # Note this is a Sunday, and must have timedelta added\n\n for x in range(1, 7):\n with self.subTest(x=x):\n date = start_date + timedelta(x)\n result = star_barcode.barcode_filename(date, x)\n self.assertEqual(\n result.split('.')[0][-2:],\n f'{x:02}'\n )"
] | [
"0.67048603",
"0.6592853",
"0.6474699",
"0.64421576",
"0.640215",
"0.6312405",
"0.62041426",
"0.61801666",
"0.6154687",
"0.6145559",
"0.61353433",
"0.6107601",
"0.6107601",
"0.6091312",
"0.6010107",
"0.5868794",
"0.5764935",
"0.5749172",
"0.5733635",
"0.57220316",
"0.567724",
"0.5661999",
"0.5661999",
"0.5661999",
"0.5656633",
"0.56477666",
"0.56408286",
"0.56354475",
"0.561926",
"0.5612135"
] | 0.6673887 | 1 |
Test case code number 006. | def test_code006(self, *test_code_args, **test_code_kwargs):
print("This is test_code006 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_code010(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code010 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_code008(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code008 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_code007(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code007 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_code009(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code009 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_T01():",
"def test_code001(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code001 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_4():",
"def exercise_b2_106():\r\n pass",
"def test_09(self, test):\r\n return test.MANUAL()",
"def test_5():",
"def day_06_a() -> int:\n return 0",
"def test_code004(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code004 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_code002(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code002 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def exercise_b2_52():\r\n pass",
"def test_generate_barcode_ean13(self):\n pass",
"def test_generate_barcode_ean8(self):\n pass",
"def test_4_4_1_1(self):\n pass",
"def test_fix_code_typical_code():\r\n\r\n pass",
"def test_code003(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code003 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def day_06_b() -> int:\n return 0",
"def exercise_b2_107():\r\n pass",
"def test_count_361_080(self):\n value: int = 361_080\n result: int = 188_067\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')",
"def exercise_b2_113():\r\n pass",
"def test_T4():",
"def test_T4():",
"def test_example_day9_pt2():\n assert find_pt2(ex_data, 127) == 62",
"def test(self):\n # -- Test --\n\n # (1)\n\n # (2)\n\n # (3)\n\n # (4)\n # -- Test --",
"def exercise_b2_56():\r\n pass",
"def test_count_361_087(self):\n value: int = 361_087\n result: int = 188_067\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')",
"def test_count_361_070(self):\n value: int = 361_070\n result: int = 188_058\n self.assertEqual(count(value), result, f'Between 0 and {value}, there are {result} lucky numbers.')"
] | [
"0.6523085",
"0.64991325",
"0.63460475",
"0.63168234",
"0.6221022",
"0.6153542",
"0.60899824",
"0.59823734",
"0.5976485",
"0.5955973",
"0.5943083",
"0.59364474",
"0.59303397",
"0.58882",
"0.5874332",
"0.586917",
"0.586421",
"0.58636475",
"0.5840483",
"0.5831648",
"0.5820608",
"0.582033",
"0.5808721",
"0.58024746",
"0.58024746",
"0.57992405",
"0.579912",
"0.5785274",
"0.5760836",
"0.57575154"
] | 0.6790483 | 0 |
Test case code number 007. | def test_code007(self, *test_code_args, **test_code_kwargs):
print("This is test_code007 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_code008(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code008 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_code010(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code010 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_code009(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code009 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_code006(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code006 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_code001(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code001 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_T01():",
"def exercise_b2_107():\r\n pass",
"def test_09(self, test):\r\n return test.MANUAL()",
"def exercise_b2_106():\r\n pass",
"def test_generate_barcode_ean8(self):\n pass",
"def test_code003(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code003 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_code002(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code002 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def exercise_b2_113():\r\n pass",
"def test_code004(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code004 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def exercise_b2_70():\r\n pass",
"def test_4():",
"def test_example_day9_pt2():\n assert find_pt2(ex_data, 127) == 62",
"def exercise_b2_52():\r\n pass",
"def test_fix_code_typical_code():\r\n\r\n pass",
"def start_challenge_code008(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code008 from ChallengeDefinition #\",self.ID, sep='')",
"def test_generate_barcode_ean13(self):\n pass",
"def test_table_feature_failed_code_value(self):\n\n value = 0\n\n iter_given_code = self.test_table_feature_failed_code.__iter__()\n length = self.test_table_feature_failed_code.__len__()\n\n while value < self.MAX_TABLE_FEATURE_FAILED_CODE_VALUE or length > 0:\n\n if value == 2:\n value = 5\n\n self.assertEqual(value, iter_given_code.__next__())\n\n if value < self.MAX_TABLE_FEATURE_FAILED_CODE_VALUE:\n value += 1\n\n length -= 1",
"def exercise_b2_82():\r\n pass",
"def test_5():",
"def start_challenge_code007(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code007 from ChallengeDefinition #\",self.ID, sep='')",
"def day_07_a() -> int:\n return 0",
"def test_price_code_0(self):\n start_date = datetime(2016, 11, 13)\n # Note this is a Sunday, and must have timedelta added\n\n for x in range(1, 7):\n with self.subTest(x=x):\n date = start_date + timedelta(x)\n result = star_barcode.barcode_filename(date, x)\n self.assertEqual(\n result.split('.')[0][-2:],\n f'{x:02}'\n )",
"def test_task108_main_logic(number, expected_value):\r\n assert algo.Task108.main_logic(number) == expected_value",
"def exercise_b2_53():\r\n pass",
"def test_task107_main_logic(number, expected_value):\r\n assert algo.Task107.main_logic(number) == expected_value"
] | [
"0.66987306",
"0.6648345",
"0.6489203",
"0.6444564",
"0.62110287",
"0.61403537",
"0.6100074",
"0.5993487",
"0.59749377",
"0.5956436",
"0.5951756",
"0.5938374",
"0.5907481",
"0.5898534",
"0.58809346",
"0.5861907",
"0.5861846",
"0.58603644",
"0.5847938",
"0.5837277",
"0.5835848",
"0.58021253",
"0.57962",
"0.57585275",
"0.57516724",
"0.5748931",
"0.5739652",
"0.5709492",
"0.5704583",
"0.56932586"
] | 0.66724545 | 1 |
Test case code number 008. | def test_code008(self, *test_code_args, **test_code_kwargs):
print("This is test_code008 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_code010(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code010 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_code009(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code009 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_code006(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code006 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_code007(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code007 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_generate_barcode_ean8(self):\n pass",
"def test_code001(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code001 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_T01():",
"def test_decode_barcode_8_ok(self):\r\n self.assertEqual(decode_barcode_8(self.valid_bc_1),\r\n (self.valid_bc_1, 0))\r\n self.assertEqual(decode_barcode_8(self.valid_bc_2),\r\n (self.valid_bc_2, 0))\r\n self.assertEqual(decode_barcode_8(self.valid_bc_3),\r\n (self.valid_bc_3, 0))\r\n self.assertEqual(decode_barcode_8(self.valid_bc_4),\r\n (self.valid_bc_4, 0))\r\n self.assertEqual(decode_barcode_8(self.valid_bc_5),\r\n (self.valid_bc_5, 0))",
"def start_challenge_code008(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code008 from ChallengeDefinition #\",self.ID, sep='')",
"def test_code004(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code004 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_code002(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code002 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_generate_barcode_ean13(self):\n pass",
"def exercise_b2_107():\r\n pass",
"def exercise_b2_106():\r\n pass",
"def test_code003(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code003 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def exercise_b2_82():\r\n pass",
"def test_4():",
"def exercise_b2_113():\r\n pass",
"def test_fix_code_typical_code():\r\n\r\n pass",
"def test_price_code_0(self):\n start_date = datetime(2016, 11, 13)\n # Note this is a Sunday, and must have timedelta added\n\n for x in range(1, 7):\n with self.subTest(x=x):\n date = start_date + timedelta(x)\n result = star_barcode.barcode_filename(date, x)\n self.assertEqual(\n result.split('.')[0][-2:],\n f'{x:02}'\n )",
"def test_golay600_codes(self):\r\n for bc in golay600:\r\n corr, num_errs = golay.decode(bc)\r\n self.assertEqual(corr, bc)\r\n self.assertEqual(num_errs, 0)",
"def exercise_b2_52():\r\n pass",
"def test_09(self, test):\r\n return test.MANUAL()",
"def test_5():",
"def test_hello_failed_code_value(self):\n\n value = 0\n\n iter_given_code = self.test_hello_failed_code.__iter__()\n length = self.test_hello_failed_code.__len__()\n\n while value < self.MAX_HELLO_FAILED_CODE_VALUE or length > 0:\n\n self.assertEqual(value, iter_given_code.__next__())\n\n if value < self.MAX_HELLO_FAILED_CODE_VALUE:\n value += 1\n\n length -= 1",
"def test_decode_barcode_8_one_error(self):\r\n self.assertEqual(decode_barcode_8(self.single_error_1),\r\n (self.single_error_ref, 0.5))\r\n self.assertEqual(decode_barcode_8(self.single_error_2),\r\n (self.single_error_ref, 0.5))\r\n self.assertEqual(decode_barcode_8(self.single_error_3),\r\n (self.single_error_ref, 0.5))\r\n self.assertEqual(decode_barcode_8(self.single_error_4),\r\n (self.single_error_ref, 0.5))\r\n self.assertEqual(decode_barcode_8(self.single_error_5),\r\n (self.single_error_ref, 0.5))\r\n self.assertEqual(decode_barcode_8(self.single_error_6),\r\n (self.single_error_ref, 0.5))\r\n self.assertEqual(decode_barcode_8(self.single_error_7),\r\n (self.single_error_ref, 0.5))\r\n self.assertEqual(decode_barcode_8(self.single_error_8),\r\n (self.single_error_ref, 0.5))",
"def test_T0():",
"def exercise_b2_53():\r\n pass",
"def CASE108( self, main ):\n\n from tests.USECASE.SegmentRouting.SRRouting.dependencies.SRRoutingTest import SRRoutingTest\n\n SRRoutingTest.runTest( main,\n test_idx=108,\n onosNodes=3,\n dhcp=1,\n routers=1,\n ipv4=0,\n ipv6=1,\n description=\"Test link failures with IPv6 hosts (including external host configured with route-add command)\",\n checkExternalHost=False,\n countFlowsGroups=False,\n linkFailure=True,\n staticRouteConfigure=True,\n switchFailure=False )",
"def test_task108_main_logic(number, expected_value):\r\n assert algo.Task108.main_logic(number) == expected_value"
] | [
"0.66395617",
"0.6468259",
"0.63304436",
"0.63040304",
"0.6262706",
"0.62446046",
"0.61053824",
"0.610493",
"0.60846525",
"0.6029461",
"0.594432",
"0.59217197",
"0.58982056",
"0.58890414",
"0.5867678",
"0.58675474",
"0.5841142",
"0.5817495",
"0.5798157",
"0.5797314",
"0.5734746",
"0.5707307",
"0.568841",
"0.5649551",
"0.5643574",
"0.56356776",
"0.5633625",
"0.56311536",
"0.56275696",
"0.56274647"
] | 0.70615834 | 0 |
Test case code number 009. | def test_code009(self, *test_code_args, **test_code_kwargs):
print("This is test_code009 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_code010(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code010 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_code008(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code008 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_code001(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code001 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_generate_barcode_ean13(self):\n pass",
"def test_code007(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code007 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_T01():",
"def test_generate_barcode_ean8(self):\n pass",
"def test_code006(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code006 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def checkdigit(code):\n check = sum((i+1)*int(code[i]) for i in range(9)) % 11\n return 'X' if check == 10 else str(check)",
"def test_hackerrank_sample2(self):\n result = find_digits(1012)\n self.assertEquals(result, 3)",
"def start_challenge_code009(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code009 from ChallengeDefinition #\",self.ID, sep='')",
"def test_hackerrank_sample1(self):\n result = find_digits(12)\n self.assertEquals(result, 2)",
"def test_09(self, test):\r\n return test.MANUAL()",
"def test_code002(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code002 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def start_challenge_code008(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code008 from ChallengeDefinition #\",self.ID, sep='')",
"def test_price_code_0(self):\n start_date = datetime(2016, 11, 13)\n # Note this is a Sunday, and must have timedelta added\n\n for x in range(1, 7):\n with self.subTest(x=x):\n date = start_date + timedelta(x)\n result = star_barcode.barcode_filename(date, x)\n self.assertEqual(\n result.split('.')[0][-2:],\n f'{x:02}'\n )",
"def test_mode_digit():\n print('Testing mode_digit')\n\n # Cases given to test this problem\n assert_equals(1, hw1.mode_digit(12121))\n assert_equals(0, hw1.mode_digit(0))\n assert_equals(2, hw1.mode_digit(-122))\n assert_equals(2, hw1.mode_digit(1211232231))\n\n # Additional cases to test numbers with same digit occurance numbers\n assert_equals(3, hw1.mode_digit(-333000221))\n assert_equals(4, hw1.mode_digit(440011))",
"def test_digit_12_min_base(self):\n expected = 5\n digit = 12\n\n assert expected == min_base(digit)",
"def exercise_b2_107():\r\n pass",
"def test_code004(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code004 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_example_day9_pt2():\n assert find_pt2(ex_data, 127) == 62",
"def start_challenge_code010(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code010 from ChallengeDefinition #\",self.ID, sep='')",
"def exercise_b2_113():\r\n pass",
"def exercise_b2_106():\r\n pass",
"def test_task108_main_logic(number, expected_value):\r\n assert algo.Task108.main_logic(number) == expected_value",
"def test_code003(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code003 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_fix_code_typical_code():\r\n\r\n pass",
"def test_sequence_0_to_9(self):\n seqs = list(range(10))\n for seq in seqs:\n with self.subTest(seq=seq):\n result = star_barcode.construct_postscript(\n sequence=seq,\n bwipp_location=self.bwipp,\n issn=self.issn,\n week=20,\n header_line=''\n )\n self.assertGreater(\n result.find(f'{self.issn} {seq:02}'),\n -1\n )",
"def check_digit(raw_code):\n s = sum(code(char) * 2**index for index, char in enumerate(raw_code))\n return s % 11 % 10",
"def test201b(self):\n self.spawn(\"./binary\").stdin(\"0\").stdin(\"2\").stdin(\"201\").stdout(\"11001001\\n\").exit(0)"
] | [
"0.6565975",
"0.63169736",
"0.6043896",
"0.6002611",
"0.59514076",
"0.5924085",
"0.59237796",
"0.5869102",
"0.5832442",
"0.5831146",
"0.58268756",
"0.5801319",
"0.5710214",
"0.5684439",
"0.56668776",
"0.56509745",
"0.56207085",
"0.56201535",
"0.561961",
"0.56169426",
"0.56111157",
"0.5608277",
"0.5601872",
"0.55896693",
"0.55859524",
"0.5553753",
"0.55238557",
"0.5515217",
"0.5512263",
"0.55105126"
] | 0.6604769 | 0 |
Test case code number 010. | def test_code010(self, *test_code_args, **test_code_kwargs):
print("This is test_code010 from TestDefinition #", self.ID, ", test case #", self.test_case_ID, sep='') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_code009(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code009 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_code008(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code008 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_T01():",
"def test_code001(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code001 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_code007(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code007 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_generate_barcode_ean8(self):\n pass",
"def test_generate_barcode_ean13(self):\n pass",
"def test_code006(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code006 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_hello_failed_code_value(self):\n\n value = 0\n\n iter_given_code = self.test_hello_failed_code.__iter__()\n length = self.test_hello_failed_code.__len__()\n\n while value < self.MAX_HELLO_FAILED_CODE_VALUE or length > 0:\n\n self.assertEqual(value, iter_given_code.__next__())\n\n if value < self.MAX_HELLO_FAILED_CODE_VALUE:\n value += 1\n\n length -= 1",
"def test_fix_code_typical_code():\r\n\r\n pass",
"def test_create10(self):\n pass",
"def test_task108_main_logic(number, expected_value):\r\n assert algo.Task108.main_logic(number) == expected_value",
"def test_4():",
"def test_T0():",
"def test_task107_main_logic(number, expected_value):\r\n assert algo.Task107.main_logic(number) == expected_value",
"def test_generic_failed_code_value(self):\n value = 0\n\n for elem in self.test_generic_failed_code:\n self.assertEqual(value, elem)",
"def test_code002(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code002 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test(self):\n # -- Test --\n\n # (1)\n\n # (2)\n\n # (3)\n\n # (4)\n # -- Test --",
"def test_10(self, test):\r\n return test.MANUAL()",
"def test_bad_action_code_value(self):\n\n value = 0\n iter_given_code = self.test_bad_action_code.__iter__()\n length = self.test_bad_action_code.__len__()\n\n while value < self.MAX_BAD_ACTION_CODE_VALUE or length > 0:\n\n self.assertEqual(value, iter_given_code.__next__())\n\n if value < self.MAX_BAD_ACTION_CODE_VALUE:\n value += 1\n\n length -= 1",
"def test_code004(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code004 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')",
"def test_5():",
"def exercise_b2_107():\r\n pass",
"def test_T4():",
"def test_T4():",
"def start_challenge_code010(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code010 from ChallengeDefinition #\",self.ID, sep='')",
"def exercise_b2_106():\r\n pass",
"def test_4_4_1_1(self):\n pass",
"def test_table_mod_failed_code_value(self):\n\n value = 0\n\n iter_given_code = self.test_table_mod_failed_code.__iter__()\n length = self.test_table_mod_failed_code.__len__()\n\n while value < self.MAX_TABLE_MOD_FAILED_CODE_VALUE or length > 0:\n\n self.assertEqual(value, iter_given_code.__next__())\n\n if value < self.MAX_TABLE_MOD_FAILED_CODE_VALUE:\n value += 1\n\n length -= 1",
"def test_code003(self, *test_code_args, **test_code_kwargs):\n print(\"This is test_code003 from TestDefinition #\", self.ID, \", test case #\", self.test_case_ID, sep='')"
] | [
"0.6681018",
"0.6544875",
"0.6449573",
"0.6427582",
"0.6253291",
"0.6229338",
"0.61912423",
"0.6162741",
"0.60461193",
"0.60454655",
"0.60440755",
"0.6033385",
"0.6002442",
"0.5995288",
"0.5994924",
"0.5955519",
"0.5954421",
"0.5939371",
"0.5928967",
"0.59256786",
"0.59242517",
"0.5921335",
"0.5905011",
"0.58974427",
"0.58974427",
"0.5889411",
"0.58619905",
"0.58513266",
"0.5846169",
"0.5822938"
] | 0.70530176 | 0 |
Function to initialize test definition data. | def init_test_definitions():
test_definitions = []
# add info to list in memory, one by one, following signature values
test_def_ID = 5
test_def_name = "VM failure impact on virtual firewall (vFW VNF)"
test_def_challengeDefID = 5
test_def_testCaseID = 5
test_def_VNFIDs = [1]
test_def_associatedMetricsIDs = [2]
test_def_recipientIDs = [2]
test_def_testCLICommandSent = ["pwd","kubectl describe pods --include-uninitialized=false"]
test_def_testAPICommandSent = ["data1","data2"]
test_def_testCodeID = 5
test_definitions.append(TestDefinition(test_def_ID, test_def_name,
test_def_challengeDefID,
test_def_testCaseID,
test_def_VNFIDs,
test_def_associatedMetricsIDs,
test_def_recipientIDs,
test_def_testCLICommandSent,
test_def_testAPICommandSent,
test_def_testCodeID))
# write list to binary file
write_list_bin(test_definitions, FILE_TEST_DEFINITIONS)
return test_definitions | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_setup(self, test_data: list=None):\n print(\"[dataset]: using test setup ...\")\n self.vocabulary = [\"empty\"]\n self.eval_dataset = ABSADataset(data_path=self.dev_path, mode=self.in_mode, task=self.task,\n tokenizer=self.tokenizer, vocab=\"bert\", test=True)\n return",
"def setUpTestData(cls):\n # volunteer user\n common.initialize_empty_volunteer()",
"def setUp(self):\n\n self.data_list = [\n \"hello\", \"world\", \"funilrys\", \"funceble\", \"PyFunceble\", \"pyfunceble\"\n ]\n self.data = \"Hello, this is Fun Ilrys. I just wanted to know how things goes around the tests.\" # pylint: disable=line-too-long",
"def setUp(self):\n self.family = Family()\n self.decoder = Decoder()\n self.data1 = ['Atya', 'Sister-In-Law']\n self.data2 = ['Satya', 'Ketu', 'Male']",
"def setUpTestData(cls):\n data_gen.run()",
"def setUpTestData(cls):\n data_gen.run()",
"def setUpTestData(cls):\n data_gen.run()",
"def setUpTestData(cls):\n data_gen.run()",
"def setUp(self):\n self.dataset = get_test_dataset()",
"def setup_class(self):\n self.data_type = 'pytest'",
"def setUpClass(cls):\n super(Module05Tests, cls).setUpClass()\n cls.datasets = {\n 0: DATASETS_ROOT + 'diffusion_synthetic_normal_L8_r2_slices_41_50_gr15_b1200',\n 1: DATASETS_ROOT + 'filtered',\n 2: DATASETS_ROOT + 'noise'\n }\n cls.data = smns.load_object(file_path=cls.datasets[2])",
"def setUpTestData(cls):\n call_command('loaddata', 'db.json', verbosity=0)",
"def setUp(self):\n\n self.test_data_path = 'testing/test_data/'",
"def setUp(self):\n self.dataset = self.dataset_cls()",
"def setUpTestData(cls):\n\t\thierarchy = Hierarchy(name=\"TestHierarchy\", graph_representation=\"{}\")\n\t\thierarchy.save()\n\t\tevent_type = EventType(name=\"asd\", hierarchy=hierarchy)\n\t\tevent_type.save()\n\t\tquery = Query(\n\t\t\thierarchy=hierarchy, query_string=\"INSERT INTO asd SELECT * FROM asd\",\n\t\t\toutput_event_type=event_type,\n\t\t\teqmn_representation=\"{'output': {'name': 'asd', 'select': '*'}, 'input': {'single': 'asd'}}\")\n\t\tquery.save()\n\t\tquery.input_event_types.add(event_type)\n\t\tquery.save()",
"def test_init(self, testdata: TestData) -> None:\n for data in testdata['observation_type']:\n observation_type = ObservationType(**data)\n for key, value in data.items():\n assert getattr(observation_type, key) == value",
"def test_init(self):\n test_data = (\n (self.segment.input_file, self.EXPECTED_INPUT_FILE,\n \"input file = {v}\".format(v=self.EXPECTED_INPUT_FILE)),\n (self.segment.punch_in, self.EXPECTED_PUNCH_IN,\n \"punch in = {v}\".format(v=self.EXPECTED_PUNCH_IN)),\n (self.segment.punch_out, self.EXPECTED_PUNCH_OUT,\n \"punch out = {v}\".format(v=self.EXPECTED_PUNCH_OUT)),\n (self.segment.input_stream, self.EXPECTED_INPUT_STREAM,\n \"input stream = {v}\".format(v=self.EXPECTED_INPUT_STREAM)),\n (self.segment._temp_file, self.EXPECTED_TEMP_FILE,\n \"temp file = {v}\".format(v=self.EXPECTED_TEMP_FILE)),\n (self.segment._temp_suffix, self.EXPECTED_TEMP_SUFFIX,\n \"temp suffix = {v}\".format(v=self.EXPECTED_TEMP_SUFFIX)),\n (self.segment._temp_files_list, self.EXPECTED_TEMP_LIST,\n \"temp files list = {v}\".format(v=self.EXPECTED_TEMP_LIST)),\n (self.segment._TYPE, self.EXPECTED_TYPE,\n \"type = {v}\".format(v=self.EXPECTED_TYPE)),\n (self.segment._TRIM, self.EXPECTED_TRIM,\n \"trim = {v}\".format(v=self.EXPECTED_TRIM)),\n (self.segment._SETPTS, self.EXPECTED_SETPTS,\n \"setpts = {v}\".format(v=self.EXPECTED_SETPTS)),\n )\n for actual, expected, description in test_data:\n with self.subTest(msg=description):\n self.assertEqual(actual, expected)",
"def setUpClass(cls):\n values = {'A': 'a', 'B': 'b'}\n dummy_record = MetadataRecord(**values)\n cls.records = [dummy_record]",
"def setUp(self):\n self.env = EnvironmentStub(default_data=True,\n enable=['ticket-field-config.*'])\n\n # this is the default data that is in the test Trac database\n self.default = {\n 'priority':['blocker', 'critical', 'major', 'minor', 'trivial'],\n 'severity':[],\n 'resolution': ['fixed','invalid','wontfix','duplicate','worksforme'],\n 'ticket_type':['defect', 'enhancement', 'task'],\n 'component':['component1', 'component2'],\n }\n\n # this is the new data we plan to put in configuration\n self.new = {\n 'priority': ['P1','P2','P3'],\n 'severity': ['High','Medium','Low'],\n 'resolution': ['fixed','wontfix','invalid','duplicate','worksforme'],\n 'ticket_type': ['Bug','Release','Project'],\n 'component': ['new/blog','new/site','old/blog','old/site'],\n }",
"def setUp(self):\n self.TestData = array([0,1,1,4,2,5,2,4,1,2])\n self.NoSingles = array([0,2,2,4,5,0,0,0,0,0])\n self.NoDoubles = array([0,1,1,4,5,0,0,0,0,0])",
"def setUp(self):\n self.data = DatabaseIntermediary()",
"def setUpClass(cls):\n dt_index = pd.date_range(start=datetime(2019, 1, 1, 0, 1), periods=15,\n freq='1Min')\n\n # Create a temperature array with an average of 2.\n temp = [1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3]\n\n # Create ghi array with an average of 3.\n ghi = [2, 3, 4, 2, 3, 4, 2, 3, 4, 2, 3, 4, 2, 3, 4]\n\n # Create DataFrame.\n cls.weather_data = pd.DataFrame({'temperature': temp, 'ghi': ghi},\n index=dt_index)\n\n # Create expected data.\n dt_index_2 = pd.date_range(start=datetime(2019, 1, 1, 0, 15), periods=1,\n freq='15Min')\n cls.expected_data = pd.DataFrame({'temperature': [2], 'ghi': [3]},\n index=dt_index_2)",
"def setUpTestData(cls) -> None:\n\n # Define base url\n cls.url = BASE_URL + '/'\n\n # Make 9 \"normal\" authors.\n cls.authors: typing.List[Author] = [\n create_author() for _ in range(9)\n ]\n\n # Make 1 superuser author.\n cls.super_author: Author = create_author(True)\n\n # Serialize data once so that it's not called in ever test\n cls.serialized_data = AuthorListSerializer(Author.objects.all(), many=True).data",
"def setUpTestData(cls):\n cls.post = PostFactory()",
"def setUpTestData(cls):\n cls.board = Board.objects.create(name = DICT.get('board_name') )\n\n cls.task = Task.objects.create(head = DICT.get('task_head'),\n description = DICT.get('task_description'),\n board = cls.board )",
"def setUpTestData(cls):\n cls.test_resource = Resource(name='Test', slug='test', description='')\n cls.test_resource.full_clean()\n cls.test_resource.save()\n cls.test_faculty = Faculty(name='Test', slug='test')\n cls.test_faculty.full_clean()\n cls.test_faculty.save()\n cls.test_department = Department(name='Test', slug='test', faculty=cls.test_faculty)\n cls.test_department.full_clean()\n cls.test_department.save()\n cls.test_agreement = Agreement(title='test-one',\n slug='test-one',\n resource=cls.test_resource,\n body='body',\n redirect_url='https://example.com',\n redirect_text='example-redirect')\n cls.test_agreement.full_clean()\n cls.test_agreement.save()\n cls.test_user = get_user_model().objects.create_user(username='test',\n first_name='test',\n last_name='test',\n email='[email protected]',\n password='testtesttest')",
"def setUp(self):\n self._default_call_inputs = (\n np.array([[\"one\", \"two\", \"three\"],\n [\"four\", \"five\", \"six\"]]),\n None\n )\n\n self._hash_embedding_dim = 4\n self._embedding_dim = 2\n\n self._default_config = {\n \"hash_embedding_dim\": self._hash_embedding_dim,\n \"embedding_dim\": self._embedding_dim\n }",
"def setUp(self):\n\n # Create a data pipe.\n self.interpreter.pipe.create('test', 'mf')\n\n # Create a temporary file name.\n ds.tmpfile = mktemp()",
"def setUp(self):\n patientgen = PatientsGenerator(0, 1, 0, 'a')\n self.record = patientgen.data.find('record')\n self.gender_sex = patientgen.gender_sex_list\n self.ethnicities = patientgen.ethnicity_list\n # self.female_names = patientgen.data_generator.first_names_female\n # self.male_names = patientgen.data_generator.first_names_male\n # self.last_names = patientgen.data_generator.last_names",
"def setUpClass(cls):\n cls.celltype_analyse = \"Adipocyte - Breast\"\n cls.data_type = \"promoters\"\n cls.sample_type = \"primary cells\"\n cls.algorithm = \"heuristic\"\n cls.k = 4\n cls.thresholds = (0.5, 0, 0) # act, inact, and sparseness, respectively\n cls.parsed = True\n cls.files_path = \"test\""
] | [
"0.7206878",
"0.69830936",
"0.69170666",
"0.6905432",
"0.68886626",
"0.68886626",
"0.68886626",
"0.68886626",
"0.6804058",
"0.6760746",
"0.67286724",
"0.6666416",
"0.6661453",
"0.66520184",
"0.6634772",
"0.66281074",
"0.6593501",
"0.6591871",
"0.657064",
"0.6567946",
"0.65450406",
"0.651043",
"0.6490195",
"0.646695",
"0.6460931",
"0.64570504",
"0.64548755",
"0.64516383",
"0.6447542",
"0.64463925"
] | 0.7271976 | 0 |
Run currently selected challenge code, start portion. Optional parameters can be passed if needed (unnamed or named), interpreted accordingly by selected test code. | def run_start_challenge_code(self, *chall_code_args, **chall_code_kwargs):
try:
code_index = self.challenge_code_ID - 1 # lists are indexed from 0 to N-1
# invoke corresponding start method, via index
self.start_challenge_code_list[code_index](*chall_code_args, **chall_code_kwargs)
except Exception as e:
print(type(e), e)
sys.exit() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def start_challenge_code001(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code001 from ChallengeDefinition #\",self.ID, sep='')",
"def start_challenge_code010(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code010 from ChallengeDefinition #\",self.ID, sep='')",
"def run_test_code(self, *test_code_args, **test_code_kwargs):\n try:\n # here, trigger start code from challenge def (to simulate VM failure), manage Recovery time measurement,\n # specific monitoring of VNF, trigger stop code from challenge def\n\n time1 = datetime.now() # get time as soon as execution starts\n\n # create challenge execution instance\n chall_exec_ID = 1 # ideally, would be incremented, but need to maintain a number of challenge executions somewhere. or could be random.\n chall_exec_name = 'challenge execution' # challenge def ID is already passed\n chall_exec_challDefID = self.challenge_def_ID\n chall_exec = ChallengeExecution(chall_exec_ID, chall_exec_name, chall_exec_challDefID)\n chall_exec.log.append_to_list('challenge execution created')\n\n # create test execution instance\n test_exec_ID = 1 # ideally, would be incremented, but need to maintain a number of text executions somewhere. or could be random.\n test_exec_name = 'test execution' # test def ID is already passed\n test_exec_testDefID = self.ID\n test_exec_userID = '' # or get user name from getpass module: import getpass and test_exec_userID = getpass.getuser()\n test_exec = TestExecution(test_exec_ID, test_exec_name, test_exec_testDefID, chall_exec_ID, test_exec_userID)\n test_exec.log.append_to_list('test execution created')\n\n # get time1 before anything else, so the setup time is counted\n test_exec.start_time = time1\n\n # get challenge definition instance, and start challenge\n challenge_def = get_indexed_item_from_list(self.challenge_def_ID, AutoResilGlobal.challenge_definition_list)\n challenge_def.run_start_challenge_code()\n\n # memorize challenge start time\n chall_exec.start_time = datetime.now()\n test_exec.challenge_start_time = chall_exec.start_time\n\n # call specific test definition code, via table of functions; this code should monitor a VNF and return when restoration is observed\n test_code_index = self.test_code_ID - 1 # lists are indexed from 0 to N-1\n # invoke corresponding method, via index; could check for return code\n self.test_code_list[test_code_index](*test_code_args, **test_code_kwargs)\n\n # memorize restoration detection time and compute recovery time\n test_exec.restoration_detection_time = datetime.now()\n recovery_time_metric_def = get_indexed_item_from_file(1,FILE_METRIC_DEFINITIONS) # get Recovery Time metric definition: ID=1\n test_exec.recovery_time = recovery_time_metric_def.compute(test_exec.challenge_start_time, test_exec.restoration_detection_time)\n\n # stop challenge\n challenge_def.run_stop_challenge_code()\n\n # memorize challenge stop time\n chall_exec.stop_time = datetime.now()\n chall_exec.log.append_to_list('challenge execution finished')\n\n # write results to CSV files, memorize test finish time\n chall_exec.write_to_csv()\n test_exec.finish_time = datetime.now()\n test_exec.log.append_to_list('test execution finished')\n test_exec.write_to_csv()\n\n\n except Exception as e:\n print(type(e), e)\n sys.exit()",
"def start_challenge_code009(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code009 from ChallengeDefinition #\",self.ID, sep='')",
"def start_challenge_code007(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code007 from ChallengeDefinition #\",self.ID, sep='')",
"def start_challenge_code005(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code005 from ChallengeDefinition #\",self.ID, sep='')\n # challenge #5, related to test case #5, i.e. test def #5\n # cloud reference (name and region) should be in clouds.yaml file\n # conn = openstack.connect(cloud='cloudNameForChallenge005', region_name='regionNameForChallenge005')\n # TestDef knows VNF, gets VNF->VM mapping from ONAP, passes VM ref to ChallengeDef\n # ChallengeDef suspends/resumes VM\n # conn.compute.servers() to get list of servers, using VM ID, check server.id and/or server.name\n # conn.compute.suspend_server(this server id)\n\n # June 2018, test of code logic, using newly released OpenStack SDK 0.14.0\n # VM is created arbitrarily, not yet with ONAP\n # Openstack cloud was created by Fuel/MCP, descriptor in clouds.yaml file\n # VM resume done in Horizon (to simulate an ONAP-based recovery)\n conn = openstack.connect(cloud='unh-hpe-openstack-fraser', region_name='RegionOne')\n test_VM_ID = '5d07da11-0e85-4256-9894-482dcee4a5f0' # arbitrary in this test, grab from OpenStack\n test_VM = conn.compute.get_server(test_VM_ID)\n print(' test_VM.name=',test_VM.name)\n print(' test_VM.status=',test_VM.status)\n print(' suspending...')\n conn.compute.suspend_server(test_VM_ID)\n # wait a bit before continuing: ensure VM is actually suspended\n wait_seconds = 10\n print(' waiting',wait_seconds,'seconds...')\n time.sleep(wait_seconds)",
"def start_challenge_code004(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code004 from ChallengeDefinition #\",self.ID, sep='')",
"def start_challenge_code008(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code008 from ChallengeDefinition #\",self.ID, sep='')",
"def main():\n\n args = sys.argv[1:]\n\n if len(args) == 5:\n initial_puzzle = load_initial_puzzle(args[2])\n print_initial_info(args, initial_puzzle)\n\n if puzzle_height != 4 or puzzle_width != 4:\n correct_puzzle = generate_correct_state(puzzle_height, puzzle_width)\n Puzzle.correct_state, Puzzle.puzzle_height, Puzzle.puzzle_width = correct_puzzle, puzzle_height, puzzle_width\n\n first_state = Puzzle(initial_puzzle)\n choose_method(args[0], args[1], first_state, args[3], args[4])\n\n else:\n print(\"Wrong number of arguments!\")",
"def start_challenge_code003(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code003 from ChallengeDefinition #\",self.ID, sep='')",
"def start_challenge_code006(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code006 from ChallengeDefinition #\",self.ID, sep='')",
"def start_challenge_code002(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code002 from ChallengeDefinition #\",self.ID, sep='')",
"def main():\n parser = ArgumentParser(description=__doc__,\n formatter_class=RawTextHelpFormatter)\n parser.add_argument('code', help='Python code to execute')\n group = parser.add_mutually_exclusive_group()\n group.add_argument('-3', action='store_const', dest='python',\n const='python3', help='Explicitly use Python 3')\n group.add_argument('-2', action='store_const', dest='python',\n const='python2', help='Explicitly use Python 2')\n group.add_argument('-p', '--python', help='Specify python interpreter')\n args = parser.parse_args()\n if args.python is not None:\n call([args.python, __file__, args.code])\n else:\n InteractiveInterpreter(LocalsImportDict()).runsource(args.code)",
"def main():\n opt = parse_opts()\n run(opt)",
"def main():\n opt = parse_opts()\n run(opt)",
"def main_code():\n pass",
"def run_min():\n\n # Takes the current path of the command line\n cur_dir = os.getcwd()\n os.chdir(cur_dir)\n\n parse = argparse.ArgumentParser(\n description='httptesting HTTP(s) interface testing framework.',\n prog='httptesting'\n )\n parse.add_argument(\n \"-v\",\n \"--version\",\n action='version',\n version=\"%(prog)s {}\".format(__version__),\n help='Framework version.'\n )\n parse.add_argument(\n \"-f\",\n \"--file\",\n nargs='+',\n default='',\n help='The file path; File absolute or relative path.'\n )\n parse.add_argument(\n \"-d\",\n \"--dir\",\n default='',\n help='The folder path; folder absolute or relative path.'\n )\n parse.add_argument(\n \"-sp\",\n \"--startproject\",\n default='',\n help='Generate test case templates.'\n )\n parse.add_argument(\n \"-conf\",\n \"--config\",\n nargs=\"+\",\n default='',\n help='Basic setting of framework.'\n )\n parse.add_argument(\n \"-har\",\n default='',\n help='Convert the har files to YAML. har file is *.har'\n )\n parse.add_argument(\n \"-c\",\n \"--convert\",\n default='',\n help='Convert the har files to YAML. YAML file is *.yaml'\n )\n\n # Command line arguments are assigned to varibales.\n args = parse.parse_args()\n case_file = args.file\n case_dir = args.dir\n start_project = args.startproject\n config = args.config\n har = args.har\n vert = args.convert\n\n # convert YAML.\n _convert_case_to_yaml(vert)\n\n # Convert har files to YAML.\n _convert_httphar_to_yaml(har)\n\n # Setting global var.\n _parse_config(config)\n\n # False work.\n _false_work(start_project)\n\n # Write file absolute path to file.\n # Get the yaml file name and write to the queue.\n\n _get_file_yaml(case_file)\n _get_dirs_case_yaml(case_dir)\n # Began to call.\n RunTestCase.invoke()",
"def main():\n num_of_tests = int(input())\n\n # iterate over test cases\n for test_case in range(1, num_of_tests + 1):\n result = handle_case()\n printable_result = handle_result(result)\n print(\"Case #{}: {}\".format(test_case, printable_result))",
"def main():\n\n\tif len(sys.argv) > 1 and sys.argv[1]:\n\t\t_, _, hash = read_file(sys.argv[1])\n\t\toffset_x = 0\n\t\toffset_y = 0\n\telse:\n\t\toffset_x, offset_y, hash = screenshot()\n\n\tprint(hash)\n\tgame = eliza_logic.Game(0)\n\tgame.exact_setup(hash)\n\tprint(game)\n\tresult = game.global_solve(-1)\n\tprint(result)\n\n\t# If it was a screen grab, we can actually do this -- just type n/q/c to quit or anything else to continue\n\tif result is not None and offset_x and offset_y:\n\t\tx = six.moves.input(\"Ready for automated solution? \")\n\t\tif x.lower() in [\"n\", \"q\", \"c\"]:\n\t\t\treturn\n\n\t\texecute_solution(offset_x, offset_y, result)",
"def run():\n\tif len(sys.argv) > 1 and sys.argv[1] in {'-V', '--version'}:\n\t\tprint(\"pokesim - Pokémon Battle Simulator - Version %s\" % __version__)\n\t\texit()\n\n\trandom.seed()\n\ttry:\n\t\tmain()\n\texcept (KeyboardInterrupt, EOFError):\n\t\texit(0)",
"def main():\n sec = sys.argv[-1]\n if sec not in ['1', '2', '3', '4']:\n print \"Your command line arguments dont make sense\"\n print \"Usage: python conditionals.py <number>\"\n return\n\n print \"Lets check your solutions!\\n\"\n print \"This is part {}\\n\".format(sec)\n if sec == '1':\n part1()\n elif sec == '2':\n part2()\n elif sec == '3':\n part3()\n elif sec == '4':\n part4()\n else:\n print \"Uh Oh something went wrong\"",
"def startOfTestcase(self):\n pass # nothing to do here. Hence pass statement is called.",
"def main():\n run_program()",
"def main():\n\n BASIC.run(PROGRAM)",
"def main():\n tester = Tester()\n # parse args, load configuration and create all required objects.\n tester.setup_experiment()\n # GO!\n tester.run_experiment()",
"def main():\n parser = argparse.ArgumentParser(\n description='Runs test for C++ implementation of M*')\n parser.add_argument('test_file', help='File describing test cases')\n parser.add_argument('output_file', help='Name of output file')\n parser.add_argument('num_processors', type=int, action='store',\n help='Number of processes to run on each node. ' +\n 'The local host running the primary server will ' +\n 'run one fewer worker processes')\n parser.add_argument('-i', action='store', type=float, default=1.0,\n help='Set inflation factor for the heuristic, ' +\n 'defaults to 1', metavar='INF', dest='inflation')\n parser.add_argument('-t', action='store', type=int, default=120,\n help='Set time limit for planning. Defaults to 2 ' +\n 'minutes', dest='time_limit')\n parser.add_argument('--hosts', action='store',\n default=('python', 'cobra', 'viper', 'anaconda'),\n help='Hostnames/IPs to use as processing nodes.',\n nargs='*', metavar='HOSTNAME')\n\n args = parser.parse_args()\n\n run_cpp_mstar_trial(args.test_file, args.output_file,\n inflation=args.inflation, time_limit=args.time_limit,\n hosts=args.hosts, num_processors=args.num_processors)",
"def run_tests():\n def print_result(result, correct):\n if result == correct:\n print(\" OK!\")\n else:\n print(f\" Failed ({result} != {correct})!\")\n for n, test in enumerate(_tests, start=1):\n print(f\"Running test {n}...\")\n nums = line2ints(test[\"in\"])\n try:\n correct = test[\"part1\"]\n except KeyError:\n pass\n else:\n print(\" Testing part 1...\", end=\"\")\n result = part1(nums, steps=test.get(\"phases1\", 100))\n print_result(result, correct)\n try:\n correct = test[\"part2\"]\n except KeyError:\n pass\n else:\n print(\" Testing part 2...\", end=\"\")\n result = part2(nums, steps=test.get(\"phases2\", 100))\n print_result(result, correct)",
"def main():\n\n# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n# ~~~~ Reads config file ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n print('\\n\\nInterpreting command line options\\n'+'~'*72+'\\n')\n\n parser = ArgumentParser()\n subparser = parser.add_subparsers(\\\n help='run_selafin commands to do', dest='command')\n\n subparser = chop_parser(subparser)\n subparser = scan_parser(subparser)\n subparser = spec_parser(subparser)\n subparser = alter_parser(subparser)\n subparser = merge_parser(subparser)\n subparser = diff_parser(subparser)\n subparser = calcs_parser(subparser, 'calcs', '???')\n subparser = calcs_parser(subparser, 'crunch', '???')\n subparser = calcs_parser(subparser, 'transf', '???')\n subparser = sample_parser(subparser)\n subparser = subdivide_parser(subparser)\n subparser = tesselate_parser(subparser)\n\n options = parser.parse_args()\n\n# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n# ~~~~ Reads code name ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n if options.command == 'scan':\n scan(options)\n elif options.command == 'spec':\n spec(options)\n elif options.command == 'chop':\n chop(options)\n elif options.command == 'alter':\n alter(options)\n elif options.command == 'merge':\n merge(options)\n elif options.command == 'diff':\n diff(options)\n elif options.command == 'sample':\n sample(options)\n elif options.command in ['calcs', 'crunch', 'transf']:\n calcs(options, options.command)\n elif options.command == 'subdivide':\n subdivide(options)\n elif options.command == 'tessellate':\n tesselate(options)\n else:\n raise TelemacException(\\\n '\\nDo not know what to do with '\n 'this code name: {}'.format(options.command))\n\n# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n# ~~~~ Jenkins' success message ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n print('\\n\\nMy work is done\\n\\n')\n\n sys.exit(0)",
"def run():\n # Idle state, waiting for the challenge to start\n time_left = initialize_timer()\n while time_left == \"WAITING\":\n print_debug(\"Found waiting flag, will wait until something else happens\")\n time_left = initialize_timer()\n time.sleep(10)\n if time_left == \"END!\":\n print_debug(\"Challenge already ended\")\n return\n config.TIME_LEFT = time_left\n while config.TIME_LEFT >= 0:\n print_debug(\"Ending all X sessions\")\n end_all_X_sessions()\n print_debug(\"Saving time in specific location\")\n save_time_left(config.TIME_LEFT)\n print_debug(\"Sleeping: \"+str(config.RUN_EVERY*60))\n time.sleep(config.RUN_EVERY*60)\n config.TIME_LEFT = str(int(config.TIME_LEFT)-config.RUN_EVERY*60)\n print_debug(\"Challenge ended\")\n save_time_left(\"END!\")\n print_debug(\"Killing myself\")\n kill_itself()",
"def main(args):\n module = args.module\n\n if args.step not in STEP_OPTIONS:\n raise ValueError(\n f\"{args.step} is an unknown option. Your options are {STEP_OPTIONS}.\"\n )\n\n if module == \"structure_plan\":\n run_module_structure_plan(args)\n elif module == \"floor_plan\":\n run_module_floor_plan(args)\n elif module == \"complete_floorplan\":\n run_module_complete_floorplan(args)\n elif module == \"ground_plan\":\n run_module_ground_plan(args)\n elif module == \"text_to_gdf\":\n run_module_text_to_gdf(args)\n else:\n raise ValueError(\n f\"{module} is an unknown option. Your options are {MODULE_OPTIONS}.\"\n )"
] | [
"0.6356479",
"0.63256943",
"0.6307231",
"0.624422",
"0.6066037",
"0.5959313",
"0.5919837",
"0.5919461",
"0.5795596",
"0.57326335",
"0.57091653",
"0.5697462",
"0.5662525",
"0.5604493",
"0.5604493",
"0.5509749",
"0.5489085",
"0.5474635",
"0.5467004",
"0.54237247",
"0.5407435",
"0.53964406",
"0.5370216",
"0.5369432",
"0.5349055",
"0.5325414",
"0.53215766",
"0.5318295",
"0.5314745",
"0.5305838"
] | 0.66820836 | 0 |
Run currently selected challenge code, stop portion. Optional parameters can be passed if needed (unnamed or named), interpreted accordingly by selected test code. | def run_stop_challenge_code(self, *chall_code_args, **chall_code_kwargs):
try:
code_index = self.challenge_code_ID - 1 # lists are indexed from 0 to N-1
# invoke corresponding stop method, via index
self.stop_challenge_code_list[code_index](*chall_code_args, **chall_code_kwargs)
except Exception as e:
print(type(e), e)
sys.exit() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run_test_code(self, *test_code_args, **test_code_kwargs):\n try:\n # here, trigger start code from challenge def (to simulate VM failure), manage Recovery time measurement,\n # specific monitoring of VNF, trigger stop code from challenge def\n\n time1 = datetime.now() # get time as soon as execution starts\n\n # create challenge execution instance\n chall_exec_ID = 1 # ideally, would be incremented, but need to maintain a number of challenge executions somewhere. or could be random.\n chall_exec_name = 'challenge execution' # challenge def ID is already passed\n chall_exec_challDefID = self.challenge_def_ID\n chall_exec = ChallengeExecution(chall_exec_ID, chall_exec_name, chall_exec_challDefID)\n chall_exec.log.append_to_list('challenge execution created')\n\n # create test execution instance\n test_exec_ID = 1 # ideally, would be incremented, but need to maintain a number of text executions somewhere. or could be random.\n test_exec_name = 'test execution' # test def ID is already passed\n test_exec_testDefID = self.ID\n test_exec_userID = '' # or get user name from getpass module: import getpass and test_exec_userID = getpass.getuser()\n test_exec = TestExecution(test_exec_ID, test_exec_name, test_exec_testDefID, chall_exec_ID, test_exec_userID)\n test_exec.log.append_to_list('test execution created')\n\n # get time1 before anything else, so the setup time is counted\n test_exec.start_time = time1\n\n # get challenge definition instance, and start challenge\n challenge_def = get_indexed_item_from_list(self.challenge_def_ID, AutoResilGlobal.challenge_definition_list)\n challenge_def.run_start_challenge_code()\n\n # memorize challenge start time\n chall_exec.start_time = datetime.now()\n test_exec.challenge_start_time = chall_exec.start_time\n\n # call specific test definition code, via table of functions; this code should monitor a VNF and return when restoration is observed\n test_code_index = self.test_code_ID - 1 # lists are indexed from 0 to N-1\n # invoke corresponding method, via index; could check for return code\n self.test_code_list[test_code_index](*test_code_args, **test_code_kwargs)\n\n # memorize restoration detection time and compute recovery time\n test_exec.restoration_detection_time = datetime.now()\n recovery_time_metric_def = get_indexed_item_from_file(1,FILE_METRIC_DEFINITIONS) # get Recovery Time metric definition: ID=1\n test_exec.recovery_time = recovery_time_metric_def.compute(test_exec.challenge_start_time, test_exec.restoration_detection_time)\n\n # stop challenge\n challenge_def.run_stop_challenge_code()\n\n # memorize challenge stop time\n chall_exec.stop_time = datetime.now()\n chall_exec.log.append_to_list('challenge execution finished')\n\n # write results to CSV files, memorize test finish time\n chall_exec.write_to_csv()\n test_exec.finish_time = datetime.now()\n test_exec.log.append_to_list('test execution finished')\n test_exec.write_to_csv()\n\n\n except Exception as e:\n print(type(e), e)\n sys.exit()",
"def stop_challenge_code005(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code005 from ChallengeDefinition #\",self.ID, sep='')\n # challenge #5, related to test case #5, i.e. test def #5\n # cloud reference (name and region) should be in clouds.yaml file\n # conn = openstack.connect(cloud='cloudNameForChallenge005', region_name='regionNameForChallenge005')\n # TestDef knows VNF, gets VNF->VM mapping from ONAP, passes VM ref to ChallengeDef\n # ChallengeDef suspends/resumes VM\n # conn.compute.servers() to get list of servers, using VM ID, check server.id and/or server.name\n # conn.compute.conn.compute.resume_server(this server id)\n\n # June 2018, test of code logic, using newly released OpenStack SDK 0.14.0\n # this resume would be the normal challenge stop, but not in the case of this test\n conn = openstack.connect(cloud='unh-hpe-openstack-fraser', region_name='RegionOne')\n test_VM_ID = '5d07da11-0e85-4256-9894-482dcee4a5f0' # arbitrary in this test, grab from OpenStack\n test_VM = conn.compute.get_server(test_VM_ID)\n print(' test_VM.name=',test_VM.name)\n print(' test_VM.status=',test_VM.status)\n print(' suspending...')\n conn.compute.resume_server(test_VM_ID)",
"def stop_challenge_code001(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code001 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code010(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code010 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code009(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code009 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code007(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code007 from ChallengeDefinition #\",self.ID, sep='')",
"def run_start_challenge_code(self, *chall_code_args, **chall_code_kwargs):\n\n try:\n code_index = self.challenge_code_ID - 1 # lists are indexed from 0 to N-1\n # invoke corresponding start method, via index\n self.start_challenge_code_list[code_index](*chall_code_args, **chall_code_kwargs)\n except Exception as e:\n print(type(e), e)\n sys.exit()",
"def stop_challenge_code008(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code008 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code004(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code004 from ChallengeDefinition #\",self.ID, sep='')",
"def run():\n # Idle state, waiting for the challenge to start\n time_left = initialize_timer()\n while time_left == \"WAITING\":\n print_debug(\"Found waiting flag, will wait until something else happens\")\n time_left = initialize_timer()\n time.sleep(10)\n if time_left == \"END!\":\n print_debug(\"Challenge already ended\")\n return\n config.TIME_LEFT = time_left\n while config.TIME_LEFT >= 0:\n print_debug(\"Ending all X sessions\")\n end_all_X_sessions()\n print_debug(\"Saving time in specific location\")\n save_time_left(config.TIME_LEFT)\n print_debug(\"Sleeping: \"+str(config.RUN_EVERY*60))\n time.sleep(config.RUN_EVERY*60)\n config.TIME_LEFT = str(int(config.TIME_LEFT)-config.RUN_EVERY*60)\n print_debug(\"Challenge ended\")\n save_time_left(\"END!\")\n print_debug(\"Killing myself\")\n kill_itself()",
"def stop_challenge_code003(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code003 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code002(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code002 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code006(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code006 from ChallengeDefinition #\",self.ID, sep='')",
"def run(self):\n\n #self.call('switch', 'test')\n val = self.find_start_condition()\n print val\n # Only on of these values will occur. Find which\n if ((val == 0) or (val==3)):\n # kill server\n self.ctrl_client.exit_server()\n # kill client\n self.ctrl_client.clean_up()\n return\n elif (val == 1 or val==2):\n self.go_through_tunnel()\n time.sleep(1)\n self.do_Zone_B()\n self.ctrl_client.exit_server()\n self.ctrl_client.clean_up()\n return",
"def start_challenge_code005(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code005 from ChallengeDefinition #\",self.ID, sep='')\n # challenge #5, related to test case #5, i.e. test def #5\n # cloud reference (name and region) should be in clouds.yaml file\n # conn = openstack.connect(cloud='cloudNameForChallenge005', region_name='regionNameForChallenge005')\n # TestDef knows VNF, gets VNF->VM mapping from ONAP, passes VM ref to ChallengeDef\n # ChallengeDef suspends/resumes VM\n # conn.compute.servers() to get list of servers, using VM ID, check server.id and/or server.name\n # conn.compute.suspend_server(this server id)\n\n # June 2018, test of code logic, using newly released OpenStack SDK 0.14.0\n # VM is created arbitrarily, not yet with ONAP\n # Openstack cloud was created by Fuel/MCP, descriptor in clouds.yaml file\n # VM resume done in Horizon (to simulate an ONAP-based recovery)\n conn = openstack.connect(cloud='unh-hpe-openstack-fraser', region_name='RegionOne')\n test_VM_ID = '5d07da11-0e85-4256-9894-482dcee4a5f0' # arbitrary in this test, grab from OpenStack\n test_VM = conn.compute.get_server(test_VM_ID)\n print(' test_VM.name=',test_VM.name)\n print(' test_VM.status=',test_VM.status)\n print(' suspending...')\n conn.compute.suspend_server(test_VM_ID)\n # wait a bit before continuing: ensure VM is actually suspended\n wait_seconds = 10\n print(' waiting',wait_seconds,'seconds...')\n time.sleep(wait_seconds)",
"def main(tests, sources, fail_fast, config=None):\n try:\n smokr.run_tests(tests.split(','), sources.split(','),\n fail_fast, config)\n return 0\n except AssertionError:\n sys.exit(1)",
"def main():\n opt = parse_opts()\n run(opt)",
"def main():\n opt = parse_opts()\n run(opt)",
"def main():\n num_of_tests = int(input())\n\n # iterate over test cases\n for test_case in range(1, num_of_tests + 1):\n result = handle_case()\n printable_result = handle_result(result)\n print(\"Case #{}: {}\".format(test_case, printable_result))",
"def run():\n\tif len(sys.argv) > 1 and sys.argv[1] in {'-V', '--version'}:\n\t\tprint(\"pokesim - Pokémon Battle Simulator - Version %s\" % __version__)\n\t\texit()\n\n\trandom.seed()\n\ttry:\n\t\tmain()\n\texcept (KeyboardInterrupt, EOFError):\n\t\texit(0)",
"def start_challenge_code008(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code008 from ChallengeDefinition #\",self.ID, sep='')",
"def start_challenge_code007(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code007 from ChallengeDefinition #\",self.ID, sep='')",
"def main():\n\n args = sys.argv[1:]\n\n if len(args) == 5:\n initial_puzzle = load_initial_puzzle(args[2])\n print_initial_info(args, initial_puzzle)\n\n if puzzle_height != 4 or puzzle_width != 4:\n correct_puzzle = generate_correct_state(puzzle_height, puzzle_width)\n Puzzle.correct_state, Puzzle.puzzle_height, Puzzle.puzzle_width = correct_puzzle, puzzle_height, puzzle_width\n\n first_state = Puzzle(initial_puzzle)\n choose_method(args[0], args[1], first_state, args[3], args[4])\n\n else:\n print(\"Wrong number of arguments!\")",
"def main():\n\n# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n# ~~~~ Reads config file ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n print('\\n\\nInterpreting command line options\\n'+'~'*72+'\\n')\n\n parser = ArgumentParser()\n subparser = parser.add_subparsers(\\\n help='run_selafin commands to do', dest='command')\n\n subparser = chop_parser(subparser)\n subparser = scan_parser(subparser)\n subparser = spec_parser(subparser)\n subparser = alter_parser(subparser)\n subparser = merge_parser(subparser)\n subparser = diff_parser(subparser)\n subparser = calcs_parser(subparser, 'calcs', '???')\n subparser = calcs_parser(subparser, 'crunch', '???')\n subparser = calcs_parser(subparser, 'transf', '???')\n subparser = sample_parser(subparser)\n subparser = subdivide_parser(subparser)\n subparser = tesselate_parser(subparser)\n\n options = parser.parse_args()\n\n# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n# ~~~~ Reads code name ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n if options.command == 'scan':\n scan(options)\n elif options.command == 'spec':\n spec(options)\n elif options.command == 'chop':\n chop(options)\n elif options.command == 'alter':\n alter(options)\n elif options.command == 'merge':\n merge(options)\n elif options.command == 'diff':\n diff(options)\n elif options.command == 'sample':\n sample(options)\n elif options.command in ['calcs', 'crunch', 'transf']:\n calcs(options, options.command)\n elif options.command == 'subdivide':\n subdivide(options)\n elif options.command == 'tessellate':\n tesselate(options)\n else:\n raise TelemacException(\\\n '\\nDo not know what to do with '\n 'this code name: {}'.format(options.command))\n\n# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n# ~~~~ Jenkins' success message ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n print('\\n\\nMy work is done\\n\\n')\n\n sys.exit(0)",
"def start_challenge_code010(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code010 from ChallengeDefinition #\",self.ID, sep='')",
"def start_challenge_code009(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code009 from ChallengeDefinition #\",self.ID, sep='')",
"def main():\n codedmessage = ReadCodedMessage()\n PlayCodedMessage(codedmessage)\n PlayAgain(codedmessage)\n message = DecodeCodedMessage(codedmessage)\n if (message==\"?\"):\n if DEBUG:print(\"Unknown code - try again!\")\n else:\n if DEBUG:print (\"Message: \", message)",
"def main():\n\n\tif len(sys.argv) > 1 and sys.argv[1]:\n\t\t_, _, hash = read_file(sys.argv[1])\n\t\toffset_x = 0\n\t\toffset_y = 0\n\telse:\n\t\toffset_x, offset_y, hash = screenshot()\n\n\tprint(hash)\n\tgame = eliza_logic.Game(0)\n\tgame.exact_setup(hash)\n\tprint(game)\n\tresult = game.global_solve(-1)\n\tprint(result)\n\n\t# If it was a screen grab, we can actually do this -- just type n/q/c to quit or anything else to continue\n\tif result is not None and offset_x and offset_y:\n\t\tx = six.moves.input(\"Ready for automated solution? \")\n\t\tif x.lower() in [\"n\", \"q\", \"c\"]:\n\t\t\treturn\n\n\t\texecute_solution(offset_x, offset_y, result)",
"def start_challenge_code002(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code002 from ChallengeDefinition #\",self.ID, sep='')",
"def main_code():\n pass"
] | [
"0.66960603",
"0.6296914",
"0.6186369",
"0.61045015",
"0.60794216",
"0.60040975",
"0.5985018",
"0.59766287",
"0.58844936",
"0.5848492",
"0.584254",
"0.581427",
"0.5780125",
"0.5620953",
"0.55936074",
"0.5545952",
"0.5510592",
"0.5510592",
"0.5497748",
"0.54770064",
"0.54618114",
"0.54350454",
"0.5427054",
"0.54245377",
"0.5411745",
"0.54098743",
"0.54040486",
"0.5377801",
"0.5364428",
"0.53531224"
] | 0.65709126 | 1 |
Start Challenge code number 001. | def start_challenge_code001(self, *chall_code_args, **chall_code_kwargs):
print("This is start_challenge_code001 from ChallengeDefinition #",self.ID, sep='') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def start_challenge_code010(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code010 from ChallengeDefinition #\",self.ID, sep='')",
"def start_challenge_code009(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code009 from ChallengeDefinition #\",self.ID, sep='')",
"def start_challenge_code008(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code008 from ChallengeDefinition #\",self.ID, sep='')",
"def start_challenge_code007(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code007 from ChallengeDefinition #\",self.ID, sep='')",
"def start_challenge_code006(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code006 from ChallengeDefinition #\",self.ID, sep='')",
"def start_challenge_code004(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code004 from ChallengeDefinition #\",self.ID, sep='')",
"def start_challenge_code002(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code002 from ChallengeDefinition #\",self.ID, sep='')",
"def start_challenge_code003(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code003 from ChallengeDefinition #\",self.ID, sep='')",
"def LOWER_START():\n return 7",
"def START_BANK() -> int:\n return 100",
"def start_with_the_beggining(rna: str):\n return 0",
"def startOfTestcase(self):\n pass # nothing to do here. Hence pass statement is called.",
"def create_code():\n\n code = [0, 0, 0, 0]\n\n for i in range(4):\n value = random.randint(1, 8) # 8 possible digits\n while value in code:\n value = random.randint(1, 8) # 8 possible digits\n code[i] = value\n \n #print(code)\n return code",
"def UPPER_START():\n return 1",
"def start_challenge_code005(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code005 from ChallengeDefinition #\",self.ID, sep='')\n # challenge #5, related to test case #5, i.e. test def #5\n # cloud reference (name and region) should be in clouds.yaml file\n # conn = openstack.connect(cloud='cloudNameForChallenge005', region_name='regionNameForChallenge005')\n # TestDef knows VNF, gets VNF->VM mapping from ONAP, passes VM ref to ChallengeDef\n # ChallengeDef suspends/resumes VM\n # conn.compute.servers() to get list of servers, using VM ID, check server.id and/or server.name\n # conn.compute.suspend_server(this server id)\n\n # June 2018, test of code logic, using newly released OpenStack SDK 0.14.0\n # VM is created arbitrarily, not yet with ONAP\n # Openstack cloud was created by Fuel/MCP, descriptor in clouds.yaml file\n # VM resume done in Horizon (to simulate an ONAP-based recovery)\n conn = openstack.connect(cloud='unh-hpe-openstack-fraser', region_name='RegionOne')\n test_VM_ID = '5d07da11-0e85-4256-9894-482dcee4a5f0' # arbitrary in this test, grab from OpenStack\n test_VM = conn.compute.get_server(test_VM_ID)\n print(' test_VM.name=',test_VM.name)\n print(' test_VM.status=',test_VM.status)\n print(' suspending...')\n conn.compute.suspend_server(test_VM_ID)\n # wait a bit before continuing: ensure VM is actually suspended\n wait_seconds = 10\n print(' waiting',wait_seconds,'seconds...')\n time.sleep(wait_seconds)",
"def test_counter_start_at_zero(self):\n pass",
"def _prepare(self):\n number_of_numbers = 10\n code_length = safe_int_input(\"How long is the code to be guessed? (4-10): \", 4, 10)\n numbers = '1234567890'[:number_of_numbers]\n code = ''.join(random.choices(numbers, k=code_length))",
"def _prepare(self):\n for n in range(4):\n self._code += str(random.randint(1, 9))",
"def main():\n import sys\n plain = raw_input(\"Please enter the plaintext string you want to encode: \")\n print \"Here are the ASCII codes for that text, space-separated:\"\n for e in plain:\n print ord(e),\n print\n if raw_input(\"Press RETURN to exit.\"):\n sys.exit(0)",
"def _sendStart_result (self, (code, data)) :\n\n assert code == \"REPLY_SUCCESS\"\n\n return code",
"def show_instructions():\n\n print('4-digit Code has been set. Digits in range 1 to 8. You have 12 turns to break it.')",
"def initiate_codes(lottery_file):\n # load the lottery data\n lottery = {}\n with open(lottery_file) as lf:\n head = lf.readline()\n prev = None\n for line in lf:\n info = line.strip().split('|')\n issue = info[0]\n nums = map(int, info[1:])\n lottery[issue] = {\"numbers\": nums, \"previous\":prev, \"issue\": issue}\n prev = issue\n\n # get the missing info for 20150901001\n issues = sorted(lottery.keys())\n lot_miss_info = {}\n for issue in issues[100:]:\n lot_miss_info[issue] = {}\n # 0: ten thousand, 1: thousand, 2: hundred, 3: ten, 4: unit\n for i in range(5):\n lot_miss_info[issue][i] = {}\n for dig in range(10):\n lot_miss_info[issue][i][dig] = 0\n mis_count = 0\n # trace back and get the previous appearence\n cur = issue\n while True:\n lot = lottery[cur]\n if lot[\"numbers\"][i] == dig:\n break\n else:\n mis_count += 1\n cur = lot[\"previous\"]\n lot_miss_info[issue][i][dig] = mis_count\n\n # compute the codes information\n codes = {}\n for issue in issues[100:]:\n # currently we only consider unit(4) and ten(3) digit codes\n # we have defined 7 codes\n # luo_ma: 当前中奖数字\n # leng_1_ma: 当前期中最大间隔的数字\n # leng_2_ma: 当前期中第二大间隔的数字\n # sha_ma: 十位(落码-1), 个位(落码*3+3)\n # chuan_1: 落码-1\n # chuan_2: 落码+1\n # 隔码: 上一期的落码\n codes[issue] = {}\n for dig in range(3, 5):\n code = compute_code(issue, dig, lottery, lot_miss_info)\n codes[issue][dig] = code\n\n # compute the match information\n matched = {} # 只匹配落/杀/冷12码\n full_matched = {}# 匹配所有6码\n match_keys = [\"luo_ma\", \"leng_1_ma\", \"leng_2_ma\", \"sha_ma\"]\n \n full_match_keys = match_keys + [\"chuan_1\", \"chuan_2\", \"ge_ma\"]\n for issue in issues[101:]:\n prev_id = lottery[issue][\"previous\"]\n numbers = lottery[issue][\"numbers\"]\n prev_code = codes[prev_id]\n flag, full_flag = update_match(lottery[issue], prev_code)\n matched[issue] = flag\n full_matched[issue] = full_flag\n\n # compute the l4z1hbz\n l4z1hbz_seq = {}\n for issue in issues[108:]:\n l4z1hbz_seq[issue] = compute_l4z1hbz(issue, matched, lottery)\n\n return lottery, lot_miss_info, codes, matched, full_matched, l4z1hbz_seq",
"def LOWER_STOP():\n return 13",
"def generate_game_code() -> int:\n while True:\n # code will only contain digits\n code_options = string.digits\n generated_game_code = ''.join(secrets.choice(code_options) for i in range(7))\n if Game.objects.filter(game_code=generated_game_code).count() == 0:\n break\n return int(generated_game_code)",
"def first_code_word():\r\n\r\n code = ''.join(f'{random.randint(1, 3)}f{random.randint(1,3)}e')\r\n return code",
"def checkdigit(code):\n check = sum((i+1)*int(code[i]) for i in range(9)) % 11\n return 'X' if check == 10 else str(check)",
"def startGame():\n\n\tprint(\"\\nOK! Let's play!\")\n\tprint(\"--------------------------------------------------------------------------------------\")\n\tprint(\"Note:\")\n\tprint(\"\\tNow you must be kept in your mind a random integer from specific range and I must be guessing that number!\")\n\tprint(\"\\tIf you answer honestly all of my questions I certainly will guess that number!\")\n\tprint(\"--------------------------------------------------------------------------------------\\n\")\n\tgameLogic()",
"def start_algorithm(self):\r\n pass",
"def run_start_challenge_code(self, *chall_code_args, **chall_code_kwargs):\n\n try:\n code_index = self.challenge_code_ID - 1 # lists are indexed from 0 to N-1\n # invoke corresponding start method, via index\n self.start_challenge_code_list[code_index](*chall_code_args, **chall_code_kwargs)\n except Exception as e:\n print(type(e), e)\n sys.exit()",
"def getFirstChar(self):\n if self.i1 is None:\n self.firstChar = None\n else:\n chrNum = int(self.i1 // 10)\n if chrNum < 26:\n # should result in something like A4 for 4, B6 for 16\n self.firstChar = chr(ASCII_LETTER_A + chrNum) + str(self.i1 % 10)\n else:\n runLog.warning(\n \"invalid location. ring {0} is too many rings!\".format(self.i1),\n self,\n )"
] | [
"0.6894425",
"0.67563266",
"0.6553159",
"0.639238",
"0.6259374",
"0.62578154",
"0.6103054",
"0.6056246",
"0.594367",
"0.5615532",
"0.55993485",
"0.5537023",
"0.5364862",
"0.53640056",
"0.5357535",
"0.5343135",
"0.5287774",
"0.524516",
"0.5242702",
"0.5179436",
"0.51720536",
"0.5165508",
"0.5161308",
"0.5156487",
"0.514208",
"0.5063248",
"0.5062622",
"0.50513697",
"0.50488526",
"0.504729"
] | 0.69719726 | 0 |
Stop Challenge code number 001. | def stop_challenge_code001(self, *chall_code_args, **chall_code_kwargs):
print("This is stop_challenge_code001 from ChallengeDefinition #",self.ID, sep='') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def stop_challenge_code010(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code010 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code009(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code009 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code008(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code008 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code007(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code007 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code002(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code002 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code006(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code006 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code004(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code004 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code003(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code003 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code005(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code005 from ChallengeDefinition #\",self.ID, sep='')\n # challenge #5, related to test case #5, i.e. test def #5\n # cloud reference (name and region) should be in clouds.yaml file\n # conn = openstack.connect(cloud='cloudNameForChallenge005', region_name='regionNameForChallenge005')\n # TestDef knows VNF, gets VNF->VM mapping from ONAP, passes VM ref to ChallengeDef\n # ChallengeDef suspends/resumes VM\n # conn.compute.servers() to get list of servers, using VM ID, check server.id and/or server.name\n # conn.compute.conn.compute.resume_server(this server id)\n\n # June 2018, test of code logic, using newly released OpenStack SDK 0.14.0\n # this resume would be the normal challenge stop, but not in the case of this test\n conn = openstack.connect(cloud='unh-hpe-openstack-fraser', region_name='RegionOne')\n test_VM_ID = '5d07da11-0e85-4256-9894-482dcee4a5f0' # arbitrary in this test, grab from OpenStack\n test_VM = conn.compute.get_server(test_VM_ID)\n print(' test_VM.name=',test_VM.name)\n print(' test_VM.status=',test_VM.status)\n print(' suspending...')\n conn.compute.resume_server(test_VM_ID)",
"def LOWER_STOP():\n return 13",
"def run_stop_challenge_code(self, *chall_code_args, **chall_code_kwargs):\n try:\n code_index = self.challenge_code_ID - 1 # lists are indexed from 0 to N-1\n # invoke corresponding stop method, via index\n self.stop_challenge_code_list[code_index](*chall_code_args, **chall_code_kwargs)\n except Exception as e:\n print(type(e), e)\n sys.exit()",
"def UPPER_STOP():\n return 6",
"def stop(self):\n return self.writevar('\\xC5\\x98',0,1)",
"def TerminalClientStop(self, exitCode=200):\n pass",
"def Stop():\n timer.stop()\n global total_stop\n global success_stop\n total_stop += 1\n if n % 10 == 0:\n success_stop = success_stop + 1",
"def stop():\n global total_attempts, successful_stops, running\n timer.stop()\n running = False\n if running == False:\n if counter % 10 == 0 and counter != 0:\n successful_stops += 1\n total_attempts += 1\n elif counter != 0:\n total_attempts += 1",
"def Stop(self, message=\"\"):\n delta = int(self.length - self.nbits)\n sys.stdout.write(\" \" * delta + \"] \" + message + \"\\n\")",
"def stop() -> None:",
"def stop():",
"def stop():",
"def stop():",
"def stop():",
"def manual_stop(self):\n self.manual_seqnum = 0\n return self.send(\"app_rc_end\")",
"def stop(self,c,data):\r\n self.board.stop()\r\n return True",
"def InterfaceClientStop(self, exitCode=200): \n pass",
"def stop(self):\n return _spacegrant_swig.invert_bit_sptr_stop(self)",
"def leave_now(code):\n exit(int(code))",
"def stop(self) -> None:",
"def stop(self) -> None:",
"def stop(self, pin):\n raise NotImplementedError"
] | [
"0.75793904",
"0.7471198",
"0.742199",
"0.7307441",
"0.7235597",
"0.71809566",
"0.7176245",
"0.7097188",
"0.6406136",
"0.6249265",
"0.5987562",
"0.5751092",
"0.56848633",
"0.56230175",
"0.5573714",
"0.55355936",
"0.55021286",
"0.5501443",
"0.5487698",
"0.5487698",
"0.5487698",
"0.5487698",
"0.5439028",
"0.5389517",
"0.53435755",
"0.5317032",
"0.5237805",
"0.52272135",
"0.52272135",
"0.5221478"
] | 0.753365 | 1 |
Start Challenge code number 004. | def start_challenge_code004(self, *chall_code_args, **chall_code_kwargs):
print("This is start_challenge_code004 from ChallengeDefinition #",self.ID, sep='') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def start_challenge_code009(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code009 from ChallengeDefinition #\",self.ID, sep='')",
"def start_challenge_code001(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code001 from ChallengeDefinition #\",self.ID, sep='')",
"def start_challenge_code010(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code010 from ChallengeDefinition #\",self.ID, sep='')",
"def start_challenge_code008(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code008 from ChallengeDefinition #\",self.ID, sep='')",
"def start_challenge_code007(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code007 from ChallengeDefinition #\",self.ID, sep='')",
"def start_challenge_code006(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code006 from ChallengeDefinition #\",self.ID, sep='')",
"def start_challenge_code003(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code003 from ChallengeDefinition #\",self.ID, sep='')",
"def start_challenge_code002(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code002 from ChallengeDefinition #\",self.ID, sep='')",
"def LOWER_START():\n return 7",
"def show_instructions():\n\n print('4-digit Code has been set. Digits in range 1 to 8. You have 12 turns to break it.')",
"def start_with_the_beggining(rna: str):\n return 0",
"def start_challenge_code005(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code005 from ChallengeDefinition #\",self.ID, sep='')\n # challenge #5, related to test case #5, i.e. test def #5\n # cloud reference (name and region) should be in clouds.yaml file\n # conn = openstack.connect(cloud='cloudNameForChallenge005', region_name='regionNameForChallenge005')\n # TestDef knows VNF, gets VNF->VM mapping from ONAP, passes VM ref to ChallengeDef\n # ChallengeDef suspends/resumes VM\n # conn.compute.servers() to get list of servers, using VM ID, check server.id and/or server.name\n # conn.compute.suspend_server(this server id)\n\n # June 2018, test of code logic, using newly released OpenStack SDK 0.14.0\n # VM is created arbitrarily, not yet with ONAP\n # Openstack cloud was created by Fuel/MCP, descriptor in clouds.yaml file\n # VM resume done in Horizon (to simulate an ONAP-based recovery)\n conn = openstack.connect(cloud='unh-hpe-openstack-fraser', region_name='RegionOne')\n test_VM_ID = '5d07da11-0e85-4256-9894-482dcee4a5f0' # arbitrary in this test, grab from OpenStack\n test_VM = conn.compute.get_server(test_VM_ID)\n print(' test_VM.name=',test_VM.name)\n print(' test_VM.status=',test_VM.status)\n print(' suspending...')\n conn.compute.suspend_server(test_VM_ID)\n # wait a bit before continuing: ensure VM is actually suspended\n wait_seconds = 10\n print(' waiting',wait_seconds,'seconds...')\n time.sleep(wait_seconds)",
"def START_BANK() -> int:\n return 100",
"def startOfTestcase(self):\n pass # nothing to do here. Hence pass statement is called.",
"def start_algorithm(self):\r\n pass",
"def start():\n # Have the car begin at a stop\n rc.drive.stop()\n\n global width\n global height\n width = rc.camera.get_width()\n height = rc.camera.get_height()\n # rc.drive.set_max_speed(1)\n\n global currentChallenge\n global oldState\n currentChallenge = Challenge.ManualControl\n oldState = Challenge.Line\n\n global colorPriority\n colorPriority = None\n\n global oldCones\n oldCones = None\n\n global last_waypoint_type\n last_waypoint_type = None\n\n # Print start message\n print(\">> Final Challenge - Time Trials\")",
"def _prepare(self):\n number_of_numbers = 10\n code_length = safe_int_input(\"How long is the code to be guessed? (4-10): \", 4, 10)\n numbers = '1234567890'[:number_of_numbers]\n code = ''.join(random.choices(numbers, k=code_length))",
"def UPPER_START():\n return 1",
"def LOWER_STOP():\n return 13",
"def primer_start_fix(self):\r\n #TODO this function will not be used anymore, remove?\r\n if self.type in [\"forward_primer\", \"reverse_primer\", \"PCR_product\"]:\r\n self.start += 1\r\n if self.type == \"region\" and self.source == \"Primer3\":\r\n # this is the region containing the primers\r\n self.start += 1",
"def _prepare(self):\n for n in range(4):\n self._code += str(random.randint(1, 9))",
"def getStage(code):\n loc = code.find('x')\n if loc < 0: loc = 4\n if code == \"XXXX\": loc = 0\n return loc",
"def day_01_b() -> int:\n return get_first_step_basement(read_instructions('aoc/aoc2015/input/01A.txt'))",
"def MolecularToolsStartUp():\r\n StartUpLine1 = 'Welcome to Data Tools'\r\n StartUpLine2 = 'The software to support Data Science' \r\n StartUpLine3 = 'for data collected from CFOUR'\r\n StartUpHeader = '\\t+{:-<42}+\\n'.format('')\r\n StartUpHeader += '\\t| {:^40} |\\n'.format(StartUpLine1)\r\n StartUpHeader += '\\t+{:-<42}+\\n'.format('')\r\n StartUpHeader += '\\t| {:40} |\\n'.format(StartUpLine2) \r\n StartUpHeader += '\\t| {:40} |\\n'.format(StartUpLine3) \r\n StartUpHeader += '\\t+{:-<42}+\\n'.format('')\r\n print(StartUpHeader)",
"def _sendStart_result (self, (code, data)) :\n\n assert code == \"REPLY_SUCCESS\"\n\n return code",
"def event_m20_11_4000000():\n \"\"\"State 0,2: [Lib] Character: Petrified: Key Guide_SubState\"\"\"\n assert event_m20_11_x37(z94=5300, z95=0, z96=15, z97=211000030, z98=0, z99=1600, z100=6, z101=4000010)\n \"\"\"State 1: Finish\"\"\"\n EndMachine()",
"def coding():\r\n \r\n key={'reverse_word': False, 'reverse_string': False, 'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd', 'e': 'e', 'f': 'f', 'g': 'g', 'h': 'h',\r\n 'i': 'i', 'j': 'j', 'k': 'j', 'l': 'l', 'm': 'm', 'n': 'n', 'o': 'o', 'p': 'p', 'q': 'q', 'r': 'r', 's': 's', 't': 't', 'u': 'u',\r\n 'v': 'v', 'w': 'w', 'x':'x', 'y': 'y', 'z': 'z'}\r\n x=0 #determine the sliding of the letters\r\n \r\n def isKeyEmpty(k):\r\n \"\"\"Utility Function that checks if key is empty\"\"\"\r\n if k=={'reverse_word': False, 'reverse_string': False, 'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd', 'e': 'e', 'f': 'f', 'g': 'g', 'h': 'h',\r\n 'i': 'i', 'j': 'j', 'k': 'j', 'l': 'l', 'm': 'm', 'n': 'n', 'o': 'o', 'p': 'p', 'q': 'q', 'r': 'r', 's': 's', 't': 't', 'u': 'u',\r\n 'v': 'v', 'w': 'w', 'x':'x', 'y': 'y', 'z': 'z'}:\r\n return True\r\n return False\r\n \r\n def set_key(vars): #vars=[0]num,[1]rWord,[2]rString\r\n \"\"\"Function that set the new key\"\"\"\r\n nonlocal key\r\n nonlocal x\r\n x=vars[0]\r\n if (vars[1]=='yes'):\r\n key['reverse_word']=True\r\n if (vars[2]=='yes'):\r\n key['reverse_string']=True\r\n if (x<-26 or x>26):\r\n x=x%26 #makes x to be in range\r\n if (x==0):\r\n x=random.randrange(-26,26) #random number\r\n for i in range (97,123): #26 ABC letters, ASCII value of 'a' is 97 97+26=123\r\n if(i+x>122):\r\n key[chr(i)]=chr(i-25+x)\r\n elif (i+x<97):\r\n key[chr(i)]=chr(i+26+x)\r\n else:\r\n key[chr(i)]=chr(i+x)\r\n print(\"done\")\r\n \r\n def empty_key():\r\n \"\"\"Function makes current key empty\"\"\"\r\n nonlocal key\r\n nonlocal x\r\n x=0\r\n key={'reverse_word': False, 'reverse_string': False, 'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd', 'e': 'e', 'f': 'f', 'g': 'g', 'h': 'h',\r\n 'i': 'i', 'j': 'j', 'k': 'j', 'l': 'l', 'm': 'm', 'n': 'n', 'o': 'o', 'p': 'p', 'q': 'q', 'r': 'r', 's': 's', 't': 't', 'u': 'u',\r\n 'v': 'v', 'w': 'w', 'x':'x', 'y': 'y', 'z': 'z'}\r\n print(\"done\")\r\n \r\n def export_key():\r\n \"\"\"Function export key\"\"\"\r\n if(isKeyEmpty(key)):\r\n print(\"key empty\")\r\n else:\r\n return key\r\n \r\n def import_key(key2):\r\n \"\"\"Function import key\"\"\"\r\n nonlocal key\r\n if(isKeyEmpty(key2)):\r\n print(\"key is empty\")\r\n else:\r\n key=key2\r\n print(\"done\")\r\n \r\n def encoding(sentence):\r\n \"\"\"function encoding given string with the key\"\"\"\r\n sentence=list(sentence)\r\n for i in range(len(sentence)):\r\n if (sentence[i]!=' '):\r\n sentence[i]=key[sentence[i]]\r\n sentence=''.join(sentence)\r\n if(key['reverse_word']==True):\r\n splitT=tuple(sentence.split(' '))\r\n splitT=map(lambda x:x[::-1],splitT)\r\n sentence=' '.join(splitT)\r\n if(key['reverse_string']==True):\r\n splitList=sentence.split(' ')\r\n splitList=splitList[-1::-1]\r\n sentence=' '.join(splitList)\r\n return sentence\r\n \r\n def decoding(sentence):\r\n \"\"\"function decoding given string with the key\"\"\"\r\n if(isKeyEmpty(key)):\r\n return \"key empty\"\r\n helpKey=dict((y,x) for x,y in key.items())\r\n if(key['reverse_word']==True):\r\n splitT=tuple(sentence.split(' '))\r\n splitT=map(lambda x:x[::-1],splitT)\r\n sentence=' '.join(splitT)\r\n if(key['reverse_string']==True):\r\n splitList=sentence.split(' ')\r\n splitList=splitList[-1::-1]\r\n sentence=' '.join(splitList)\r\n sentence=list(sentence)\r\n for i in range(len(sentence)):\r\n if(sentence[i]!=' '):\r\n sentence[i]=helpKey[sentence[i]]\r\n sentence=''.join(sentence)\r\n return sentence\r\n\r\n def dispatch(message,var=None):\r\n \"\"\"dispatch with message passing\"\"\"\r\n if message=='set_key':\r\n set_key(var)\r\n 
elif message=='empty_key':\r\n empty_key()\r\n elif message=='export_key':\r\n return export_key()\r\n elif message=='import_key':\r\n import_key(var)\r\n elif message=='encoding':\r\n return encoding(var)\r\n elif message=='decoding':\r\n return decoding(var)\r\n else:\r\n print(\"Unknown message\") \r\n return dispatch",
"def generate_starting_point() -> str:\n starter = ''\n for i in range(len(wf.ANSWER) // wf.SECTION_LENGTH):\n section = list(wf.ANSWER[wf.SECTION_LENGTH * i:wf.SECTION_LENGTH * (i + 1)])\n random.shuffle(section)\n starter = starter + ''.join(section)\n return starter",
"def startPhase(self, phaseName):\n \n pass",
"def start_handle(self, start_inst):\n if start_inst[9:14].lower().strip() == 'start':\n start_add = start_inst[17:34]\n start_add = start_add.strip()\n self.name = start_inst[0:7].strip().lower()\n self.OPTAB[start_inst[0:7].lower().strip()] = int(start_add, 16)\n print(start_add)\n return int(start_add, 16)\n else:\n self.errors.append(\"No START at begin of the program\")\n return 0"
] | [
"0.66249907",
"0.66167706",
"0.660856",
"0.6371558",
"0.625915",
"0.61321294",
"0.60450536",
"0.6003993",
"0.597335",
"0.55251557",
"0.55198294",
"0.54818565",
"0.5462468",
"0.54547775",
"0.5364101",
"0.53625154",
"0.52530175",
"0.52228147",
"0.521231",
"0.5203257",
"0.5198468",
"0.5163663",
"0.514219",
"0.513408",
"0.5130263",
"0.51114875",
"0.50952154",
"0.50824463",
"0.5079608",
"0.50679183"
] | 0.6677702 | 0 |
Stop Challenge code number 004. | def stop_challenge_code004(self, *chall_code_args, **chall_code_kwargs):
print("This is stop_challenge_code004 from ChallengeDefinition #",self.ID, sep='') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def stop_challenge_code010(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code010 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code009(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code009 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code008(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code008 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code001(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code001 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code007(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code007 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code002(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code002 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code006(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code006 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code003(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code003 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code005(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code005 from ChallengeDefinition #\",self.ID, sep='')\n # challenge #5, related to test case #5, i.e. test def #5\n # cloud reference (name and region) should be in clouds.yaml file\n # conn = openstack.connect(cloud='cloudNameForChallenge005', region_name='regionNameForChallenge005')\n # TestDef knows VNF, gets VNF->VM mapping from ONAP, passes VM ref to ChallengeDef\n # ChallengeDef suspends/resumes VM\n # conn.compute.servers() to get list of servers, using VM ID, check server.id and/or server.name\n # conn.compute.conn.compute.resume_server(this server id)\n\n # June 2018, test of code logic, using newly released OpenStack SDK 0.14.0\n # this resume would be the normal challenge stop, but not in the case of this test\n conn = openstack.connect(cloud='unh-hpe-openstack-fraser', region_name='RegionOne')\n test_VM_ID = '5d07da11-0e85-4256-9894-482dcee4a5f0' # arbitrary in this test, grab from OpenStack\n test_VM = conn.compute.get_server(test_VM_ID)\n print(' test_VM.name=',test_VM.name)\n print(' test_VM.status=',test_VM.status)\n print(' suspending...')\n conn.compute.resume_server(test_VM_ID)",
"def LOWER_STOP():\n return 13",
"def run_stop_challenge_code(self, *chall_code_args, **chall_code_kwargs):\n try:\n code_index = self.challenge_code_ID - 1 # lists are indexed from 0 to N-1\n # invoke corresponding stop method, via index\n self.stop_challenge_code_list[code_index](*chall_code_args, **chall_code_kwargs)\n except Exception as e:\n print(type(e), e)\n sys.exit()",
"def stop(self):\n return self.writevar('\\xC5\\x98',0,1)",
"def UPPER_STOP():\n return 6",
"def stop() -> None:",
"def stop():",
"def stop():",
"def stop():",
"def stop():",
"def manual_stop(self):\n self.manual_seqnum = 0\n return self.send(\"app_rc_end\")",
"def stop():\n global total_attempts, successful_stops, running\n timer.stop()\n running = False\n if running == False:\n if counter % 10 == 0 and counter != 0:\n successful_stops += 1\n total_attempts += 1\n elif counter != 0:\n total_attempts += 1",
"def stopMeasurement_pmt_contour(self):\r\n self.pmtTest_contour.aboutToQuitHandler()",
"def stop_procedure(self):\n pass",
"def do_stop(self):\n debug(\"CBA4.do_stop()\")\n if (self.__thread and self.__thread.isAlive()):\n self.__thread.stop()\n self.__thread.join(None)\n self.__thread = None\n\n if (self.is_valid()):\n tx = bytearray(16)\n tx[0] = 0x53\n tx[1] = 1\n self.get_status_response(tx)\n #end do_stop()",
"def stop(self,c,data):\r\n self.board.stop()\r\n return True",
"def Stop():\n timer.stop()\n global total_stop\n global success_stop\n total_stop += 1\n if n % 10 == 0:\n success_stop = success_stop + 1",
"def stop(self):\n return _spacegrant_swig.invert_bit_sptr_stop(self)",
"def TerminalClientStop(self, exitCode=200):\n pass",
"def stop(self):\n return _spacegrant_swig.G3RUH_descramble_sptr_stop(self)",
"def stopMeasurement_pmt(self):\r\n self.pmtTest.aboutToQuitHandler()",
"def stop():\n if timer.is_running():\n timer.stop()\n global tries, wins, winstreak, losestreak, mood, scorepos\n tries += 1\n if current % 10 == 0:\n wins += 1\n winstreak += 1\n losestreak = 0\n mood = goodmood(winstreak)\n else:\n winstreak = 0\n losestreak += 1\n mood = badmood(losestreak)\n if tries > 9:\n scorepos = (241, 140)\n if wins > 9:\n scorepos = (228, 140)"
] | [
"0.75250125",
"0.74801797",
"0.74294746",
"0.7384983",
"0.73195195",
"0.7241099",
"0.7205476",
"0.7192013",
"0.6681139",
"0.6135016",
"0.58822006",
"0.5804823",
"0.5710702",
"0.56781924",
"0.56563896",
"0.56563896",
"0.56563896",
"0.56563896",
"0.5545634",
"0.55392265",
"0.55120385",
"0.5499313",
"0.5484345",
"0.5458415",
"0.54513335",
"0.54456145",
"0.5433348",
"0.54274255",
"0.5408454",
"0.540797"
] | 0.75243205 | 1 |
Start Challenge code number 006. | def start_challenge_code006(self, *chall_code_args, **chall_code_kwargs):
print("This is start_challenge_code006 from ChallengeDefinition #",self.ID, sep='') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def start_challenge_code010(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code010 from ChallengeDefinition #\",self.ID, sep='')",
"def start_challenge_code008(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code008 from ChallengeDefinition #\",self.ID, sep='')",
"def start_challenge_code001(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code001 from ChallengeDefinition #\",self.ID, sep='')",
"def start_challenge_code009(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code009 from ChallengeDefinition #\",self.ID, sep='')",
"def start_challenge_code007(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code007 from ChallengeDefinition #\",self.ID, sep='')",
"def start_challenge_code004(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code004 from ChallengeDefinition #\",self.ID, sep='')",
"def LOWER_START():\n return 7",
"def start_challenge_code002(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code002 from ChallengeDefinition #\",self.ID, sep='')",
"def start_challenge_code003(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code003 from ChallengeDefinition #\",self.ID, sep='')",
"def startOfTestcase(self):\n pass # nothing to do here. Hence pass statement is called.",
"def start_challenge_code005(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code005 from ChallengeDefinition #\",self.ID, sep='')\n # challenge #5, related to test case #5, i.e. test def #5\n # cloud reference (name and region) should be in clouds.yaml file\n # conn = openstack.connect(cloud='cloudNameForChallenge005', region_name='regionNameForChallenge005')\n # TestDef knows VNF, gets VNF->VM mapping from ONAP, passes VM ref to ChallengeDef\n # ChallengeDef suspends/resumes VM\n # conn.compute.servers() to get list of servers, using VM ID, check server.id and/or server.name\n # conn.compute.suspend_server(this server id)\n\n # June 2018, test of code logic, using newly released OpenStack SDK 0.14.0\n # VM is created arbitrarily, not yet with ONAP\n # Openstack cloud was created by Fuel/MCP, descriptor in clouds.yaml file\n # VM resume done in Horizon (to simulate an ONAP-based recovery)\n conn = openstack.connect(cloud='unh-hpe-openstack-fraser', region_name='RegionOne')\n test_VM_ID = '5d07da11-0e85-4256-9894-482dcee4a5f0' # arbitrary in this test, grab from OpenStack\n test_VM = conn.compute.get_server(test_VM_ID)\n print(' test_VM.name=',test_VM.name)\n print(' test_VM.status=',test_VM.status)\n print(' suspending...')\n conn.compute.suspend_server(test_VM_ID)\n # wait a bit before continuing: ensure VM is actually suspended\n wait_seconds = 10\n print(' waiting',wait_seconds,'seconds...')\n time.sleep(wait_seconds)",
"def start_algorithm(self):\r\n pass",
"def START_BANK() -> int:\n return 100",
"def day_01_b() -> int:\n return get_first_step_basement(read_instructions('aoc/aoc2015/input/01A.txt'))",
"def start_with_the_beggining(rna: str):\n return 0",
"def MolecularToolsStartUp():\r\n StartUpLine1 = 'Welcome to Data Tools'\r\n StartUpLine2 = 'The software to support Data Science' \r\n StartUpLine3 = 'for data collected from CFOUR'\r\n StartUpHeader = '\\t+{:-<42}+\\n'.format('')\r\n StartUpHeader += '\\t| {:^40} |\\n'.format(StartUpLine1)\r\n StartUpHeader += '\\t+{:-<42}+\\n'.format('')\r\n StartUpHeader += '\\t| {:40} |\\n'.format(StartUpLine2) \r\n StartUpHeader += '\\t| {:40} |\\n'.format(StartUpLine3) \r\n StartUpHeader += '\\t+{:-<42}+\\n'.format('')\r\n print(StartUpHeader)",
"def exercise_b2_106():\r\n pass",
"def UPPER_START():\n return 1",
"def exercise_b2_113():\r\n pass",
"def begin_turn(self):\n pass",
"def generate_starting_point() -> str:\n starter = ''\n for i in range(len(wf.ANSWER) // wf.SECTION_LENGTH):\n section = list(wf.ANSWER[wf.SECTION_LENGTH * i:wf.SECTION_LENGTH * (i + 1)])\n random.shuffle(section)\n starter = starter + ''.join(section)\n return starter",
"def part_1():\n input_ = parse_input()\n cups = turn_input_into_cups(input_)\n cups = solve(cups, first_cup=cups[input_[0]], turns=100)\n\n answer = []\n current_cup = cups[1].next\n while current_cup != cups[1]:\n answer.append(str(current_cup.number))\n current_cup = current_cup.next\n\n return \"\".join(answer)",
"def coding():\r\n \r\n key={'reverse_word': False, 'reverse_string': False, 'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd', 'e': 'e', 'f': 'f', 'g': 'g', 'h': 'h',\r\n 'i': 'i', 'j': 'j', 'k': 'j', 'l': 'l', 'm': 'm', 'n': 'n', 'o': 'o', 'p': 'p', 'q': 'q', 'r': 'r', 's': 's', 't': 't', 'u': 'u',\r\n 'v': 'v', 'w': 'w', 'x':'x', 'y': 'y', 'z': 'z'}\r\n x=0 #determine the sliding of the letters\r\n \r\n def isKeyEmpty(k):\r\n \"\"\"Utility Function that checks if key is empty\"\"\"\r\n if k=={'reverse_word': False, 'reverse_string': False, 'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd', 'e': 'e', 'f': 'f', 'g': 'g', 'h': 'h',\r\n 'i': 'i', 'j': 'j', 'k': 'j', 'l': 'l', 'm': 'm', 'n': 'n', 'o': 'o', 'p': 'p', 'q': 'q', 'r': 'r', 's': 's', 't': 't', 'u': 'u',\r\n 'v': 'v', 'w': 'w', 'x':'x', 'y': 'y', 'z': 'z'}:\r\n return True\r\n return False\r\n \r\n def set_key(vars): #vars=[0]num,[1]rWord,[2]rString\r\n \"\"\"Function that set the new key\"\"\"\r\n nonlocal key\r\n nonlocal x\r\n x=vars[0]\r\n if (vars[1]=='yes'):\r\n key['reverse_word']=True\r\n if (vars[2]=='yes'):\r\n key['reverse_string']=True\r\n if (x<-26 or x>26):\r\n x=x%26 #makes x to be in range\r\n if (x==0):\r\n x=random.randrange(-26,26) #random number\r\n for i in range (97,123): #26 ABC letters, ASCII value of 'a' is 97 97+26=123\r\n if(i+x>122):\r\n key[chr(i)]=chr(i-25+x)\r\n elif (i+x<97):\r\n key[chr(i)]=chr(i+26+x)\r\n else:\r\n key[chr(i)]=chr(i+x)\r\n print(\"done\")\r\n \r\n def empty_key():\r\n \"\"\"Function makes current key empty\"\"\"\r\n nonlocal key\r\n nonlocal x\r\n x=0\r\n key={'reverse_word': False, 'reverse_string': False, 'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd', 'e': 'e', 'f': 'f', 'g': 'g', 'h': 'h',\r\n 'i': 'i', 'j': 'j', 'k': 'j', 'l': 'l', 'm': 'm', 'n': 'n', 'o': 'o', 'p': 'p', 'q': 'q', 'r': 'r', 's': 's', 't': 't', 'u': 'u',\r\n 'v': 'v', 'w': 'w', 'x':'x', 'y': 'y', 'z': 'z'}\r\n print(\"done\")\r\n \r\n def export_key():\r\n \"\"\"Function export key\"\"\"\r\n if(isKeyEmpty(key)):\r\n print(\"key empty\")\r\n else:\r\n return key\r\n \r\n def import_key(key2):\r\n \"\"\"Function import key\"\"\"\r\n nonlocal key\r\n if(isKeyEmpty(key2)):\r\n print(\"key is empty\")\r\n else:\r\n key=key2\r\n print(\"done\")\r\n \r\n def encoding(sentence):\r\n \"\"\"function encoding given string with the key\"\"\"\r\n sentence=list(sentence)\r\n for i in range(len(sentence)):\r\n if (sentence[i]!=' '):\r\n sentence[i]=key[sentence[i]]\r\n sentence=''.join(sentence)\r\n if(key['reverse_word']==True):\r\n splitT=tuple(sentence.split(' '))\r\n splitT=map(lambda x:x[::-1],splitT)\r\n sentence=' '.join(splitT)\r\n if(key['reverse_string']==True):\r\n splitList=sentence.split(' ')\r\n splitList=splitList[-1::-1]\r\n sentence=' '.join(splitList)\r\n return sentence\r\n \r\n def decoding(sentence):\r\n \"\"\"function decoding given string with the key\"\"\"\r\n if(isKeyEmpty(key)):\r\n return \"key empty\"\r\n helpKey=dict((y,x) for x,y in key.items())\r\n if(key['reverse_word']==True):\r\n splitT=tuple(sentence.split(' '))\r\n splitT=map(lambda x:x[::-1],splitT)\r\n sentence=' '.join(splitT)\r\n if(key['reverse_string']==True):\r\n splitList=sentence.split(' ')\r\n splitList=splitList[-1::-1]\r\n sentence=' '.join(splitList)\r\n sentence=list(sentence)\r\n for i in range(len(sentence)):\r\n if(sentence[i]!=' '):\r\n sentence[i]=helpKey[sentence[i]]\r\n sentence=''.join(sentence)\r\n return sentence\r\n\r\n def dispatch(message,var=None):\r\n \"\"\"dispatch with message passing\"\"\"\r\n if message=='set_key':\r\n set_key(var)\r\n 
elif message=='empty_key':\r\n empty_key()\r\n elif message=='export_key':\r\n return export_key()\r\n elif message=='import_key':\r\n import_key(var)\r\n elif message=='encoding':\r\n return encoding(var)\r\n elif message=='decoding':\r\n return decoding(var)\r\n else:\r\n print(\"Unknown message\") \r\n return dispatch",
"def show_instructions():\n\n print('4-digit Code has been set. Digits in range 1 to 8. You have 12 turns to break it.')",
"def _prepare(self):\n number_of_numbers = 10\n code_length = safe_int_input(\"How long is the code to be guessed? (4-10): \", 4, 10)\n numbers = '1234567890'[:number_of_numbers]\n code = ''.join(random.choices(numbers, k=code_length))",
"def initial_finder(self, seq, ins):\n# print('call initial_finder, input = '+seq)\n letter=seq[0]\n if letter in ins:\n if letter in ['д','т','ц','с']:\n next_letter=seq[:2]\n if next_letter in ins:\n initial=next_letter\n len_init=2\n else:\n initial=letter\n len_init=1\n else:\n initial=letter\n len_init=1 \n else:\n initial='_'\n len_init=0\n# print(initial)\n return initial, len_init",
"def event_m20_11_4000000():\n \"\"\"State 0,2: [Lib] Character: Petrified: Key Guide_SubState\"\"\"\n assert event_m20_11_x37(z94=5300, z95=0, z96=15, z97=211000030, z98=0, z99=1600, z100=6, z101=4000010)\n \"\"\"State 1: Finish\"\"\"\n EndMachine()",
"def challenge1(self):\n self.parse_input()\n\n # Create emulator, with 6 registers\n emulator = Emulator(6)\n\n # Run the program until the halt condition\n self.execute_program(emulator)\n\n print(f\"Final value of registers: {emulator.registers}\")",
"def start():\n # Have the car begin at a stop\n rc.drive.stop()\n\n global width\n global height\n width = rc.camera.get_width()\n height = rc.camera.get_height()\n # rc.drive.set_max_speed(1)\n\n global currentChallenge\n global oldState\n currentChallenge = Challenge.ManualControl\n oldState = Challenge.Line\n\n global colorPriority\n colorPriority = None\n\n global oldCones\n oldCones = None\n\n global last_waypoint_type\n last_waypoint_type = None\n\n # Print start message\n print(\">> Final Challenge - Time Trials\")",
"def start_prime_test():"
] | [
"0.65340734",
"0.6488615",
"0.6457808",
"0.6451548",
"0.62803596",
"0.609721",
"0.5928963",
"0.5832645",
"0.58285034",
"0.566085",
"0.55587",
"0.54121333",
"0.5318155",
"0.5305756",
"0.52829593",
"0.5197971",
"0.516507",
"0.516474",
"0.515549",
"0.5136996",
"0.510763",
"0.51006144",
"0.5099673",
"0.5098972",
"0.50987595",
"0.50940347",
"0.50835526",
"0.5069224",
"0.50631833",
"0.5036899"
] | 0.6653454 | 0 |
Stop Challenge code number 006. | def stop_challenge_code006(self, *chall_code_args, **chall_code_kwargs):
print("This is stop_challenge_code006 from ChallengeDefinition #",self.ID, sep='') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def stop_challenge_code008(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code008 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code010(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code010 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code009(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code009 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code007(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code007 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code001(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code001 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code004(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code004 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code002(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code002 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code003(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code003 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code005(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code005 from ChallengeDefinition #\",self.ID, sep='')\n # challenge #5, related to test case #5, i.e. test def #5\n # cloud reference (name and region) should be in clouds.yaml file\n # conn = openstack.connect(cloud='cloudNameForChallenge005', region_name='regionNameForChallenge005')\n # TestDef knows VNF, gets VNF->VM mapping from ONAP, passes VM ref to ChallengeDef\n # ChallengeDef suspends/resumes VM\n # conn.compute.servers() to get list of servers, using VM ID, check server.id and/or server.name\n # conn.compute.conn.compute.resume_server(this server id)\n\n # June 2018, test of code logic, using newly released OpenStack SDK 0.14.0\n # this resume would be the normal challenge stop, but not in the case of this test\n conn = openstack.connect(cloud='unh-hpe-openstack-fraser', region_name='RegionOne')\n test_VM_ID = '5d07da11-0e85-4256-9894-482dcee4a5f0' # arbitrary in this test, grab from OpenStack\n test_VM = conn.compute.get_server(test_VM_ID)\n print(' test_VM.name=',test_VM.name)\n print(' test_VM.status=',test_VM.status)\n print(' suspending...')\n conn.compute.resume_server(test_VM_ID)",
"def LOWER_STOP():\n return 13",
"def UPPER_STOP():\n return 6",
"def run_stop_challenge_code(self, *chall_code_args, **chall_code_kwargs):\n try:\n code_index = self.challenge_code_ID - 1 # lists are indexed from 0 to N-1\n # invoke corresponding stop method, via index\n self.stop_challenge_code_list[code_index](*chall_code_args, **chall_code_kwargs)\n except Exception as e:\n print(type(e), e)\n sys.exit()",
"def stop(self):\n return self.writevar('\\xC5\\x98',0,1)",
"def stopTest(self, test):\n self.complete_output()",
"def stop():",
"def stop():",
"def stop():",
"def stop():",
"def stop() -> None:",
"def stop(self,c,data):\r\n self.board.stop()\r\n return True",
"def TerminalClientStop(self, exitCode=200):\n pass",
"def stopFCSscan(self):\n c = \"/cli:python /app:fcs /cmd:stopscan\"\n self.sendCMDstring(c)",
"def stop():\n global total_attempts, successful_stops, running\n timer.stop()\n running = False\n if running == False:\n if counter % 10 == 0 and counter != 0:\n successful_stops += 1\n total_attempts += 1\n elif counter != 0:\n total_attempts += 1",
"def stopMeasurement_pmt_contour(self):\r\n self.pmtTest_contour.aboutToQuitHandler()",
"def Stop():\n timer.stop()\n global total_stop\n global success_stop\n total_stop += 1\n if n % 10 == 0:\n success_stop = success_stop + 1",
"def stopTest(self, test):",
"def stop(self):\n return _spacegrant_swig.invert_bit_sptr_stop(self)",
"def stop():\n if timer.is_running():\n timer.stop()\n global tries, wins, winstreak, losestreak, mood, scorepos\n tries += 1\n if current % 10 == 0:\n wins += 1\n winstreak += 1\n losestreak = 0\n mood = goodmood(winstreak)\n else:\n winstreak = 0\n losestreak += 1\n mood = badmood(losestreak)\n if tries > 9:\n scorepos = (241, 140)\n if wins > 9:\n scorepos = (228, 140)",
"def stop(self) -> None:",
"def stop(self) -> None:"
] | [
"0.7457875",
"0.74417275",
"0.7335157",
"0.72823745",
"0.72266304",
"0.7116391",
"0.7089024",
"0.70095134",
"0.672087",
"0.598565",
"0.5982917",
"0.59029466",
"0.58384687",
"0.5725244",
"0.56605136",
"0.56605136",
"0.56605136",
"0.56605136",
"0.5588998",
"0.5563385",
"0.54503113",
"0.5448832",
"0.5436786",
"0.54257536",
"0.5397244",
"0.5391684",
"0.5362915",
"0.535498",
"0.53507423",
"0.53507423"
] | 0.7494622 | 0 |
Start Challenge code number 008. | def start_challenge_code008(self, *chall_code_args, **chall_code_kwargs):
print("This is start_challenge_code008 from ChallengeDefinition #",self.ID, sep='') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def start_challenge_code010(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code010 from ChallengeDefinition #\",self.ID, sep='')",
"def start_challenge_code009(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code009 from ChallengeDefinition #\",self.ID, sep='')",
"def start_challenge_code001(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code001 from ChallengeDefinition #\",self.ID, sep='')",
"def start_challenge_code007(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code007 from ChallengeDefinition #\",self.ID, sep='')",
"def start_challenge_code006(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code006 from ChallengeDefinition #\",self.ID, sep='')",
"def start_challenge_code004(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code004 from ChallengeDefinition #\",self.ID, sep='')",
"def LOWER_START():\n return 7",
"def start_challenge_code003(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code003 from ChallengeDefinition #\",self.ID, sep='')",
"def start_challenge_code002(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code002 from ChallengeDefinition #\",self.ID, sep='')",
"def start_challenge_code005(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code005 from ChallengeDefinition #\",self.ID, sep='')\n # challenge #5, related to test case #5, i.e. test def #5\n # cloud reference (name and region) should be in clouds.yaml file\n # conn = openstack.connect(cloud='cloudNameForChallenge005', region_name='regionNameForChallenge005')\n # TestDef knows VNF, gets VNF->VM mapping from ONAP, passes VM ref to ChallengeDef\n # ChallengeDef suspends/resumes VM\n # conn.compute.servers() to get list of servers, using VM ID, check server.id and/or server.name\n # conn.compute.suspend_server(this server id)\n\n # June 2018, test of code logic, using newly released OpenStack SDK 0.14.0\n # VM is created arbitrarily, not yet with ONAP\n # Openstack cloud was created by Fuel/MCP, descriptor in clouds.yaml file\n # VM resume done in Horizon (to simulate an ONAP-based recovery)\n conn = openstack.connect(cloud='unh-hpe-openstack-fraser', region_name='RegionOne')\n test_VM_ID = '5d07da11-0e85-4256-9894-482dcee4a5f0' # arbitrary in this test, grab from OpenStack\n test_VM = conn.compute.get_server(test_VM_ID)\n print(' test_VM.name=',test_VM.name)\n print(' test_VM.status=',test_VM.status)\n print(' suspending...')\n conn.compute.suspend_server(test_VM_ID)\n # wait a bit before continuing: ensure VM is actually suspended\n wait_seconds = 10\n print(' waiting',wait_seconds,'seconds...')\n time.sleep(wait_seconds)",
"def START_BANK() -> int:\n return 100",
"def startOfTestcase(self):\n pass # nothing to do here. Hence pass statement is called.",
"def start_with_the_beggining(rna: str):\n return 0",
"def LOWER_STOP():\n return 13",
"def start():\n # Have the car begin at a stop\n rc.drive.stop()\n\n global width\n global height\n width = rc.camera.get_width()\n height = rc.camera.get_height()\n # rc.drive.set_max_speed(1)\n\n global currentChallenge\n global oldState\n currentChallenge = Challenge.ManualControl\n oldState = Challenge.Line\n\n global colorPriority\n colorPriority = None\n\n global oldCones\n oldCones = None\n\n global last_waypoint_type\n last_waypoint_type = None\n\n # Print start message\n print(\">> Final Challenge - Time Trials\")",
"def show_instructions():\n\n print('4-digit Code has been set. Digits in range 1 to 8. You have 12 turns to break it.')",
"def day_01_b() -> int:\n return get_first_step_basement(read_instructions('aoc/aoc2015/input/01A.txt'))",
"def start_algorithm(self):\r\n pass",
"def main():\n\n # first lets test with a already created csp:\n csp = create_map_csp()\n solution = backtracking(csp)\n #solution2,assigned = minimum_remaining_values(csp)\n print(solution)\n #print assigned\n\n # and now with our own generated sudoku CSP\n \"\"\"sudokus = read_sudokus()\n csp = create_sudoku_csp(sudokus[1])\n solution = backtracking(csp)\n print sudoku_csp_to_array(solution)\n\"\"\"",
"def UPPER_START():\n return 1",
"def coding():\r\n \r\n key={'reverse_word': False, 'reverse_string': False, 'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd', 'e': 'e', 'f': 'f', 'g': 'g', 'h': 'h',\r\n 'i': 'i', 'j': 'j', 'k': 'j', 'l': 'l', 'm': 'm', 'n': 'n', 'o': 'o', 'p': 'p', 'q': 'q', 'r': 'r', 's': 's', 't': 't', 'u': 'u',\r\n 'v': 'v', 'w': 'w', 'x':'x', 'y': 'y', 'z': 'z'}\r\n x=0 #determine the sliding of the letters\r\n \r\n def isKeyEmpty(k):\r\n \"\"\"Utility Function that checks if key is empty\"\"\"\r\n if k=={'reverse_word': False, 'reverse_string': False, 'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd', 'e': 'e', 'f': 'f', 'g': 'g', 'h': 'h',\r\n 'i': 'i', 'j': 'j', 'k': 'j', 'l': 'l', 'm': 'm', 'n': 'n', 'o': 'o', 'p': 'p', 'q': 'q', 'r': 'r', 's': 's', 't': 't', 'u': 'u',\r\n 'v': 'v', 'w': 'w', 'x':'x', 'y': 'y', 'z': 'z'}:\r\n return True\r\n return False\r\n \r\n def set_key(vars): #vars=[0]num,[1]rWord,[2]rString\r\n \"\"\"Function that set the new key\"\"\"\r\n nonlocal key\r\n nonlocal x\r\n x=vars[0]\r\n if (vars[1]=='yes'):\r\n key['reverse_word']=True\r\n if (vars[2]=='yes'):\r\n key['reverse_string']=True\r\n if (x<-26 or x>26):\r\n x=x%26 #makes x to be in range\r\n if (x==0):\r\n x=random.randrange(-26,26) #random number\r\n for i in range (97,123): #26 ABC letters, ASCII value of 'a' is 97 97+26=123\r\n if(i+x>122):\r\n key[chr(i)]=chr(i-25+x)\r\n elif (i+x<97):\r\n key[chr(i)]=chr(i+26+x)\r\n else:\r\n key[chr(i)]=chr(i+x)\r\n print(\"done\")\r\n \r\n def empty_key():\r\n \"\"\"Function makes current key empty\"\"\"\r\n nonlocal key\r\n nonlocal x\r\n x=0\r\n key={'reverse_word': False, 'reverse_string': False, 'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd', 'e': 'e', 'f': 'f', 'g': 'g', 'h': 'h',\r\n 'i': 'i', 'j': 'j', 'k': 'j', 'l': 'l', 'm': 'm', 'n': 'n', 'o': 'o', 'p': 'p', 'q': 'q', 'r': 'r', 's': 's', 't': 't', 'u': 'u',\r\n 'v': 'v', 'w': 'w', 'x':'x', 'y': 'y', 'z': 'z'}\r\n print(\"done\")\r\n \r\n def export_key():\r\n \"\"\"Function export key\"\"\"\r\n if(isKeyEmpty(key)):\r\n print(\"key empty\")\r\n else:\r\n return key\r\n \r\n def import_key(key2):\r\n \"\"\"Function import key\"\"\"\r\n nonlocal key\r\n if(isKeyEmpty(key2)):\r\n print(\"key is empty\")\r\n else:\r\n key=key2\r\n print(\"done\")\r\n \r\n def encoding(sentence):\r\n \"\"\"function encoding given string with the key\"\"\"\r\n sentence=list(sentence)\r\n for i in range(len(sentence)):\r\n if (sentence[i]!=' '):\r\n sentence[i]=key[sentence[i]]\r\n sentence=''.join(sentence)\r\n if(key['reverse_word']==True):\r\n splitT=tuple(sentence.split(' '))\r\n splitT=map(lambda x:x[::-1],splitT)\r\n sentence=' '.join(splitT)\r\n if(key['reverse_string']==True):\r\n splitList=sentence.split(' ')\r\n splitList=splitList[-1::-1]\r\n sentence=' '.join(splitList)\r\n return sentence\r\n \r\n def decoding(sentence):\r\n \"\"\"function decoding given string with the key\"\"\"\r\n if(isKeyEmpty(key)):\r\n return \"key empty\"\r\n helpKey=dict((y,x) for x,y in key.items())\r\n if(key['reverse_word']==True):\r\n splitT=tuple(sentence.split(' '))\r\n splitT=map(lambda x:x[::-1],splitT)\r\n sentence=' '.join(splitT)\r\n if(key['reverse_string']==True):\r\n splitList=sentence.split(' ')\r\n splitList=splitList[-1::-1]\r\n sentence=' '.join(splitList)\r\n sentence=list(sentence)\r\n for i in range(len(sentence)):\r\n if(sentence[i]!=' '):\r\n sentence[i]=helpKey[sentence[i]]\r\n sentence=''.join(sentence)\r\n return sentence\r\n\r\n def dispatch(message,var=None):\r\n \"\"\"dispatch with message passing\"\"\"\r\n if message=='set_key':\r\n set_key(var)\r\n 
elif message=='empty_key':\r\n empty_key()\r\n elif message=='export_key':\r\n return export_key()\r\n elif message=='import_key':\r\n import_key(var)\r\n elif message=='encoding':\r\n return encoding(var)\r\n elif message=='decoding':\r\n return decoding(var)\r\n else:\r\n print(\"Unknown message\") \r\n return dispatch",
"def _prepare(self):\n number_of_numbers = 10\n code_length = safe_int_input(\"How long is the code to be guessed? (4-10): \", 4, 10)\n numbers = '1234567890'[:number_of_numbers]\n code = ''.join(random.choices(numbers, k=code_length))",
"def exercise_b2_113():\r\n pass",
"def event_m20_11_4000000():\n \"\"\"State 0,2: [Lib] Character: Petrified: Key Guide_SubState\"\"\"\n assert event_m20_11_x37(z94=5300, z95=0, z96=15, z97=211000030, z98=0, z99=1600, z100=6, z101=4000010)\n \"\"\"State 1: Finish\"\"\"\n EndMachine()",
"def getStage(code):\n loc = code.find('x')\n if loc < 0: loc = 4\n if code == \"XXXX\": loc = 0\n return loc",
"def create_code():\n\n code = [0, 0, 0, 0]\n\n for i in range(4):\n value = random.randint(1, 8) # 8 possible digits\n while value in code:\n value = random.randint(1, 8) # 8 possible digits\n code[i] = value\n \n #print(code)\n return code",
"def _prepare(self):\n self.code = random.randint(1000,9999)\n self.user_guess.append(\"----\")\n self.user_guess.append(\"----\")\n self.applied_guess.append(\"****\")\n self.applied_guess.append(\"****\")",
"def exercise_b2_106():\r\n pass",
"def initial_finder(self, seq, ins):\n# print('call initial_finder, input = '+seq)\n letter=seq[0]\n if letter in ins:\n if letter in ['д','т','ц','с']:\n next_letter=seq[:2]\n if next_letter in ins:\n initial=next_letter\n len_init=2\n else:\n initial=letter\n len_init=1\n else:\n initial=letter\n len_init=1 \n else:\n initial='_'\n len_init=0\n# print(initial)\n return initial, len_init",
"def _load_home_control_byte(self):\n if self.home_limit == 1:\n bs = \"00100001\"\n else:\n bs = \"00100010\"\n\n return int(bs, 2)"
] | [
"0.6656827",
"0.6595131",
"0.6548328",
"0.6291659",
"0.6257462",
"0.6218843",
"0.6121715",
"0.59146804",
"0.59141225",
"0.5684973",
"0.55377156",
"0.55277926",
"0.5444801",
"0.5411079",
"0.5370827",
"0.53679067",
"0.5286541",
"0.52723044",
"0.52424043",
"0.52335536",
"0.52281475",
"0.5212467",
"0.5165134",
"0.51452786",
"0.51411927",
"0.51371384",
"0.50859475",
"0.50792706",
"0.5066773",
"0.5061809"
] | 0.6907975 | 0 |
Stop Challenge code number 008. | def stop_challenge_code008(self, *chall_code_args, **chall_code_kwargs):
print("This is stop_challenge_code008 from ChallengeDefinition #",self.ID, sep='') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def stop_challenge_code010(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code010 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code009(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code009 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code001(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code001 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code007(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code007 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code006(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code006 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code004(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code004 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code002(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code002 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code003(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code003 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code005(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code005 from ChallengeDefinition #\",self.ID, sep='')\n # challenge #5, related to test case #5, i.e. test def #5\n # cloud reference (name and region) should be in clouds.yaml file\n # conn = openstack.connect(cloud='cloudNameForChallenge005', region_name='regionNameForChallenge005')\n # TestDef knows VNF, gets VNF->VM mapping from ONAP, passes VM ref to ChallengeDef\n # ChallengeDef suspends/resumes VM\n # conn.compute.servers() to get list of servers, using VM ID, check server.id and/or server.name\n # conn.compute.conn.compute.resume_server(this server id)\n\n # June 2018, test of code logic, using newly released OpenStack SDK 0.14.0\n # this resume would be the normal challenge stop, but not in the case of this test\n conn = openstack.connect(cloud='unh-hpe-openstack-fraser', region_name='RegionOne')\n test_VM_ID = '5d07da11-0e85-4256-9894-482dcee4a5f0' # arbitrary in this test, grab from OpenStack\n test_VM = conn.compute.get_server(test_VM_ID)\n print(' test_VM.name=',test_VM.name)\n print(' test_VM.status=',test_VM.status)\n print(' suspending...')\n conn.compute.resume_server(test_VM_ID)",
"def LOWER_STOP():\n return 13",
"def stop(self):\n return self.writevar('\\xC5\\x98',0,1)",
"def run_stop_challenge_code(self, *chall_code_args, **chall_code_kwargs):\n try:\n code_index = self.challenge_code_ID - 1 # lists are indexed from 0 to N-1\n # invoke corresponding stop method, via index\n self.stop_challenge_code_list[code_index](*chall_code_args, **chall_code_kwargs)\n except Exception as e:\n print(type(e), e)\n sys.exit()",
"def UPPER_STOP():\n return 6",
"def stop():",
"def stop():",
"def stop():",
"def stop():",
"def stop(self,c,data):\r\n self.board.stop()\r\n return True",
"def stop() -> None:",
"def stop(self):\n return _spacegrant_swig.invert_bit_sptr_stop(self)",
"def TerminalClientStop(self, exitCode=200):\n pass",
"def stopTest(self, test):\n self.complete_output()",
"def Stop():\n timer.stop()\n global total_stop\n global success_stop\n total_stop += 1\n if n % 10 == 0:\n success_stop = success_stop + 1",
"def stop(self) -> None:",
"def stop(self) -> None:",
"def stopMeasurement_pmt_contour(self):\r\n self.pmtTest_contour.aboutToQuitHandler()",
"def stop():\n global total_attempts, successful_stops, running\n timer.stop()\n running = False\n if running == False:\n if counter % 10 == 0 and counter != 0:\n successful_stops += 1\n total_attempts += 1\n elif counter != 0:\n total_attempts += 1",
"def stop_procedure(self):\n pass",
"def InterfaceClientStop(self, exitCode=200): \n pass",
"def manual_stop(self):\n self.manual_seqnum = 0\n return self.send(\"app_rc_end\")"
] | [
"0.754257",
"0.7433333",
"0.73090404",
"0.7306642",
"0.7260581",
"0.7215979",
"0.7146896",
"0.70619434",
"0.6740347",
"0.61863214",
"0.60304695",
"0.59986854",
"0.58392316",
"0.5830099",
"0.5830099",
"0.5830099",
"0.5830099",
"0.5789749",
"0.57684064",
"0.5594122",
"0.5588689",
"0.5550729",
"0.5516124",
"0.5497803",
"0.5497803",
"0.5496964",
"0.5477644",
"0.54755497",
"0.5471284",
"0.5452335"
] | 0.7724662 | 0 |
Start Challenge code number 009. | def start_challenge_code009(self, *chall_code_args, **chall_code_kwargs):
print("This is start_challenge_code009 from ChallengeDefinition #",self.ID, sep='') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def start_challenge_code010(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code010 from ChallengeDefinition #\",self.ID, sep='')",
"def start_challenge_code001(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code001 from ChallengeDefinition #\",self.ID, sep='')",
"def start_challenge_code008(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code008 from ChallengeDefinition #\",self.ID, sep='')",
"def start_challenge_code007(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code007 from ChallengeDefinition #\",self.ID, sep='')",
"def start_challenge_code006(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code006 from ChallengeDefinition #\",self.ID, sep='')",
"def start_challenge_code004(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code004 from ChallengeDefinition #\",self.ID, sep='')",
"def LOWER_START():\n return 7",
"def start_challenge_code002(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code002 from ChallengeDefinition #\",self.ID, sep='')",
"def start_challenge_code003(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code003 from ChallengeDefinition #\",self.ID, sep='')",
"def start_with_the_beggining(rna: str):\n return 0",
"def START_BANK() -> int:\n return 100",
"def startOfTestcase(self):\n pass # nothing to do here. Hence pass statement is called.",
"def coding():\r\n \r\n key={'reverse_word': False, 'reverse_string': False, 'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd', 'e': 'e', 'f': 'f', 'g': 'g', 'h': 'h',\r\n 'i': 'i', 'j': 'j', 'k': 'j', 'l': 'l', 'm': 'm', 'n': 'n', 'o': 'o', 'p': 'p', 'q': 'q', 'r': 'r', 's': 's', 't': 't', 'u': 'u',\r\n 'v': 'v', 'w': 'w', 'x':'x', 'y': 'y', 'z': 'z'}\r\n x=0 #determine the sliding of the letters\r\n \r\n def isKeyEmpty(k):\r\n \"\"\"Utility Function that checks if key is empty\"\"\"\r\n if k=={'reverse_word': False, 'reverse_string': False, 'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd', 'e': 'e', 'f': 'f', 'g': 'g', 'h': 'h',\r\n 'i': 'i', 'j': 'j', 'k': 'j', 'l': 'l', 'm': 'm', 'n': 'n', 'o': 'o', 'p': 'p', 'q': 'q', 'r': 'r', 's': 's', 't': 't', 'u': 'u',\r\n 'v': 'v', 'w': 'w', 'x':'x', 'y': 'y', 'z': 'z'}:\r\n return True\r\n return False\r\n \r\n def set_key(vars): #vars=[0]num,[1]rWord,[2]rString\r\n \"\"\"Function that set the new key\"\"\"\r\n nonlocal key\r\n nonlocal x\r\n x=vars[0]\r\n if (vars[1]=='yes'):\r\n key['reverse_word']=True\r\n if (vars[2]=='yes'):\r\n key['reverse_string']=True\r\n if (x<-26 or x>26):\r\n x=x%26 #makes x to be in range\r\n if (x==0):\r\n x=random.randrange(-26,26) #random number\r\n for i in range (97,123): #26 ABC letters, ASCII value of 'a' is 97 97+26=123\r\n if(i+x>122):\r\n key[chr(i)]=chr(i-25+x)\r\n elif (i+x<97):\r\n key[chr(i)]=chr(i+26+x)\r\n else:\r\n key[chr(i)]=chr(i+x)\r\n print(\"done\")\r\n \r\n def empty_key():\r\n \"\"\"Function makes current key empty\"\"\"\r\n nonlocal key\r\n nonlocal x\r\n x=0\r\n key={'reverse_word': False, 'reverse_string': False, 'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd', 'e': 'e', 'f': 'f', 'g': 'g', 'h': 'h',\r\n 'i': 'i', 'j': 'j', 'k': 'j', 'l': 'l', 'm': 'm', 'n': 'n', 'o': 'o', 'p': 'p', 'q': 'q', 'r': 'r', 's': 's', 't': 't', 'u': 'u',\r\n 'v': 'v', 'w': 'w', 'x':'x', 'y': 'y', 'z': 'z'}\r\n print(\"done\")\r\n \r\n def export_key():\r\n \"\"\"Function export key\"\"\"\r\n if(isKeyEmpty(key)):\r\n print(\"key empty\")\r\n else:\r\n return key\r\n \r\n def import_key(key2):\r\n \"\"\"Function import key\"\"\"\r\n nonlocal key\r\n if(isKeyEmpty(key2)):\r\n print(\"key is empty\")\r\n else:\r\n key=key2\r\n print(\"done\")\r\n \r\n def encoding(sentence):\r\n \"\"\"function encoding given string with the key\"\"\"\r\n sentence=list(sentence)\r\n for i in range(len(sentence)):\r\n if (sentence[i]!=' '):\r\n sentence[i]=key[sentence[i]]\r\n sentence=''.join(sentence)\r\n if(key['reverse_word']==True):\r\n splitT=tuple(sentence.split(' '))\r\n splitT=map(lambda x:x[::-1],splitT)\r\n sentence=' '.join(splitT)\r\n if(key['reverse_string']==True):\r\n splitList=sentence.split(' ')\r\n splitList=splitList[-1::-1]\r\n sentence=' '.join(splitList)\r\n return sentence\r\n \r\n def decoding(sentence):\r\n \"\"\"function decoding given string with the key\"\"\"\r\n if(isKeyEmpty(key)):\r\n return \"key empty\"\r\n helpKey=dict((y,x) for x,y in key.items())\r\n if(key['reverse_word']==True):\r\n splitT=tuple(sentence.split(' '))\r\n splitT=map(lambda x:x[::-1],splitT)\r\n sentence=' '.join(splitT)\r\n if(key['reverse_string']==True):\r\n splitList=sentence.split(' ')\r\n splitList=splitList[-1::-1]\r\n sentence=' '.join(splitList)\r\n sentence=list(sentence)\r\n for i in range(len(sentence)):\r\n if(sentence[i]!=' '):\r\n sentence[i]=helpKey[sentence[i]]\r\n sentence=''.join(sentence)\r\n return sentence\r\n\r\n def dispatch(message,var=None):\r\n \"\"\"dispatch with message passing\"\"\"\r\n if message=='set_key':\r\n set_key(var)\r\n 
elif message=='empty_key':\r\n empty_key()\r\n elif message=='export_key':\r\n return export_key()\r\n elif message=='import_key':\r\n import_key(var)\r\n elif message=='encoding':\r\n return encoding(var)\r\n elif message=='decoding':\r\n return decoding(var)\r\n else:\r\n print(\"Unknown message\") \r\n return dispatch",
"def initiate_codes(lottery_file):\n # load the lottery data\n lottery = {}\n with open(lottery_file) as lf:\n head = lf.readline()\n prev = None\n for line in lf:\n info = line.strip().split('|')\n issue = info[0]\n nums = map(int, info[1:])\n lottery[issue] = {\"numbers\": nums, \"previous\":prev, \"issue\": issue}\n prev = issue\n\n # get the missing info for 20150901001\n issues = sorted(lottery.keys())\n lot_miss_info = {}\n for issue in issues[100:]:\n lot_miss_info[issue] = {}\n # 0: ten thousand, 1: thousand, 2: hundred, 3: ten, 4: unit\n for i in range(5):\n lot_miss_info[issue][i] = {}\n for dig in range(10):\n lot_miss_info[issue][i][dig] = 0\n mis_count = 0\n # trace back and get the previous appearence\n cur = issue\n while True:\n lot = lottery[cur]\n if lot[\"numbers\"][i] == dig:\n break\n else:\n mis_count += 1\n cur = lot[\"previous\"]\n lot_miss_info[issue][i][dig] = mis_count\n\n # compute the codes information\n codes = {}\n for issue in issues[100:]:\n # currently we only consider unit(4) and ten(3) digit codes\n # we have defined 7 codes\n # luo_ma: 当前中奖数字\n # leng_1_ma: 当前期中最大间隔的数字\n # leng_2_ma: 当前期中第二大间隔的数字\n # sha_ma: 十位(落码-1), 个位(落码*3+3)\n # chuan_1: 落码-1\n # chuan_2: 落码+1\n # 隔码: 上一期的落码\n codes[issue] = {}\n for dig in range(3, 5):\n code = compute_code(issue, dig, lottery, lot_miss_info)\n codes[issue][dig] = code\n\n # compute the match information\n matched = {} # 只匹配落/杀/冷12码\n full_matched = {}# 匹配所有6码\n match_keys = [\"luo_ma\", \"leng_1_ma\", \"leng_2_ma\", \"sha_ma\"]\n \n full_match_keys = match_keys + [\"chuan_1\", \"chuan_2\", \"ge_ma\"]\n for issue in issues[101:]:\n prev_id = lottery[issue][\"previous\"]\n numbers = lottery[issue][\"numbers\"]\n prev_code = codes[prev_id]\n flag, full_flag = update_match(lottery[issue], prev_code)\n matched[issue] = flag\n full_matched[issue] = full_flag\n\n # compute the l4z1hbz\n l4z1hbz_seq = {}\n for issue in issues[108:]:\n l4z1hbz_seq[issue] = compute_l4z1hbz(issue, matched, lottery)\n\n return lottery, lot_miss_info, codes, matched, full_matched, l4z1hbz_seq",
"def day_01_b() -> int:\n return get_first_step_basement(read_instructions('aoc/aoc2015/input/01A.txt'))",
"def _prepare(self):\n number_of_numbers = 10\n code_length = safe_int_input(\"How long is the code to be guessed? (4-10): \", 4, 10)\n numbers = '1234567890'[:number_of_numbers]\n code = ''.join(random.choices(numbers, k=code_length))",
"def checkdigit(code):\n check = sum((i+1)*int(code[i]) for i in range(9)) % 11\n return 'X' if check == 10 else str(check)",
"def start():\n # Have the car begin at a stop\n rc.drive.stop()\n\n global width\n global height\n width = rc.camera.get_width()\n height = rc.camera.get_height()\n # rc.drive.set_max_speed(1)\n\n global currentChallenge\n global oldState\n currentChallenge = Challenge.ManualControl\n oldState = Challenge.Line\n\n global colorPriority\n colorPriority = None\n\n global oldCones\n oldCones = None\n\n global last_waypoint_type\n last_waypoint_type = None\n\n # Print start message\n print(\">> Final Challenge - Time Trials\")",
"def show_instructions():\n\n print('4-digit Code has been set. Digits in range 1 to 8. You have 12 turns to break it.')",
"def UPPER_START():\n return 1",
"def LOWER_STOP():\n return 13",
"def getFirstChar(self):\n if self.i1 is None:\n self.firstChar = None\n else:\n chrNum = int(self.i1 // 10)\n if chrNum < 26:\n # should result in something like A4 for 4, B6 for 16\n self.firstChar = chr(ASCII_LETTER_A + chrNum) + str(self.i1 % 10)\n else:\n runLog.warning(\n \"invalid location. ring {0} is too many rings!\".format(self.i1),\n self,\n )",
"def test_hackerrank_sample2(self):\n result = find_digits(1012)\n self.assertEquals(result, 3)",
"def start_challenge_code005(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code005 from ChallengeDefinition #\",self.ID, sep='')\n # challenge #5, related to test case #5, i.e. test def #5\n # cloud reference (name and region) should be in clouds.yaml file\n # conn = openstack.connect(cloud='cloudNameForChallenge005', region_name='regionNameForChallenge005')\n # TestDef knows VNF, gets VNF->VM mapping from ONAP, passes VM ref to ChallengeDef\n # ChallengeDef suspends/resumes VM\n # conn.compute.servers() to get list of servers, using VM ID, check server.id and/or server.name\n # conn.compute.suspend_server(this server id)\n\n # June 2018, test of code logic, using newly released OpenStack SDK 0.14.0\n # VM is created arbitrarily, not yet with ONAP\n # Openstack cloud was created by Fuel/MCP, descriptor in clouds.yaml file\n # VM resume done in Horizon (to simulate an ONAP-based recovery)\n conn = openstack.connect(cloud='unh-hpe-openstack-fraser', region_name='RegionOne')\n test_VM_ID = '5d07da11-0e85-4256-9894-482dcee4a5f0' # arbitrary in this test, grab from OpenStack\n test_VM = conn.compute.get_server(test_VM_ID)\n print(' test_VM.name=',test_VM.name)\n print(' test_VM.status=',test_VM.status)\n print(' suspending...')\n conn.compute.suspend_server(test_VM_ID)\n # wait a bit before continuing: ensure VM is actually suspended\n wait_seconds = 10\n print(' waiting',wait_seconds,'seconds...')\n time.sleep(wait_seconds)",
"def start_algorithm(self):\r\n pass",
"def begin_turn(self):\n pass",
"def challenge() : \n\treturn [random.randint(1,9) for i in range(5)]",
"def startGame():\n\n\tprint(\"\\nOK! Let's play!\")\n\tprint(\"--------------------------------------------------------------------------------------\")\n\tprint(\"Note:\")\n\tprint(\"\\tNow you must be kept in your mind a random integer from specific range and I must be guessing that number!\")\n\tprint(\"\\tIf you answer honestly all of my questions I certainly will guess that number!\")\n\tprint(\"--------------------------------------------------------------------------------------\\n\")\n\tgameLogic()",
"def _prepare(self):\n for n in range(4):\n self._code += str(random.randint(1, 9))",
"def code(char):\n return int(char) if char.isdigit() else letter_code(char)"
] | [
"0.6648422",
"0.640778",
"0.64032894",
"0.60787034",
"0.59268326",
"0.59111047",
"0.5875077",
"0.5706066",
"0.56907415",
"0.5675535",
"0.55911005",
"0.5429949",
"0.5295225",
"0.5281509",
"0.52547747",
"0.5254549",
"0.5201935",
"0.519219",
"0.5188961",
"0.517877",
"0.5177587",
"0.5125417",
"0.51246846",
"0.51124305",
"0.5090209",
"0.5079435",
"0.5051197",
"0.50384545",
"0.50350696",
"0.5008958"
] | 0.6802339 | 0 |
Stop Challenge code number 009. | def stop_challenge_code009(self, *chall_code_args, **chall_code_kwargs):
print("This is stop_challenge_code009 from ChallengeDefinition #",self.ID, sep='') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def stop_challenge_code010(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code010 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code008(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code008 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code001(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code001 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code007(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code007 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code006(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code006 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code002(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code002 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code004(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code004 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code003(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code003 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code005(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code005 from ChallengeDefinition #\",self.ID, sep='')\n # challenge #5, related to test case #5, i.e. test def #5\n # cloud reference (name and region) should be in clouds.yaml file\n # conn = openstack.connect(cloud='cloudNameForChallenge005', region_name='regionNameForChallenge005')\n # TestDef knows VNF, gets VNF->VM mapping from ONAP, passes VM ref to ChallengeDef\n # ChallengeDef suspends/resumes VM\n # conn.compute.servers() to get list of servers, using VM ID, check server.id and/or server.name\n # conn.compute.conn.compute.resume_server(this server id)\n\n # June 2018, test of code logic, using newly released OpenStack SDK 0.14.0\n # this resume would be the normal challenge stop, but not in the case of this test\n conn = openstack.connect(cloud='unh-hpe-openstack-fraser', region_name='RegionOne')\n test_VM_ID = '5d07da11-0e85-4256-9894-482dcee4a5f0' # arbitrary in this test, grab from OpenStack\n test_VM = conn.compute.get_server(test_VM_ID)\n print(' test_VM.name=',test_VM.name)\n print(' test_VM.status=',test_VM.status)\n print(' suspending...')\n conn.compute.resume_server(test_VM_ID)",
"def LOWER_STOP():\n return 13",
"def run_stop_challenge_code(self, *chall_code_args, **chall_code_kwargs):\n try:\n code_index = self.challenge_code_ID - 1 # lists are indexed from 0 to N-1\n # invoke corresponding stop method, via index\n self.stop_challenge_code_list[code_index](*chall_code_args, **chall_code_kwargs)\n except Exception as e:\n print(type(e), e)\n sys.exit()",
"def stop():\n global total_attempts, successful_stops, running\n timer.stop()\n running = False\n if running == False:\n if counter % 10 == 0 and counter != 0:\n successful_stops += 1\n total_attempts += 1\n elif counter != 0:\n total_attempts += 1",
"def stop(self):\n return self.writevar('\\xC5\\x98',0,1)",
"def Stop():\n timer.stop()\n global total_stop\n global success_stop\n total_stop += 1\n if n % 10 == 0:\n success_stop = success_stop + 1",
"def UPPER_STOP():\n return 6",
"def stop(self,c,data):\r\n self.board.stop()\r\n return True",
"def stop():",
"def stop():",
"def stop():",
"def stop():",
"def stop() -> None:",
"def stop():\n if timer.is_running():\n timer.stop()\n global tries, wins, winstreak, losestreak, mood, scorepos\n tries += 1\n if current % 10 == 0:\n wins += 1\n winstreak += 1\n losestreak = 0\n mood = goodmood(winstreak)\n else:\n winstreak = 0\n losestreak += 1\n mood = badmood(losestreak)\n if tries > 9:\n scorepos = (241, 140)\n if wins > 9:\n scorepos = (228, 140)",
"def TerminalClientStop(self, exitCode=200):\n pass",
"def stopTest(self, test):\n self.complete_output()",
"def manual_stop(self):\n self.manual_seqnum = 0\n return self.send(\"app_rc_end\")",
"def stopMeasurement_pmt_contour(self):\r\n self.pmtTest_contour.aboutToQuitHandler()",
"def stop(self) -> None:",
"def stop(self) -> None:",
"def test_stop_competition(self):\n competition = self._create_competition()\n\n # Start competition\n competition.start(duration=30 * 60)\n\n # Stop competition\n competition.stop()\n self.assertNotEquals(competition.endTime, None)\n self.assertEquals(competition.status, COMPETITION_STATUSES[3][0])",
"def stop_procedure(self):\n pass"
] | [
"0.749207",
"0.73590344",
"0.7186478",
"0.7128835",
"0.7005895",
"0.7001801",
"0.69926596",
"0.6907655",
"0.6287855",
"0.6134733",
"0.5850997",
"0.582932",
"0.5804916",
"0.57821167",
"0.5730547",
"0.56695604",
"0.5564659",
"0.5564659",
"0.5564659",
"0.5564659",
"0.55636364",
"0.5528071",
"0.5517227",
"0.5456195",
"0.53842556",
"0.5378606",
"0.5339699",
"0.5339699",
"0.5334318",
"0.5331904"
] | 0.75401205 | 0 |
Start Challenge code number 010. | def start_challenge_code010(self, *chall_code_args, **chall_code_kwargs):
print("This is start_challenge_code010 from ChallengeDefinition #",self.ID, sep='') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def start_challenge_code009(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code009 from ChallengeDefinition #\",self.ID, sep='')",
"def start_challenge_code001(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code001 from ChallengeDefinition #\",self.ID, sep='')",
"def start_challenge_code008(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code008 from ChallengeDefinition #\",self.ID, sep='')",
"def start_challenge_code007(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code007 from ChallengeDefinition #\",self.ID, sep='')",
"def start_challenge_code006(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code006 from ChallengeDefinition #\",self.ID, sep='')",
"def start_challenge_code004(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code004 from ChallengeDefinition #\",self.ID, sep='')",
"def start_challenge_code002(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code002 from ChallengeDefinition #\",self.ID, sep='')",
"def start_challenge_code003(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code003 from ChallengeDefinition #\",self.ID, sep='')",
"def LOWER_START():\n return 7",
"def start():\n # Have the car begin at a stop\n rc.drive.stop()\n\n global width\n global height\n width = rc.camera.get_width()\n height = rc.camera.get_height()\n # rc.drive.set_max_speed(1)\n\n global currentChallenge\n global oldState\n currentChallenge = Challenge.ManualControl\n oldState = Challenge.Line\n\n global colorPriority\n colorPriority = None\n\n global oldCones\n oldCones = None\n\n global last_waypoint_type\n last_waypoint_type = None\n\n # Print start message\n print(\">> Final Challenge - Time Trials\")",
"def start_algorithm(self):\r\n pass",
"def START_BANK() -> int:\n return 100",
"def startGame():\n\n\tprint(\"\\nOK! Let's play!\")\n\tprint(\"--------------------------------------------------------------------------------------\")\n\tprint(\"Note:\")\n\tprint(\"\\tNow you must be kept in your mind a random integer from specific range and I must be guessing that number!\")\n\tprint(\"\\tIf you answer honestly all of my questions I certainly will guess that number!\")\n\tprint(\"--------------------------------------------------------------------------------------\\n\")\n\tgameLogic()",
"def startOfTestcase(self):\n pass # nothing to do here. Hence pass statement is called.",
"def show_instructions():\n\n print('4-digit Code has been set. Digits in range 1 to 8. You have 12 turns to break it.')",
"def start_with_the_beggining(rna: str):\n return 0",
"def initiate_codes(lottery_file):\n # load the lottery data\n lottery = {}\n with open(lottery_file) as lf:\n head = lf.readline()\n prev = None\n for line in lf:\n info = line.strip().split('|')\n issue = info[0]\n nums = map(int, info[1:])\n lottery[issue] = {\"numbers\": nums, \"previous\":prev, \"issue\": issue}\n prev = issue\n\n # get the missing info for 20150901001\n issues = sorted(lottery.keys())\n lot_miss_info = {}\n for issue in issues[100:]:\n lot_miss_info[issue] = {}\n # 0: ten thousand, 1: thousand, 2: hundred, 3: ten, 4: unit\n for i in range(5):\n lot_miss_info[issue][i] = {}\n for dig in range(10):\n lot_miss_info[issue][i][dig] = 0\n mis_count = 0\n # trace back and get the previous appearence\n cur = issue\n while True:\n lot = lottery[cur]\n if lot[\"numbers\"][i] == dig:\n break\n else:\n mis_count += 1\n cur = lot[\"previous\"]\n lot_miss_info[issue][i][dig] = mis_count\n\n # compute the codes information\n codes = {}\n for issue in issues[100:]:\n # currently we only consider unit(4) and ten(3) digit codes\n # we have defined 7 codes\n # luo_ma: 当前中奖数字\n # leng_1_ma: 当前期中最大间隔的数字\n # leng_2_ma: 当前期中第二大间隔的数字\n # sha_ma: 十位(落码-1), 个位(落码*3+3)\n # chuan_1: 落码-1\n # chuan_2: 落码+1\n # 隔码: 上一期的落码\n codes[issue] = {}\n for dig in range(3, 5):\n code = compute_code(issue, dig, lottery, lot_miss_info)\n codes[issue][dig] = code\n\n # compute the match information\n matched = {} # 只匹配落/杀/冷12码\n full_matched = {}# 匹配所有6码\n match_keys = [\"luo_ma\", \"leng_1_ma\", \"leng_2_ma\", \"sha_ma\"]\n \n full_match_keys = match_keys + [\"chuan_1\", \"chuan_2\", \"ge_ma\"]\n for issue in issues[101:]:\n prev_id = lottery[issue][\"previous\"]\n numbers = lottery[issue][\"numbers\"]\n prev_code = codes[prev_id]\n flag, full_flag = update_match(lottery[issue], prev_code)\n matched[issue] = flag\n full_matched[issue] = full_flag\n\n # compute the l4z1hbz\n l4z1hbz_seq = {}\n for issue in issues[108:]:\n l4z1hbz_seq[issue] = compute_l4z1hbz(issue, matched, lottery)\n\n return lottery, lot_miss_info, codes, matched, full_matched, l4z1hbz_seq",
"def _prepare(self):\n number_of_numbers = 10\n code_length = safe_int_input(\"How long is the code to be guessed? (4-10): \", 4, 10)\n numbers = '1234567890'[:number_of_numbers]\n code = ''.join(random.choices(numbers, k=code_length))",
"def start_prime_test():",
"def start_challenge_code005(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is start_challenge_code005 from ChallengeDefinition #\",self.ID, sep='')\n # challenge #5, related to test case #5, i.e. test def #5\n # cloud reference (name and region) should be in clouds.yaml file\n # conn = openstack.connect(cloud='cloudNameForChallenge005', region_name='regionNameForChallenge005')\n # TestDef knows VNF, gets VNF->VM mapping from ONAP, passes VM ref to ChallengeDef\n # ChallengeDef suspends/resumes VM\n # conn.compute.servers() to get list of servers, using VM ID, check server.id and/or server.name\n # conn.compute.suspend_server(this server id)\n\n # June 2018, test of code logic, using newly released OpenStack SDK 0.14.0\n # VM is created arbitrarily, not yet with ONAP\n # Openstack cloud was created by Fuel/MCP, descriptor in clouds.yaml file\n # VM resume done in Horizon (to simulate an ONAP-based recovery)\n conn = openstack.connect(cloud='unh-hpe-openstack-fraser', region_name='RegionOne')\n test_VM_ID = '5d07da11-0e85-4256-9894-482dcee4a5f0' # arbitrary in this test, grab from OpenStack\n test_VM = conn.compute.get_server(test_VM_ID)\n print(' test_VM.name=',test_VM.name)\n print(' test_VM.status=',test_VM.status)\n print(' suspending...')\n conn.compute.suspend_server(test_VM_ID)\n # wait a bit before continuing: ensure VM is actually suspended\n wait_seconds = 10\n print(' waiting',wait_seconds,'seconds...')\n time.sleep(wait_seconds)",
"def _sendStart_result (self, (code, data)) :\n\n assert code == \"REPLY_SUCCESS\"\n\n return code",
"def challenge() : \n\treturn [random.randint(1,9) for i in range(5)]",
"def run_start_challenge_code(self, *chall_code_args, **chall_code_kwargs):\n\n try:\n code_index = self.challenge_code_ID - 1 # lists are indexed from 0 to N-1\n # invoke corresponding start method, via index\n self.start_challenge_code_list[code_index](*chall_code_args, **chall_code_kwargs)\n except Exception as e:\n print(type(e), e)\n sys.exit()",
"def checkdigit(code):\n check = sum((i+1)*int(code[i]) for i in range(9)) % 11\n return 'X' if check == 10 else str(check)",
"def coding():\r\n \r\n key={'reverse_word': False, 'reverse_string': False, 'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd', 'e': 'e', 'f': 'f', 'g': 'g', 'h': 'h',\r\n 'i': 'i', 'j': 'j', 'k': 'j', 'l': 'l', 'm': 'm', 'n': 'n', 'o': 'o', 'p': 'p', 'q': 'q', 'r': 'r', 's': 's', 't': 't', 'u': 'u',\r\n 'v': 'v', 'w': 'w', 'x':'x', 'y': 'y', 'z': 'z'}\r\n x=0 #determine the sliding of the letters\r\n \r\n def isKeyEmpty(k):\r\n \"\"\"Utility Function that checks if key is empty\"\"\"\r\n if k=={'reverse_word': False, 'reverse_string': False, 'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd', 'e': 'e', 'f': 'f', 'g': 'g', 'h': 'h',\r\n 'i': 'i', 'j': 'j', 'k': 'j', 'l': 'l', 'm': 'm', 'n': 'n', 'o': 'o', 'p': 'p', 'q': 'q', 'r': 'r', 's': 's', 't': 't', 'u': 'u',\r\n 'v': 'v', 'w': 'w', 'x':'x', 'y': 'y', 'z': 'z'}:\r\n return True\r\n return False\r\n \r\n def set_key(vars): #vars=[0]num,[1]rWord,[2]rString\r\n \"\"\"Function that set the new key\"\"\"\r\n nonlocal key\r\n nonlocal x\r\n x=vars[0]\r\n if (vars[1]=='yes'):\r\n key['reverse_word']=True\r\n if (vars[2]=='yes'):\r\n key['reverse_string']=True\r\n if (x<-26 or x>26):\r\n x=x%26 #makes x to be in range\r\n if (x==0):\r\n x=random.randrange(-26,26) #random number\r\n for i in range (97,123): #26 ABC letters, ASCII value of 'a' is 97 97+26=123\r\n if(i+x>122):\r\n key[chr(i)]=chr(i-25+x)\r\n elif (i+x<97):\r\n key[chr(i)]=chr(i+26+x)\r\n else:\r\n key[chr(i)]=chr(i+x)\r\n print(\"done\")\r\n \r\n def empty_key():\r\n \"\"\"Function makes current key empty\"\"\"\r\n nonlocal key\r\n nonlocal x\r\n x=0\r\n key={'reverse_word': False, 'reverse_string': False, 'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd', 'e': 'e', 'f': 'f', 'g': 'g', 'h': 'h',\r\n 'i': 'i', 'j': 'j', 'k': 'j', 'l': 'l', 'm': 'm', 'n': 'n', 'o': 'o', 'p': 'p', 'q': 'q', 'r': 'r', 's': 's', 't': 't', 'u': 'u',\r\n 'v': 'v', 'w': 'w', 'x':'x', 'y': 'y', 'z': 'z'}\r\n print(\"done\")\r\n \r\n def export_key():\r\n \"\"\"Function export key\"\"\"\r\n if(isKeyEmpty(key)):\r\n print(\"key empty\")\r\n else:\r\n return key\r\n \r\n def import_key(key2):\r\n \"\"\"Function import key\"\"\"\r\n nonlocal key\r\n if(isKeyEmpty(key2)):\r\n print(\"key is empty\")\r\n else:\r\n key=key2\r\n print(\"done\")\r\n \r\n def encoding(sentence):\r\n \"\"\"function encoding given string with the key\"\"\"\r\n sentence=list(sentence)\r\n for i in range(len(sentence)):\r\n if (sentence[i]!=' '):\r\n sentence[i]=key[sentence[i]]\r\n sentence=''.join(sentence)\r\n if(key['reverse_word']==True):\r\n splitT=tuple(sentence.split(' '))\r\n splitT=map(lambda x:x[::-1],splitT)\r\n sentence=' '.join(splitT)\r\n if(key['reverse_string']==True):\r\n splitList=sentence.split(' ')\r\n splitList=splitList[-1::-1]\r\n sentence=' '.join(splitList)\r\n return sentence\r\n \r\n def decoding(sentence):\r\n \"\"\"function decoding given string with the key\"\"\"\r\n if(isKeyEmpty(key)):\r\n return \"key empty\"\r\n helpKey=dict((y,x) for x,y in key.items())\r\n if(key['reverse_word']==True):\r\n splitT=tuple(sentence.split(' '))\r\n splitT=map(lambda x:x[::-1],splitT)\r\n sentence=' '.join(splitT)\r\n if(key['reverse_string']==True):\r\n splitList=sentence.split(' ')\r\n splitList=splitList[-1::-1]\r\n sentence=' '.join(splitList)\r\n sentence=list(sentence)\r\n for i in range(len(sentence)):\r\n if(sentence[i]!=' '):\r\n sentence[i]=helpKey[sentence[i]]\r\n sentence=''.join(sentence)\r\n return sentence\r\n\r\n def dispatch(message,var=None):\r\n \"\"\"dispatch with message passing\"\"\"\r\n if message=='set_key':\r\n set_key(var)\r\n 
elif message=='empty_key':\r\n empty_key()\r\n elif message=='export_key':\r\n return export_key()\r\n elif message=='import_key':\r\n import_key(var)\r\n elif message=='encoding':\r\n return encoding(var)\r\n elif message=='decoding':\r\n return decoding(var)\r\n else:\r\n print(\"Unknown message\") \r\n return dispatch",
"def challenge1(self):\n self.parse_input()\n\n # Create emulator, with 6 registers\n emulator = Emulator(6)\n\n # Run the program until the halt condition\n self.execute_program(emulator)\n\n print(f\"Final value of registers: {emulator.registers}\")",
"def main():\n import sys\n plain = raw_input(\"Please enter the plaintext string you want to encode: \")\n print \"Here are the ASCII codes for that text, space-separated:\"\n for e in plain:\n print ord(e),\n print\n if raw_input(\"Press RETURN to exit.\"):\n sys.exit(0)",
"def main():\n return 0",
"def main():\n return 0",
"def main_f():\n ph_number = read_number()\n if ph_number == -1:\n print('Incorrect number, try again')\n return\n res_l = find_let(ph_number, 0)\n output_result(res_l)"
] | [
"0.671225",
"0.6593957",
"0.6436121",
"0.62023723",
"0.601001",
"0.60060525",
"0.5793211",
"0.57471645",
"0.566537",
"0.5600813",
"0.5577354",
"0.55703545",
"0.54807603",
"0.5478958",
"0.54334855",
"0.5417967",
"0.5413426",
"0.5368082",
"0.53440857",
"0.5312565",
"0.5241798",
"0.51922154",
"0.5167022",
"0.5165948",
"0.51430917",
"0.5110764",
"0.5109679",
"0.504366",
"0.504366",
"0.50423944"
] | 0.70041275 | 0 |
Stop Challenge code number 010. | def stop_challenge_code010(self, *chall_code_args, **chall_code_kwargs):
print("This is stop_challenge_code010 from ChallengeDefinition #",self.ID, sep='') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def stop_challenge_code009(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code009 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code008(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code008 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code001(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code001 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code007(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code007 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code004(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code004 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code002(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code002 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code006(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code006 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code003(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code003 from ChallengeDefinition #\",self.ID, sep='')",
"def stop_challenge_code005(self, *chall_code_args, **chall_code_kwargs):\n print(\"This is stop_challenge_code005 from ChallengeDefinition #\",self.ID, sep='')\n # challenge #5, related to test case #5, i.e. test def #5\n # cloud reference (name and region) should be in clouds.yaml file\n # conn = openstack.connect(cloud='cloudNameForChallenge005', region_name='regionNameForChallenge005')\n # TestDef knows VNF, gets VNF->VM mapping from ONAP, passes VM ref to ChallengeDef\n # ChallengeDef suspends/resumes VM\n # conn.compute.servers() to get list of servers, using VM ID, check server.id and/or server.name\n # conn.compute.conn.compute.resume_server(this server id)\n\n # June 2018, test of code logic, using newly released OpenStack SDK 0.14.0\n # this resume would be the normal challenge stop, but not in the case of this test\n conn = openstack.connect(cloud='unh-hpe-openstack-fraser', region_name='RegionOne')\n test_VM_ID = '5d07da11-0e85-4256-9894-482dcee4a5f0' # arbitrary in this test, grab from OpenStack\n test_VM = conn.compute.get_server(test_VM_ID)\n print(' test_VM.name=',test_VM.name)\n print(' test_VM.status=',test_VM.status)\n print(' suspending...')\n conn.compute.resume_server(test_VM_ID)",
"def Stop():\n timer.stop()\n global total_stop\n global success_stop\n total_stop += 1\n if n % 10 == 0:\n success_stop = success_stop + 1",
"def run_stop_challenge_code(self, *chall_code_args, **chall_code_kwargs):\n try:\n code_index = self.challenge_code_ID - 1 # lists are indexed from 0 to N-1\n # invoke corresponding stop method, via index\n self.stop_challenge_code_list[code_index](*chall_code_args, **chall_code_kwargs)\n except Exception as e:\n print(type(e), e)\n sys.exit()",
"def stop():\n global total_attempts, successful_stops, running\n timer.stop()\n running = False\n if running == False:\n if counter % 10 == 0 and counter != 0:\n successful_stops += 1\n total_attempts += 1\n elif counter != 0:\n total_attempts += 1",
"def stop() -> None:",
"def TerminalClientStop(self, exitCode=200):\n pass",
"def stop():",
"def stop():",
"def stop():",
"def stop():",
"def stop(self):\n return self.writevar('\\xC5\\x98',0,1)",
"def LOWER_STOP():\n return 13",
"def stop(self) -> None:",
"def stop(self) -> None:",
"def stop(self,c,data):\r\n self.board.stop()\r\n return True",
"def stop_procedure(self):\n pass",
"def InterfaceClientStop(self, exitCode=200): \n pass",
"def stop():\n if timer.is_running():\n timer.stop()\n global tries, wins, winstreak, losestreak, mood, scorepos\n tries += 1\n if current % 10 == 0:\n wins += 1\n winstreak += 1\n losestreak = 0\n mood = goodmood(winstreak)\n else:\n winstreak = 0\n losestreak += 1\n mood = badmood(losestreak)\n if tries > 9:\n scorepos = (241, 140)\n if wins > 9:\n scorepos = (228, 140)",
"def stop(self) -> None:\n ...",
"def stopTest(self, test):\n self.complete_output()",
"def stop(self):",
"def stop(self):"
] | [
"0.74246866",
"0.73739845",
"0.72364855",
"0.7176849",
"0.7037037",
"0.70282507",
"0.702119",
"0.6923483",
"0.6321979",
"0.6134291",
"0.6054497",
"0.60173416",
"0.60113186",
"0.6009146",
"0.59889483",
"0.59889483",
"0.59889483",
"0.59889483",
"0.5880148",
"0.5830737",
"0.5748104",
"0.5748104",
"0.5723866",
"0.5683963",
"0.565784",
"0.5652068",
"0.564921",
"0.5636502",
"0.5632406",
"0.5632406"
] | 0.765452 | 0 |
Function to initialize challenge definition data. | def init_challenge_definitions():
challenge_defs = []
# add info to list in memory, one by one, following signature values
chall_def_ID = 5
chall_def_name = "VM failure"
chall_def_challengeType = ChallengeType.CLOUD_COMPUTE_FAILURE
chall_def_recipientID = 1
chall_def_impactedCloudResourcesInfo = "OpenStack VM on ctl02 in Arm pod"
chall_def_impactedCloudResourceIDs = [2]
chall_def_impactedPhysResourcesInfo = "physical server XYZ"
chall_def_impactedPhysResourceIDs = [1]
chall_def_startChallengeCLICommandSent = "service nova-compute stop"
chall_def_stopChallengeCLICommandSent = "service nova-compute restart"
# OpenStack VM Suspend vs. Pause: suspend stores the state of VM on disk while pause stores it in memory (RAM)
# in CLI:
# $ nova suspend NAME
# $ nova resume NAME
# but better use OpenStack SDK
chall_def_startChallengeAPICommandSent = []
chall_def_stopChallengeAPICommandSent = []
chall_def_codeID = 5
challenge_defs.append(ChallengeDefinition(chall_def_ID, chall_def_name,
chall_def_challengeType,
chall_def_recipientID,
chall_def_impactedCloudResourcesInfo,
chall_def_impactedCloudResourceIDs,
chall_def_impactedPhysResourcesInfo,
chall_def_impactedPhysResourceIDs,
chall_def_startChallengeCLICommandSent,
chall_def_stopChallengeCLICommandSent,
chall_def_startChallengeAPICommandSent,
chall_def_stopChallengeAPICommandSent,
chall_def_codeID))
# write list to binary file
write_list_bin(challenge_defs, FILE_CHALLENGE_DEFINITIONS)
return challenge_defs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, data=None):\n self.problems = {}\n if data is not None:\n self.update(data)",
"def _initialize_data(self):\n self.reset_count = 0\n self._idn_no_firmware = \"KEPCO,BOP 50-20,E1234,\"\n self._firmware = 2.6\n self._init_data()",
"def __init__(self):\n # Dict of minecraft object in form of \"dict[id] = name\"\n self.data_values = dict()\n self.parser = self.setup_parser()",
"def setUp(self):\n self.family = Family()\n self.decoder = Decoder()\n self.data1 = ['Atya', 'Sister-In-Law']\n self.data2 = ['Satya', 'Ketu', 'Male']",
"def __init__(self):\n self.TECRDB_compounds_data_dict = {}\n self.TECRDB_compounds_pH7_species_id_dict = {}\n self.TECRDB_compounds_least_H_sid_dict = {}\n self.get_TECRDB_compounds_data()",
"def __init__(self):\n\n # initialise the empty mappings dictionary\n self.data = {\n 'loan_id': None,\n 'product': None,\n 'origination_date': None,\n 'reversion_date': None,\n 'rate_term': None,\n 'loan_amount': None,\n 'initial_rate': None,\n 'reversion_rate': None,\n 'term': None,\n 'interest_only_amount': None,\n 'upfront_fees': None,\n 'upfront_costs': None,\n 'entity_eir': None\n }",
"def __init__(self, initial_data=[]):\n hdict.__init__(self)\n\n for elt in initial_data:\n self.add(elt)",
"def __init__(self, challenge_id=None, shard=None, archive_name=None, creation_timestamp=None, last_update_timestamp=None, archive_completion_percentage=None, current_completion_percentage=None, local_vars_configuration=None): # noqa: E501 # noqa: E501\n if local_vars_configuration is None:\n local_vars_configuration = Configuration()\n self.local_vars_configuration = local_vars_configuration\n\n self._challenge_id = None\n self._shard = None\n self._archive_name = None\n self._creation_timestamp = None\n self._last_update_timestamp = None\n self._archive_completion_percentage = None\n self._current_completion_percentage = None\n self.discriminator = None\n\n if challenge_id is not None:\n self.challenge_id = challenge_id\n if shard is not None:\n self.shard = shard\n if archive_name is not None:\n self.archive_name = archive_name\n if creation_timestamp is not None:\n self.creation_timestamp = creation_timestamp\n if last_update_timestamp is not None:\n self.last_update_timestamp = last_update_timestamp\n if archive_completion_percentage is not None:\n self.archive_completion_percentage = archive_completion_percentage\n if current_completion_percentage is not None:\n self.current_completion_percentage = current_completion_percentage",
"def __init__(self):\n\t\tsuper().__init__()\n\t\t\n\t\t# Typically a list of data here\n\t\t# Typically a dict of header keys and values here",
"def initialize(self):\n self.data = None\n self.errors = []",
"def __init__(self):\r\n\t\twith open(\"eqs.json\") as qData:\r\n\t\t\tself.questions = json.load(qData)\r\n\t\twith open(\"eqsave.json\") as uData:\r\n\t\t\tself.records = json.load(uData)\r\n\t\tself.types = {\"1\": \"Reformer\", \"2\": \"Helper\", \"3\": \"Achiever\", \"4\": \"Individualist\", \"5\": \"Investigator\", \"6\": \"Loyalist\", \"7\": \"Enthusiast\", \"8\": \"Challenger\", \"9\": \"Peacemaker\"}",
"def _init_dataset():\n global _residues\n if _residues is not None:\n # Database is already initialized\n return\n\n # Residuue data is taken from\n # ftp://ftp.wwpdb.org/pub/pdb/data/monomers/components.cif\n # (2019/01/27)\n _info_dir = dirname(realpath(__file__))\n with open(join(_info_dir, \"residues.msgpack\"), \"rb\") as file:\n _residues = msgpack.unpack(\n file, use_list=False, raw=False\n )",
"def setup(self): \n self.suburbs_dict = dict()\n self.raw_proIds_dict = dict()\n self.propertyIds_dict = dict()\n self.valuations = dict()",
"def initialise(self):",
"def setUpFormData(self):\n self.formData = {'labGroup': '5', 'abbrev': 'etoh', 'name': 'ethanol', 'CAS_ID': '64-17-5', 'CSID': '682',\n 'chemicalClasses': [ChemicalClass.objects.get(label='Solv').pk]}",
"def _initialize_data(self):\n self.unique_id = 123\n\n self.gas_valve_open = False\n self.buffer_valve_open = False\n self.pump_valve_open = False\n\n self.operatingmode = 0\n\n self.sample_pressure_high_limit = 100\n self.sample_pressure_low_limit = 10\n self.sample_pressure = 0\n\n self.error = 0\n\n self.buffer_pressure_high = True",
"def __init__(self, data: dict = {}):\n pass",
"def __init__(self):\n\n self.dialogue_ids = self.__load_dialogue_ids(\"data/dialogue_ids.txt\")\n self.class_dict = self.__load_class_representation(\"data/class_vectors.txt\")",
"def _init_data(self, data):\n assert type(data) is dict, \"dict expected: %r\" % type(data)\n assert len(data) is 1, \"size of dict should be 1: %r\" % len(data)\n self._name = data.keys()[0]\n self._data = np.asarray(data[self._name])\n self._set = True",
"def __init__(self, course_id, name, content):\n # course id and name need to go over the string_correct function\n self.ID = self.id_correct(str(course_id))\n self.name = self.string_correct(str(name))\n\n self.content = str(content)\n\n # uninitialized variables\n self.prere = {}\n self.postre = {}\n self.description = None\n self.department_title = None\n self.course_level = None\n\n # parse description and prerequisite raw data from content var\n self.seperate_content()",
"def __init__( self ):\n self.arguments = []\n self._opt_specs = []\n self._pos_specs = []\n self._values = {}",
"def __init__(self):\n self.name = ''\n self.variables = []\n self.assumptions = []\n self.guarantees = []",
"def __init__(self):\n self.N_Chls = 0\n self.N_Chl_a = 0\n self.N_Chl_b = 0\n self.type = \"none\"",
"def initialize(self,inputDict):\n pass",
"def _init_data(self) -> None:\n self.dtype = dict()\n self.shape = dict()\n self.size = dict()\n self.attrs = dict()\n self.data_ptr = dict()\n\n if self.mode == 'r':\n for k in self.fp.keys():\n self.dtype[k] = self.fp[k].dtype\n self.shape[k] = self.fp[k].shape\n self.size[k] = self.fp[k].shape[0]\n self.data_ptr[k] = 0",
"def initialize(self):\n self.candidate_disease_list = []\n self.candidate_symptom_list = []\n self.agent_action = {\n \"turn\":None,\n \"action\":None,\n \"request_slots\":{},\n \"inform_slots\":{},\n \"explicit_inform_slots\":{},\n \"implicit_inform_slots\":{},\n \"speaker\":\"agent\"\n }",
"def __init__(self, data_manager, response_variable=None, explanatory_variables=None):\n\n variable_names = data_manager.get_variable_names()\n\n # if the response and explanatory variables aren't specified, set the response to the first in the list of\n # variables and the explanatory variables to the remaining variables\n if response_variable is None and explanatory_variables is None:\n response_variable = variable_names[0]\n explanatory_variables = variable_names[1:]\n\n # if the response variable isn't specified and the explanatory variables are, set the response variable to the\n # first variable not in the explanatory variables\n elif response_variable is None and explanatory_variables is not None:\n # raw_explanatory_variables = [raw_variable for _, raw_variable in ]\n possible_response_variables = [var for var in variable_names if var not in explanatory_variables]\n response_variable = possible_response_variables[0]\n\n # if the response variable is specified and the explanatory variables aren't, set the explanatory variables to\n # the variables that aren't the response variable\n elif response_variable is not None and explanatory_variables is None:\n _, raw_response_variable = find_raw_variable(response_variable)\n explanatory_variables = [var for var in variable_names if var != raw_response_variable]\n\n super().__init__(data_manager, response_variable)\n\n self.set_explanatory_variables(explanatory_variables)",
"def init_test_definitions():\n test_definitions = []\n\n # add info to list in memory, one by one, following signature values\n test_def_ID = 5\n test_def_name = \"VM failure impact on virtual firewall (vFW VNF)\"\n test_def_challengeDefID = 5\n test_def_testCaseID = 5\n test_def_VNFIDs = [1]\n test_def_associatedMetricsIDs = [2]\n test_def_recipientIDs = [2]\n test_def_testCLICommandSent = [\"pwd\",\"kubectl describe pods --include-uninitialized=false\"]\n test_def_testAPICommandSent = [\"data1\",\"data2\"]\n test_def_testCodeID = 5\n test_definitions.append(TestDefinition(test_def_ID, test_def_name,\n test_def_challengeDefID,\n test_def_testCaseID,\n test_def_VNFIDs,\n test_def_associatedMetricsIDs,\n test_def_recipientIDs,\n test_def_testCLICommandSent,\n test_def_testAPICommandSent,\n test_def_testCodeID))\n\n # write list to binary file\n write_list_bin(test_definitions, FILE_TEST_DEFINITIONS)\n\n return test_definitions",
"def __init__(self):\n self._create_options()\n self._create_sections()",
"def __init__(self):\n self.constant_fields = {}\n self.post_score_renames = {}\n self.form = None\n self.form_field_regex = None\n self.field_count = None\n\n self.set_generic_fields()\n self.set_specific_fields()\n self.set_post_score_renames()"
] | [
"0.6270538",
"0.61179715",
"0.6039615",
"0.59718883",
"0.59608895",
"0.5943203",
"0.5878078",
"0.5860947",
"0.5855922",
"0.5850166",
"0.58261216",
"0.58040184",
"0.5778829",
"0.5774496",
"0.5738343",
"0.57342714",
"0.5699423",
"0.56817144",
"0.5681485",
"0.5676142",
"0.5654239",
"0.5644409",
"0.563065",
"0.5629923",
"0.56189233",
"0.56147164",
"0.56053096",
"0.56035453",
"0.5591968",
"0.5587876"
] | 0.70936805 | 0 |
Function to initialize metric definition data. | def init_metric_definitions():
metric_definitions = []
# add info to list in memory, one by one, following signature values
metric_def_ID = 1
metric_def_name = "Recovery Time"
metric_def_info = "Measures time taken by ONAP to restore a VNF"
metric_definitions.append(RecoveryTimeDef(metric_def_ID, metric_def_name,
metric_def_info))
metric_def_ID = 2
metric_def_name = "Uptime Percentage"
metric_def_info = "Measures ratio of uptime to reference time, not counting planned downtime"
metric_definitions.append(UptimePercentageDef(metric_def_ID, metric_def_name,
metric_def_info))
# write list to binary file
write_list_bin(metric_definitions, FILE_METRIC_DEFINITIONS)
return metric_definitions | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def initialize_metrics():\n metrics = {\n 'cd_losses': [],\n 'cd_corrects': [],\n 'cd_precisions': [],\n 'cd_recalls': [],\n 'cd_f1scores': [],\n }\n\n return metrics",
"def initialize(self, runInfo, inputs, initDict) :\n super().initialize(runInfo, inputs, initDict)\n for metricIn in self.assemblerDict['Metric']:\n self.metricsDict[metricIn[2]] = metricIn[3]",
"def init_metrics(self):\n\n self.metrics = {}\n\n self.metrics['train_loss'] = np.zeros(0)\n self.metrics['test_loss'] = np.zeros(0)\n\n # self.orth_clf = LinearDecoder(self, self.q_, MeanClassifier)\n # self.metrics['train_orthogonality'] = np.zeros(0)\n # self.metrics['test_orthogonality'] = np.zeros(0)\n\n self.metrics['train_parallelism'] = np.zeros((0,self.q_)) \n self.metrics['test_parallelism'] = np.zeros((0,self.q_))",
"def init_metrics():\n metrics = defaultdict(list)\n metrics['best_acc'] = 0.0\n metrics['best_loss'] = float('inf')\n metrics['best_epoch'] = 0\n return metrics",
"def __init__(self):\n super().__init__()\n self.metric = 'FMEASR'",
"def init_metric_dict(self, metrics=None, init_value=None):\n if metrics is None:\n metrics = [\"\"]\n\n if init_value is None:\n init_value = 0.0\n\n self.metric_dict = {metric: init_value for metric in metrics}",
"def __init_metrics(self):\n\n batch = {}\n # split data into batches of size batch_size or less\n for metric_name, metric_pattern in self.metrics.items():\n # get the batch list for that metric\n batch_list = []\n for s in range(1, self.schema + 1):\n for t in range(1, self.table + 1):\n k = '/metrics/type=IndexTable/keyspace={}/scope={}/name={}/mean'.format(s, t, metric_name)\n # from Python 3.6 onwards, the standard dict type maintains insertion order by default\n batch[k] = 0\n # if the batch has batch_size items or at the end of iteration,\n # append the batch to list of that metric and create a new empty batch\n if len(batch) == self.batch_size or (s == self.schema and t == self.table):\n batch_list.append(batch)\n batch = {}\n\n # parse metric patterns\n l = metric_pattern.split()\n if l[0] == '(>':\n self.metrics[metric_name] = IncMetricStruct(float(int(l[1])), float(l[2][1:]), float(l[4][:-2]),\n batch_list)\n else:\n self.metrics[metric_name] = RandMetricStruct(float(l[0][1:]), float(l[-1][:-1]), batch_list)",
"def __init__(self):\n super(INumpyArrayMetric, self).__init__()\n self.metric = 'INumpyArrayMetric'\n self.ground_truth = None # np.ndarray\n self.segmentation = None # np.ndarray",
"def __init__(self, metricName, timeResolutions = (86400,)):\n self.metric = metricName\n self.timeResolutions = timeResolutions",
"def __init__(self):\n super().__init__()\n self.metric = 'HDRFDST'",
"def __init__(self) -> None:\n self.metrics = {}\n self.current = None\n self.run = None",
"def __init__(self):\n super().__init__()\n self.metric = 'IConfusionMatrixMetric'\n self.confusion_matrix = None # ConfusionMatrix",
"def __init__(self):\n super().__init__()\n self.metric = 'MAHLNBS'",
"def __init__(self):\n super().__init__()\n self.metric = 'FN'",
"def __init__(self):\n super().__init__()\n self.printTag = 'POSTPROCESSOR Metrics'\n self.dynamic = False # is it time-dependent?\n self.features = None # list of feature variables\n self.targets = None # list of target variables\n self.metricsDict = {} # dictionary of metrics that are going to be assembled\n self.multiOutput = 'mean'# defines aggregating of multiple outputs for HistorySet\n # currently allow mean, max, min, raw_values\n self.weight = None # 'mean' is provided for self.multiOutput, weights can be used\n # for each individual output when all outputs are averaged\n self.pivotParameter = None\n self.pivotValues = []\n # assembler objects to be requested\n self.addAssemblerObject('Metric', InputData.Quantity.one_to_infinity)",
"def __init__(self):\n super().__init__()\n self.metric = 'FALLOUT'",
"def __init__(self):\n super().__init__()\n self.metric = 'JACRD'",
"def __init__(self, replication_num, metric_name_array, metric_collection_types = None, detailed_metric_assembly = False):\n self.replication_num = replication_num\n self.metrics = metric_name_array\n self.metric_collection_types = metric_collection_types # can be a string array elements of which can be one of ('STRING_LIST', 'COUNT_MAX', 'MEAN_STD','MIN','MAX', 'MIN_MAX') \n self.detailed_metric_assembly = detailed_metric_assembly\n self.replication_counter = 0\n self.metric_final_results = {}\n # initialize results array for each metric\n for metric in metric_name_array:\n self.metric_final_results[metric] = []",
"def __init__(self, included_metrics: List[str]):\n self.included_metrics = included_metrics\n self.metrics = self._initialize_metrics()",
"def __init__(self, group_key=None, sort_key=None):\n super(Metric, self).__init__()\n self.group_key = group_key\n self.sort_key = sort_key",
"def __init__(self):\r\n # sample ID -> (ref individual count,\r\n # {size -> (estimate, std err, ci_low, ci_high)})\r\n self._data = {}",
"def __init__(self, metrics, schema, table, nid):\n\n self.id = nid\n self.metrics = metrics\n self.schema = schema\n self.table = table\n self.batch_size = 20\n self.__init_metrics()",
"def create_metric(self) -> EvalMetric:\n pass",
"def __init__(self, **kwargs):\n DataLoader.__init__(self, **kwargs)\n \n self._query_names_[FDH._QTYPE_BANNER_] = 'report_banner_metrics_minutely'\n self._query_names_[FDH._QTYPE_LP_] = 'report_LP_metrics_minutely'\n self._query_names_[FDH._QTYPE_BANNER_LP_] = 'report_bannerLP_metrics_minutely'\n self._query_names_['campaign'] = 'report_campaign_metrics_minutely'\n self._query_names_['campaign_total'] = 'report_campaign_metrics_minutely_total'\n \n self._query_names_[FDH._QTYPE_BANNER_ + FDH._QTYPE_TIME_] = 'report_banner_metrics_minutely_all'\n self._query_names_[FDH._QTYPE_LP_ + FDH._QTYPE_TIME_] = 'report_lp_metrics_minutely_all'\n self._query_names_[FDH._QTYPE_CAMPAIGN_ + FDH._QTYPE_TIME_] = 'report_campaign_metrics_minutely_all'\n \n self._query_type_ = kwargs['query_type']\n \n \"\"\" hardcode the data handler for now \"\"\"\n self._data_handler_ = FDH\n \n self._summary_data_ = None",
"def __init__(self, collectd):\n self.collectd = collectd\n self.conf = self.default_config()\n self.types = {}\n\n collectd.info('Initialized MetricsConfig with default config %s' % self.conf)",
"def __init__(self):\n super().__init__()\n self.metric = 'PROBDST'",
"def __init__(self):\n super().__init__()\n self.metric = 'AVGDIST'",
"def __init__(self):\n super().__init__()\n self.metric = 'ACURCY'",
"def __init__(self, **kwargs):\r\n schema = MlModelMetricSchema()\r\n schema_collection = MlModelMetricSchema(many=True)\r\n super().__init__(schema, schema_collection, **kwargs)",
"def __init__(self, **kwargs):\r\n schema = MlModelMetricSchema()\r\n schema_collection = MlModelMetricSchema(many=True)\r\n super().__init__(schema, schema_collection, **kwargs)"
] | [
"0.7117321",
"0.69168526",
"0.6888356",
"0.66131765",
"0.6559224",
"0.653191",
"0.65315676",
"0.64484173",
"0.6444055",
"0.64089125",
"0.6371227",
"0.63554186",
"0.6349524",
"0.631591",
"0.6296783",
"0.62531286",
"0.6162134",
"0.6160958",
"0.61490244",
"0.612974",
"0.6121506",
"0.6118204",
"0.6105687",
"0.60705984",
"0.6070511",
"0.60661286",
"0.606499",
"0.6056656",
"0.60381085",
"0.60381085"
] | 0.70609146 | 1 |
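The metric-definition snippet above, and the resource-initialization snippets that follow, all persist their lists through a write_list_bin helper and FILE_* path constants that are not reproduced in this excerpt. A minimal sketch of that persistence pair, assuming a pickle-based implementation and an illustrative file name (both are assumptions, not the original source):

import pickle

# Illustrative path constant; the real FILE_* values are defined elsewhere in the source module.
FILE_METRIC_DEFINITIONS = "metric_definitions.bin"

def write_list_bin(obj_list, file_name):
    # Serialize the whole list to a binary file in one call (assumed pickle-based helper).
    with open(file_name, "wb") as f:
        pickle.dump(obj_list, f)

def read_list_bin(file_name):
    # Read back a list previously written by write_list_bin.
    with open(file_name, "rb") as f:
        return pickle.load(f)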
Function to initialize physical resource data. | def init_physical_resources():
test_physical_resources = []
# add info to list in memory, one by one, following signature values
phys_resrc_ID = 1
phys_resrc_name = "small-cavium-1"
phys_resrc_info = "Jump server in Arm pod, 48 cores, 64G RAM, 447G SSD, aarch64 Cavium ThunderX, Ubuntu OS"
phys_resrc_IPAddress = "10.10.50.12"
phys_resrc_MACAddress = "00-14-22-01-23-45"
test_physical_resources.append(PhysicalResource(phys_resrc_ID, phys_resrc_name,
phys_resrc_info,
phys_resrc_IPAddress,
phys_resrc_MACAddress))
phys_resrc_ID = 2
phys_resrc_name = "medium-cavium-1"
phys_resrc_info = "Jump server in New York pod, 96 cores, 64G RAM, 447G SSD, aarch64 Cavium ThunderX, Ubuntu OS"
phys_resrc_IPAddress = "30.31.32.33"
phys_resrc_MACAddress = "0xb3:22:05:c1:aa:82"
test_physical_resources.append(PhysicalResource(phys_resrc_ID, phys_resrc_name,
phys_resrc_info,
phys_resrc_IPAddress,
phys_resrc_MACAddress))
phys_resrc_ID = 3
phys_resrc_name = "mega-cavium-666"
phys_resrc_info = "Jump server in Las Vegas, 1024 cores, 1024G RAM, 6666G SSD, aarch64 Cavium ThunderX, Ubuntu OS"
phys_resrc_IPAddress = "54.53.52.51"
phys_resrc_MACAddress = "01-23-45-67-89-ab"
test_physical_resources.append(PhysicalResource(phys_resrc_ID, phys_resrc_name,
phys_resrc_info,
phys_resrc_IPAddress,
phys_resrc_MACAddress))
# write list to binary file
write_list_bin(test_physical_resources, FILE_PHYSICAL_RESOURCES)
return test_physical_resources | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _initialize_data(self):\n self.reset_count = 0\n self._idn_no_firmware = \"KEPCO,BOP 50-20,E1234,\"\n self._firmware = 2.6\n self._init_data()",
"def set_resource_data(self, resource, meta):\n super().set_resource_data(resource, meta)\n self._set_resource_temperature(resource)\n self._set_egs_plant_design_temperature()\n self._set_nameplate_to_match_resource_potential(resource)\n self._set_resource_potential_to_match_gross_output()\n self._set_costs()",
"def set_resource_data(self, resource, meta):\n\n # map resource data names to SAM required data names\n var_map = {'speed': 'windspeed',\n 'direction': 'winddirection',\n 'airtemperature': 'temperature',\n 'temp': 'temperature',\n 'surfacepressure': 'pressure',\n 'relativehumidity': 'rh',\n 'humidity': 'rh',\n }\n lower_case = {k: k.lower().replace(' ', '').replace('_', '')\n for k in resource.columns}\n resource = resource.rename(mapper=lower_case, axis='columns')\n resource = resource.rename(mapper=var_map, axis='columns')\n\n data_dict = {}\n var_list = ['temperature', 'pressure', 'windspeed', 'winddirection']\n if 'winddirection' not in resource:\n resource['winddirection'] = 0.0\n\n time_index = resource.index\n self.time_interval = self.get_time_interval(resource.index.values)\n\n data_dict['fields'] = [1, 2, 3, 4]\n data_dict['heights'] = 4 * [self.sam_sys_inputs['wind_turbine_hub_ht']]\n\n if 'rh' in resource:\n # set relative humidity for icing.\n rh = self.ensure_res_len(resource['rh'].values, time_index)\n n_roll = int(meta['timezone'] * self.time_interval)\n rh = np.roll(rh, n_roll, axis=0)\n data_dict['rh'] = rh.tolist()\n\n # must be set as matrix in [temperature, pres, speed, direction] order\n # ensure that resource array length is multiple of 8760\n # roll the truncated resource array to local timezone\n temp = self.ensure_res_len(resource[var_list].values, time_index)\n n_roll = int(meta['timezone'] * self.time_interval)\n temp = np.roll(temp, n_roll, axis=0)\n data_dict['data'] = temp.tolist()\n\n data_dict['lat'] = meta['latitude']\n data_dict['lon'] = meta['longitude']\n data_dict['tz'] = meta['timezone']\n data_dict['elev'] = meta['elevation']\n\n time_index = self.ensure_res_len(time_index, time_index)\n data_dict['minute'] = time_index.minute\n data_dict['hour'] = time_index.hour\n data_dict['year'] = time_index.year\n data_dict['month'] = time_index.month\n data_dict['day'] = time_index.day\n\n # add resource data to self.data and clear\n self['wind_resource_data'] = data_dict\n self['wind_resource_model_choice'] = 0",
"def _initialize_data(self):\n self.unique_id = 123\n\n self.gas_valve_open = False\n self.buffer_valve_open = False\n self.pump_valve_open = False\n\n self.operatingmode = 0\n\n self.sample_pressure_high_limit = 100\n self.sample_pressure_low_limit = 10\n self.sample_pressure = 0\n\n self.error = 0\n\n self.buffer_pressure_high = True",
"def set_resource_data(self, resource, meta):",
"def __init__(self, resource_type_dict):\n # name\n self.resource_name = resource_type_dict['resource_name']\n # cores\n self.min_core = resource_type_dict['mincore']\n self.max_core = resource_type_dict['maxcore']\n # memory\n self.min_ram_per_core = resource_type_dict['minrampercore']\n self.max_ram_per_core = resource_type_dict['maxrampercore']",
"def set_resource_data(self, resource, meta):\n\n # map resource data names to SAM required data names\n var_map = {'significantwaveheight': 'significant_wave_height',\n 'waveheight': 'significant_wave_height',\n 'height': 'significant_wave_height',\n 'swh': 'significant_wave_height',\n 'energyperiod': 'energy_period',\n 'waveperiod': 'energy_period',\n 'period': 'energy_period',\n 'ep': 'energy_period',\n }\n lower_case = {k: k.lower().replace(' ', '').replace('_', '')\n for k in resource.columns}\n resource = resource.rename(mapper=lower_case, axis='columns')\n resource = resource.rename(mapper=var_map, axis='columns')\n\n data_dict = {}\n\n time_index = resource.index\n self.time_interval = self.get_time_interval(resource.index.values)\n\n # must be set as matrix in [temperature, pres, speed, direction] order\n # ensure that resource array length is multiple of 8760\n # roll the truncated resource array to local timezone\n for var in ['significant_wave_height', 'energy_period']:\n arr = self.ensure_res_len(resource[var].values, time_index)\n n_roll = int(meta['timezone'] * self.time_interval)\n data_dict[var] = np.roll(arr, n_roll, axis=0).tolist()\n\n data_dict['lat'] = meta['latitude']\n data_dict['lon'] = meta['longitude']\n data_dict['tz'] = meta['timezone']\n\n time_index = self.ensure_res_len(time_index, time_index)\n data_dict['minute'] = time_index.minute\n data_dict['hour'] = time_index.hour\n data_dict['year'] = time_index.year\n data_dict['month'] = time_index.month\n data_dict['day'] = time_index.day\n\n # add resource data to self.data and clear\n self['wave_resource_data'] = data_dict",
"def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n bundle_name: Optional[pulumi.Input[str]] = None,\n description: Optional[pulumi.Input[str]] = None,\n desktop_type: Optional[pulumi.Input[str]] = None,\n image_id: Optional[pulumi.Input[str]] = None,\n language: Optional[pulumi.Input[str]] = None,\n root_disk_performance_level: Optional[pulumi.Input[str]] = None,\n root_disk_size_gib: Optional[pulumi.Input[int]] = None,\n user_disk_performance_level: Optional[pulumi.Input[str]] = None,\n user_disk_size_gibs: Optional[pulumi.Input[Sequence[pulumi.Input[int]]]] = None,\n __props__=None):\n ...",
"def __init__(self, resource, pin):\n self._resource = resource\n self._pin = pin\n self.data = {}",
"def __init__(self):\n self._id = 0\n self._init_cpu_cores_capacity = 0\n self._init_memory_capacity = 0\n self._init_pm_type = 0\n self._init_pm_state = 0\n\n self._region_id = 0\n self._zone_id = 0\n self._data_center_id = 0\n self._cluster_id = 0\n self._rack_id = 0\n\n # PM resource.\n self._live_vms: Set[int] = set()",
"def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n block_size_bytes: Optional[pulumi.Input[int]] = None,\n container_id: Optional[pulumi.Input[str]] = None,\n disk_file_format: Optional[pulumi.Input[Union[str, 'DiskFileFormat']]] = None,\n disk_size_gb: Optional[pulumi.Input[float]] = None,\n dynamic: Optional[pulumi.Input[bool]] = None,\n extended_location: Optional[pulumi.Input[pulumi.InputType['ExtendedLocationArgs']]] = None,\n hyper_v_generation: Optional[pulumi.Input[Union[str, 'HyperVGeneration']]] = None,\n location: Optional[pulumi.Input[str]] = None,\n logical_sector_bytes: Optional[pulumi.Input[int]] = None,\n physical_sector_bytes: Optional[pulumi.Input[int]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n virtual_hard_disk_name: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...",
"def __init__(self, owner, resourceFile):\n self.checksum = Path(resourceFile).md5 # Just use the path name as a unique ID\n _Resource.__init__(self, owner, resourceFile)\n if self._idevice:\n self._idevice.userResources.append(self)",
"def initAttributes(self):\n CCSDS.DU.DataUnit.initAttributes(self)\n self.dataFieldHeaderFlag = 0\n self.setPacketLength()",
"def set_resource_data(self, resource, meta):\n\n time_index = resource.index\n self.time_interval = self.get_time_interval(resource.index.values)\n\n # map resource data names to SAM required data names\n var_map = {'dni': 'dn',\n 'dhi': 'df',\n 'ghi': 'gh',\n 'clearskydni': 'dn',\n 'clearskydhi': 'df',\n 'clearskyghi': 'gh',\n 'windspeed': 'wspd',\n 'airtemperature': 'tdry',\n 'temperature': 'tdry',\n 'temp': 'tdry',\n 'dewpoint': 'tdew',\n 'surfacepressure': 'pres',\n 'pressure': 'pres',\n 'surfacealbedo': 'albedo',\n }\n lower_case = {k: k.lower().replace(' ', '').replace('_', '')\n for k in resource.columns}\n irrad_vars = ['dn', 'df', 'gh']\n\n resource = resource.rename(mapper=lower_case, axis='columns')\n resource = resource.rename(mapper=var_map, axis='columns')\n time_index = resource.index\n resource = {k: np.array(v) for (k, v) in\n resource.to_dict(orient='list').items()}\n\n # set resource variables\n for var, arr in resource.items():\n if var != 'time_index':\n\n # ensure that resource array length is multiple of 8760\n arr = self.ensure_res_len(arr, time_index)\n n_roll = int(self._meta['timezone'] * self.time_interval)\n arr = np.roll(arr, n_roll)\n\n if var in irrad_vars:\n if np.min(arr) < 0:\n warn('Solar irradiance variable \"{}\" has a minimum '\n 'value of {}. Truncating to zero.'\n .format(var, np.min(arr)), SAMInputWarning)\n arr = np.where(arr < 0, 0, arr)\n\n resource[var] = arr.tolist()\n\n resource['lat'] = meta['latitude']\n resource['lon'] = meta['longitude']\n resource['tz'] = meta['timezone']\n\n if 'elevation' in meta:\n resource['elev'] = meta['elevation']\n else:\n resource['elev'] = 0.0\n\n time_index = self.ensure_res_len(time_index, time_index)\n resource['minute'] = time_index.minute\n resource['hour'] = time_index.hour\n resource['month'] = time_index.month\n resource['year'] = time_index.year\n resource['day'] = time_index.day\n\n if 'albedo' in resource:\n self['albedo'] = self.agg_albedo(\n time_index, resource.pop('albedo'))\n\n self['solar_resource_data'] = resource",
"def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n catalog_name: Optional[pulumi.Input[str]] = None,\n image: Optional[pulumi.Input[str]] = None,\n image_id: Optional[pulumi.Input[str]] = None,\n image_name: Optional[pulumi.Input[str]] = None,\n regional_data_boundary: Optional[pulumi.Input[Union[str, 'RegionalDataBoundary']]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...",
"def __init__(__self__, *,\n resource_id: pulumi.Input[str],\n datasource_type: Optional[pulumi.Input[str]] = None,\n object_type: Optional[pulumi.Input[str]] = None,\n resource_location: Optional[pulumi.Input[str]] = None,\n resource_name: Optional[pulumi.Input[str]] = None,\n resource_type: Optional[pulumi.Input[str]] = None,\n resource_uri: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"resource_id\", resource_id)\n if datasource_type is not None:\n pulumi.set(__self__, \"datasource_type\", datasource_type)\n if object_type is not None:\n pulumi.set(__self__, \"object_type\", object_type)\n if resource_location is not None:\n pulumi.set(__self__, \"resource_location\", resource_location)\n if resource_name is not None:\n pulumi.set(__self__, \"resource_name\", resource_name)\n if resource_type is not None:\n pulumi.set(__self__, \"resource_type\", resource_type)\n if resource_uri is not None:\n pulumi.set(__self__, \"resource_uri\", resource_uri)",
"def __init__(__self__, *,\n resource_id: pulumi.Input[str],\n datasource_type: Optional[pulumi.Input[str]] = None,\n object_type: Optional[pulumi.Input[str]] = None,\n resource_location: Optional[pulumi.Input[str]] = None,\n resource_name: Optional[pulumi.Input[str]] = None,\n resource_type: Optional[pulumi.Input[str]] = None,\n resource_uri: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"resource_id\", resource_id)\n if datasource_type is not None:\n pulumi.set(__self__, \"datasource_type\", datasource_type)\n if object_type is not None:\n pulumi.set(__self__, \"object_type\", object_type)\n if resource_location is not None:\n pulumi.set(__self__, \"resource_location\", resource_location)\n if resource_name is not None:\n pulumi.set(__self__, \"resource_name\", resource_name)\n if resource_type is not None:\n pulumi.set(__self__, \"resource_type\", resource_type)\n if resource_uri is not None:\n pulumi.set(__self__, \"resource_uri\", resource_uri)",
"def __init__(__self__, *,\n resource_id: pulumi.Input[str],\n datasource_type: Optional[pulumi.Input[str]] = None,\n object_type: Optional[pulumi.Input[str]] = None,\n resource_location: Optional[pulumi.Input[str]] = None,\n resource_name: Optional[pulumi.Input[str]] = None,\n resource_type: Optional[pulumi.Input[str]] = None,\n resource_uri: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"resource_id\", resource_id)\n if datasource_type is not None:\n pulumi.set(__self__, \"datasource_type\", datasource_type)\n if object_type is not None:\n pulumi.set(__self__, \"object_type\", object_type)\n if resource_location is not None:\n pulumi.set(__self__, \"resource_location\", resource_location)\n if resource_name is not None:\n pulumi.set(__self__, \"resource_name\", resource_name)\n if resource_type is not None:\n pulumi.set(__self__, \"resource_type\", resource_type)\n if resource_uri is not None:\n pulumi.set(__self__, \"resource_uri\", resource_uri)",
"def __init__(__self__, *,\n resource_id: pulumi.Input[str],\n datasource_type: Optional[pulumi.Input[str]] = None,\n object_type: Optional[pulumi.Input[str]] = None,\n resource_location: Optional[pulumi.Input[str]] = None,\n resource_name: Optional[pulumi.Input[str]] = None,\n resource_type: Optional[pulumi.Input[str]] = None,\n resource_uri: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"resource_id\", resource_id)\n if datasource_type is not None:\n pulumi.set(__self__, \"datasource_type\", datasource_type)\n if object_type is not None:\n pulumi.set(__self__, \"object_type\", object_type)\n if resource_location is not None:\n pulumi.set(__self__, \"resource_location\", resource_location)\n if resource_name is not None:\n pulumi.set(__self__, \"resource_name\", resource_name)\n if resource_type is not None:\n pulumi.set(__self__, \"resource_type\", resource_type)\n if resource_uri is not None:\n pulumi.set(__self__, \"resource_uri\", resource_uri)",
"def _load_resource(self, resource):\r\n self.resource = resource\r\n self.tenant_id = resource.tenant_id\r\n self.subscription_id = resource.subscription_id\r\n self.account_name = resource.account_name\r\n self.container_name = resource.container_name\r\n self.resource_name = f\"{self.account_name}:{self.container_name}\"",
"def memb_init(self):\n self.initialize()",
"def __init__(__self__,\n resource_name: str,\n args: VirtualHardDiskArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...",
"def __init__(__self__, *,\n resource_id: str,\n datasource_type: Optional[str] = None,\n object_type: Optional[str] = None,\n resource_location: Optional[str] = None,\n resource_name: Optional[str] = None,\n resource_type: Optional[str] = None,\n resource_uri: Optional[str] = None):\n pulumi.set(__self__, \"resource_id\", resource_id)\n if datasource_type is not None:\n pulumi.set(__self__, \"datasource_type\", datasource_type)\n if object_type is not None:\n pulumi.set(__self__, \"object_type\", object_type)\n if resource_location is not None:\n pulumi.set(__self__, \"resource_location\", resource_location)\n if resource_name is not None:\n pulumi.set(__self__, \"resource_name\", resource_name)\n if resource_type is not None:\n pulumi.set(__self__, \"resource_type\", resource_type)\n if resource_uri is not None:\n pulumi.set(__self__, \"resource_uri\", resource_uri)",
"def __init__(__self__, *,\n resource_id: str,\n datasource_type: Optional[str] = None,\n object_type: Optional[str] = None,\n resource_location: Optional[str] = None,\n resource_name: Optional[str] = None,\n resource_type: Optional[str] = None,\n resource_uri: Optional[str] = None):\n pulumi.set(__self__, \"resource_id\", resource_id)\n if datasource_type is not None:\n pulumi.set(__self__, \"datasource_type\", datasource_type)\n if object_type is not None:\n pulumi.set(__self__, \"object_type\", object_type)\n if resource_location is not None:\n pulumi.set(__self__, \"resource_location\", resource_location)\n if resource_name is not None:\n pulumi.set(__self__, \"resource_name\", resource_name)\n if resource_type is not None:\n pulumi.set(__self__, \"resource_type\", resource_type)\n if resource_uri is not None:\n pulumi.set(__self__, \"resource_uri\", resource_uri)",
"def __init__(__self__, *,\n non_resource_attributes: Optional[pulumi.Input['NonResourceAttributesArgs']] = None,\n resource_attributes: Optional[pulumi.Input['ResourceAttributesArgs']] = None):\n if non_resource_attributes is not None:\n pulumi.set(__self__, \"non_resource_attributes\", non_resource_attributes)\n if resource_attributes is not None:\n pulumi.set(__self__, \"resource_attributes\", resource_attributes)",
"def pre_physical_interface_create(self, resource_dict):\n pass",
"def __init__(__self__,\n resource_name: str,\n args: RuntimeArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...",
"def __init__(__self__,\n resource_name: str,\n args: EnvironmentArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...",
"def set_resource_data(self, resource, meta):\n self.time_interval = self.get_time_interval(resource.index.values)\n pysam_w_fname = self._create_pysam_wfile(resource, meta)\n self[self.PYSAM_WEATHER_TAG] = pysam_w_fname",
"def __init__(self, owner, resourceFile):\n log.debug(u\"init resourceFile=%s\" % resourceFile)\n self._storageName = self._fn2ascii(resourceFile)\n self._userName = resourceFile.encode('utf-8')\n self._originalFile = resourceFile\n try:\n self.checksum = resourceFile.md5\n from exe.engine.idevice import Idevice\n if isinstance(owner, Idevice):\n self._idevice = owner\n if owner.parentNode:\n self.package = owner.parentNode.package\n else:\n self.package = None\n else:\n self._idevice = None\n self.package = owner\n finally:\n del self._originalFile"
] | [
"0.67553765",
"0.6618656",
"0.66008776",
"0.654145",
"0.65205157",
"0.65172935",
"0.6508107",
"0.6412315",
"0.6384554",
"0.6343454",
"0.62426376",
"0.62185234",
"0.62047946",
"0.6136138",
"0.61295336",
"0.6123724",
"0.6123724",
"0.6123724",
"0.6123724",
"0.6063587",
"0.6057646",
"0.6047601",
"0.6033148",
"0.6033148",
"0.60000306",
"0.59915257",
"0.594161",
"0.5938926",
"0.5929649",
"0.5914416"
] | 0.6915017 | 0 |
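The init_physical_resources snippet above constructs PhysicalResource objects with an ID, name, info string, IP address, and MAC address, but the class itself is not shown in this excerpt. A minimal sketch, assuming a plain attribute container whose constructor mirrors the call sites above:

class PhysicalResource:
    # Assumed plain data holder; attribute names simply follow the constructor arguments used above.
    def __init__(self, ID, name, info, IPAddress, MACAddress):
        self.ID = ID
        self.name = name
        self.info = info
        self.IPAddress = IPAddress
        self.MACAddress = MACAddress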
Function to initialize cloud virtual resource data. | def init_cloud_virtual_resources():
test_cldvirt_resources = []
# add info to list in memory, one by one, following signature values
cldvirtres_ID = 1
cldvirtres_name = "nova-compute-1"
cldvirtres_info = "nova VM in Arm pod"
cldvirtres_IPAddress = "50.60.70.80"
cldvirtres_URL = "http://50.60.70.80:8080"
cldvirtres_related_phys_rsrcIDs = [1,3]
test_cldvirt_resources.append(CloudVirtualResource(cldvirtres_ID, cldvirtres_name,
cldvirtres_info,
cldvirtres_IPAddress,
cldvirtres_URL,
cldvirtres_related_phys_rsrcIDs))
cldvirtres_ID = 2
cldvirtres_name = "nova-compute-2"
cldvirtres_info = "nova VM in LaaS"
cldvirtres_IPAddress = "50.60.70.80"
cldvirtres_URL = "http://50.60.70.80:8080"
cldvirtres_related_phys_rsrcIDs = [2,3]
test_cldvirt_resources.append(CloudVirtualResource(cldvirtres_ID, cldvirtres_name,
cldvirtres_info,
cldvirtres_IPAddress,
cldvirtres_URL,
cldvirtres_related_phys_rsrcIDs))
cldvirtres_ID = 3
cldvirtres_name = "nova-compute-3"
cldvirtres_info = "nova VM in x86 pod"
cldvirtres_IPAddress = "50.60.70.80"
cldvirtres_URL = "http://50.60.70.80:8080"
cldvirtres_related_phys_rsrcIDs = [1]
test_cldvirt_resources.append(CloudVirtualResource(cldvirtres_ID, cldvirtres_name,
cldvirtres_info,
cldvirtres_IPAddress,
cldvirtres_URL,
cldvirtres_related_phys_rsrcIDs))
# write list to binary file
write_list_bin(test_cldvirt_resources, FILE_CLOUD_RESOURCES)
return test_cldvirt_resources | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(__self__,\n resource_name: str,\n args: VirtualHardDiskArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...",
"def pre_virtual_machine_create(self, resource_dict):\n pass",
"def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n block_size_bytes: Optional[pulumi.Input[int]] = None,\n container_id: Optional[pulumi.Input[str]] = None,\n disk_file_format: Optional[pulumi.Input[Union[str, 'DiskFileFormat']]] = None,\n disk_size_gb: Optional[pulumi.Input[float]] = None,\n dynamic: Optional[pulumi.Input[bool]] = None,\n extended_location: Optional[pulumi.Input[pulumi.InputType['ExtendedLocationArgs']]] = None,\n hyper_v_generation: Optional[pulumi.Input[Union[str, 'HyperVGeneration']]] = None,\n location: Optional[pulumi.Input[str]] = None,\n logical_sector_bytes: Optional[pulumi.Input[int]] = None,\n physical_sector_bytes: Optional[pulumi.Input[int]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n virtual_hard_disk_name: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...",
"def pre_virtual_DNS_create(self, resource_dict):\n pass",
"def pre_virtual_ip_create(self, resource_dict):\n pass",
"def pre_virtual_DNS_record_create(self, resource_dict):\n pass",
"def cloud_init(name, vm_=None, **kwargs):\n init_interface = cloud_init_interface(name, vm_, **kwargs)\n name = init_interface.pop(\"name\", name)\n return init(name, **init_interface)",
"def __init__(__self__,\n resource_name: str,\n args: RuntimeArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...",
"def pre_virtual_machine_interface_create(self, resource_dict):\n pass",
"def __init__(__self__,\n resource_name: str,\n args: RegionPerInstanceConfigArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...",
"def obj_initialization(cls):\n listimdata = cls.retrieve_json()\n for elem in listimdata:\n CloudCtx.retrieve_from_json(elem)",
"def __init__(__self__,\n resource_name: str,\n args: EnvironmentArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...",
"def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n cluster_name: Optional[pulumi.Input[str]] = None,\n node_setup: Optional[pulumi.Input[pulumi.InputType['NodeSetupArgs']]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n scale_settings: Optional[pulumi.Input[pulumi.InputType['ScaleSettingsArgs']]] = None,\n subnet: Optional[pulumi.Input[pulumi.InputType['ResourceIdArgs']]] = None,\n user_account_settings: Optional[pulumi.Input[pulumi.InputType['UserAccountSettingsArgs']]] = None,\n virtual_machine_configuration: Optional[pulumi.Input[pulumi.InputType['VirtualMachineConfigurationArgs']]] = None,\n vm_priority: Optional[pulumi.Input['VmPriority']] = None,\n vm_size: Optional[pulumi.Input[str]] = None,\n workspace_name: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...",
"def __init__(__self__,\n resource_name: str,\n args: PrivateCloudArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...",
"def set_resource_data(self, resource, meta):",
"def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n agent_upgrade: Optional[pulumi.Input[pulumi.InputType['AgentUpgradeArgs']]] = None,\n client_public_key: Optional[pulumi.Input[str]] = None,\n extensions: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['MachineExtensionInstanceViewArgs']]]]] = None,\n identity: Optional[pulumi.Input[pulumi.InputType['IdentityArgs']]] = None,\n location: Optional[pulumi.Input[str]] = None,\n location_data: Optional[pulumi.Input[pulumi.InputType['LocationDataArgs']]] = None,\n machine_name: Optional[pulumi.Input[str]] = None,\n mssql_discovered: Optional[pulumi.Input[str]] = None,\n os_profile: Optional[pulumi.Input[pulumi.InputType['OSProfileArgs']]] = None,\n os_type: Optional[pulumi.Input[str]] = None,\n parent_cluster_resource_id: Optional[pulumi.Input[str]] = None,\n private_link_scope_resource_id: Optional[pulumi.Input[str]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n service_statuses: Optional[pulumi.Input[pulumi.InputType['ServiceStatusesArgs']]] = None,\n tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,\n vm_id: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...",
"def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n catalog_name: Optional[pulumi.Input[str]] = None,\n image: Optional[pulumi.Input[str]] = None,\n image_id: Optional[pulumi.Input[str]] = None,\n image_name: Optional[pulumi.Input[str]] = None,\n regional_data_boundary: Optional[pulumi.Input[Union[str, 'RegionalDataBoundary']]] = None,\n resource_group_name: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...",
"def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n name: Optional[pulumi.Input[str]] = None,\n virtual_hub_id: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ...",
"def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n cluster_id: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n private_network_uuid: Optional[pulumi.Input[str]] = None,\n region: Optional[pulumi.Input[Union[str, 'Region']]] = None,\n size: Optional[pulumi.Input[Union[str, 'DatabaseSlug']]] = None,\n tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n __props__=None):\n ...",
"def pre_virtual_network_create(self, resource_dict):\n pass",
"def __init__(__self__,\n resource_name: str,\n args: InstanceArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...",
"def __init__(__self__,\n resource_name: str,\n args: InstanceArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...",
"def __init__(__self__,\n resource_name: str,\n args: InstanceArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...",
"def __init__(__self__,\n resource_name: str,\n args: InstanceArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...",
"def __init__(__self__,\n resource_name: str,\n args: InstanceArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...",
"def __init__(__self__,\n resource_name: str,\n args: InstanceArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...",
"def __init__(__self__,\n resource_name: str,\n args: VirtualNetworkApplianceArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...",
"def set_resource_data(self, resource, meta):\n super().set_resource_data(resource, meta)\n self._set_resource_temperature(resource)\n self._set_egs_plant_design_temperature()\n self._set_nameplate_to_match_resource_potential(resource)\n self._set_resource_potential_to_match_gross_output()\n self._set_costs()",
"def __init__(__self__,\n resource_name: str,\n args: CloudServicesNetworkArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...",
"def __init__(__self__,\n resource_name: str,\n opts: Optional[pulumi.ResourceOptions] = None,\n auto_renew: Optional[pulumi.Input[bool]] = None,\n auto_renew_period: Optional[pulumi.Input[int]] = None,\n cluster_name: Optional[pulumi.Input[str]] = None,\n data_center_name: Optional[pulumi.Input[str]] = None,\n disk_size: Optional[pulumi.Input[int]] = None,\n disk_type: Optional[pulumi.Input[str]] = None,\n enable_public: Optional[pulumi.Input[bool]] = None,\n instance_type: Optional[pulumi.Input[str]] = None,\n ip_white: Optional[pulumi.Input[str]] = None,\n maintain_end_time: Optional[pulumi.Input[str]] = None,\n maintain_start_time: Optional[pulumi.Input[str]] = None,\n major_version: Optional[pulumi.Input[str]] = None,\n node_count: Optional[pulumi.Input[int]] = None,\n password: Optional[pulumi.Input[str]] = None,\n pay_type: Optional[pulumi.Input[str]] = None,\n period: Optional[pulumi.Input[int]] = None,\n period_unit: Optional[pulumi.Input[str]] = None,\n security_groups: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tags: Optional[pulumi.Input[Mapping[str, Any]]] = None,\n vswitch_id: Optional[pulumi.Input[str]] = None,\n zone_id: Optional[pulumi.Input[str]] = None,\n __props__=None):\n ..."
] | [
"0.65209603",
"0.64640987",
"0.63841754",
"0.63472927",
"0.63000673",
"0.62648845",
"0.625409",
"0.62490654",
"0.62399304",
"0.6233856",
"0.62064767",
"0.61925113",
"0.61889744",
"0.61788136",
"0.6159353",
"0.6158444",
"0.61480516",
"0.6146311",
"0.6145305",
"0.6139789",
"0.6122897",
"0.6122897",
"0.6122897",
"0.6122897",
"0.6122897",
"0.6122897",
"0.6100665",
"0.6099913",
"0.6086623",
"0.60860276"
] | 0.7303633 | 0 |
Function to initialize VNFs and e2e Services data. | def init_VNFs_Services():
test_VNFs_Services = []
# add info to list in memory, one by one, following signature values
vnf_serv_ID = 1
vnf_serv_name = "vCPE-1"
vnf_serv_info = "virtual CPE in Arm pod"
vnf_serv_IPAddress = "5.4.3.2"
vnf_serv_URL = "http://5.4.3.2:8080"
vnf_serv_related_phys_rsrcIDs = [1,2]
vnf_serv_related_cloudvirt_rsrcIDs = [1]
test_VNFs_Services.append(VNFService(vnf_serv_ID, vnf_serv_name,
vnf_serv_info,
vnf_serv_IPAddress,
vnf_serv_URL,
vnf_serv_related_phys_rsrcIDs,
vnf_serv_related_cloudvirt_rsrcIDs))
vnf_serv_ID = 2
vnf_serv_name = "vFW-1"
vnf_serv_info = "virtual Firewall in x86 pod"
vnf_serv_IPAddress = "6.7.8.9"
vnf_serv_URL = "http://6.7.8.9:8080"
vnf_serv_related_phys_rsrcIDs = [3]
vnf_serv_related_cloudvirt_rsrcIDs = [2,3]
test_VNFs_Services.append(VNFService(vnf_serv_ID, vnf_serv_name,
vnf_serv_info,
vnf_serv_IPAddress,
vnf_serv_URL,
vnf_serv_related_phys_rsrcIDs,
vnf_serv_related_cloudvirt_rsrcIDs))
# write list to binary file
write_list_bin(test_VNFs_Services, FILE_VNFS_SERVICES)
return test_VNFs_Services | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def initialize(self):\n self.initialize_edges()\n self.initialize_prob()\n self.initialize_total_input_dict()\n\n self.initialize_fpmusigv_dict()",
"def _initialize_data(self):\n self.reset_count = 0\n self._idn_no_firmware = \"KEPCO,BOP 50-20,E1234,\"\n self._firmware = 2.6\n self._init_data()",
"def init_services(self):\n service_prefix = rospy.get_name() + \"/\"\n\n self._request_components_serv = rospy.Service(service_prefix +\n 'list_components',\n ListComponents,\n self.get_components)\n self._request_fields_serv = rospy.Service(service_prefix +\n 'list_fields',\n ListFields,\n self.get_fields)\n self._request_values_serv = rospy.Service(service_prefix +\n 'request_values',\n RequestValues,\n self.get_values)\n self._unsubscribe_values_serv = rospy.Service(service_prefix +\n 'unsubscribe_values',\n UnsubscribeValues,\n self.unsubscribe_values)",
"def setUp(self):\n \n chrom = \"1\"\n pos = \"15000000\"\n snp_id = \"CM00001\"\n ref = \"A\"\n alt = \"G\"\n filt = \"PASS\"\n \n # set up a SNV object, since SNV inherits VcfInfo\n self.var = SNV(chrom, pos, snp_id, ref, alt, filt)\n self.var.debug_chrom = \"1\"\n self.var.debug_pos = \"15000000\"\n \n self.default_info = \"HGNC=ATRX;CQ=missense_variant;random_tag\"\n \n \n # here are the default filtering criteria, as loaded into python\n known_genes = {\"ATRX\": {\"inheritance\": {\"Hemizygous\": \\\n {\"Loss of function\"}}, \"start\": \"10000000\", \"chrom\": \"1\", \\\n \"confirmed_status\": {\"Confirmed DD Gene\"}, \"end\": \"20000000\"}}\n \n SNV.known_genes = known_genes\n \n self.var.add_info(self.default_info)",
"def initialize_installation_vessel(self):\n\n support = self.config[\"oss_install_vessel\"]\n vessel = self.initialize_vessel(\"Floating Substation Installation Vessel\", support)\n self.env.register(vessel)\n vessel.initialize(mobilize=False)\n self.support_vessel = vessel\n\n depth = self.config[\"site\"][\"depth\"]\n towing_speed = self.config[\"offshore_substation_substructure\"].get(\"towing_speed\", 6)\n\n install_floating_substations(\n self.support_vessel,\n self.wet_storage,\n self.distance,\n towing_speed,\n depth,\n self.num_substations,\n )",
"def init(self, attrs):\n\n self.name = attrs[\"name\"]\n self.file_path = attrs.get(\"file\", \"pod.yaml\")\n\n self.nodes, self.nfvi_host, self.host_mgmt = \\\n self.helper.parse_pod_file(self.file_path, 'OvsDpdk')\n\n self.attrs = attrs\n self.vm_flavor = attrs.get('flavor', {})\n self.servers = attrs.get('servers', {})\n self.vm_deploy = attrs.get(\"vm_deploy\", True)\n self.ovs_properties = attrs.get('ovs_properties', {})\n # add optional static network definition\n self.networks = attrs.get(\"networks\", {})\n\n LOG.debug(\"Nodes: %r\", self.nodes)\n LOG.debug(\"NFVi Node: %r\", self.nfvi_host)\n LOG.debug(\"Networks: %r\", self.networks)",
"def initService(self):",
"def _initialize(self):\r\n print(\"Set the CP mode to EVSE\")\r\n self.whitebeet.controlPilotSetMode(1)\r\n print(\"Set the CP duty cycle to 100%\")\r\n self.whitebeet.controlPilotSetDutyCycle(100)\r\n print(\"Start the CP service\")\r\n self.whitebeet.controlPilotStart()\r\n print(\"Start SLAC in EVSE mode\")\r\n self.whitebeet.slacStart(1)\r\n time.sleep(2)",
"def init_structs(self):\n self.v6_struct = struct.Struct(V6_STRUCT_STRING)\n self.v5_struct = struct.Struct(V5_STRUCT_STRING)\n self.v4_struct = struct.Struct(V4_STRUCT_STRING)\n self.v3_struct = struct.Struct(V3_STRUCT_STRING)",
"def __init__(self):\n self.device_id = None\n self.devices = []\n self.onvif_config = {}",
"def init(self):\n self._service_store = ServiceStore(self.driver, self.network)\n self._emulator = NetworkEmulator(self.store, self.driver)",
"def __init__(self):\n\t\tself.config_file = \"c:/test-av/conf/vmware.conf\"\n\t\tself.conf = Config(self.config_file)\n\t\t#\n\t\t# you can redefine the Command class providing vmrun full path:\n\t\t# self.cmd = Command(\"vmrun full path\")\n\t\t#\n\t\t# TODO:\n\t\t#\t- Command class with one argument\n\t\tself.cmd = Command(self.conf.path, self.conf.host, self.conf.user, self.conf.passwd)\n\t\t\n\t\t# full paths of script neede for update\n\t\tself.netENScript=\"c:/Users/avtest/Desktop/EnableIF.bat\"\n\t\tself.netDISScript=\"c:/Users/avtest/Desktop/DisableIF.bat\"\n\t\tself.updScript=\"C:/Users/avtest/Desktop/AVUpdate.bat\"",
"def __init__(self):\n self.config = config.setup()\n self.log = logging.getLogger(__name__)\n #This block gets interface and interface type from config file\n self._lookupInterfaces()\n #And this one does the same for disks.\n self._lookupDisks()\n self.search_headers = self.config.get('VM', 'search_headers', 'name,uuid')\n self.headers = self.search_headers.split(',')\n def _error_handler(self, err):\n msg = \"Ignoring Libvirt error %s)\" % err\n pass\n # Prevent libvirt errors from reaching the console\n libvirt.registerErrorHandler(_error_handler, None)",
"def _configure_services(self):\n neutron_ovs_config = {}\n neutron_ovs_config['enable-sriov'] = True\n neutron_ovs_config['sriov-device-mappings'] = 'physnet42:eth42'\n\n pxc_config = {\n 'dataset-size': '25%',\n 'max-connections': 1000,\n 'root-password': 'ChangeMe123',\n 'sst-password': 'ChangeMe123',\n }\n nova_cc_config = {'network-manager': 'Neutron'}\n configs = {\n 'neutron-openvswitch': neutron_ovs_config,\n 'percona-cluster': pxc_config,\n 'nova-cloud-controller': nova_cc_config,\n }\n super(NeutronOVSBasicDeployment, self)._configure_services(configs)",
"def __init__(self, *args):\n _snap.PNEANetV_swiginit(self, _snap.new_PNEANetV(*args))",
"def __context_init(self):\n self._context.data[\"services\"] = copy.deepcopy(INITIAL_SRVDATA)",
"def __init__(self):\n logger.debug(\"VMPoolManager: _init_()\")\n self.system = State.Instance()\n \n self.VMPools = []\n e = EnvSetUp()\n config_spec = json.loads(open(e.get_ovpl_directory_path() + \"/config/config.json\").read())\n pools = config_spec[\"VMPOOL_CONFIGURATION\"][\"VMPOOLS\"]\n create_uri = config_spec[\"API_ENDPOINTS\"][\"CREATE_URI_ADAPTER_ENDPOINT\"]\n destroy_uri = config_spec[\"API_ENDPOINTS\"][\"DESTROY_URI_ADAPTER_ENDPOINT\"]\n\n for pool in pools:\n self.add_vm_pool( pool[\"POOLID\"], \\\n pool[\"DESCRIPTION\"], \\\n pool[\"ADAPTERIP\"], \\\n pool[\"PORT\"], \\\n create_uri, \\\n destroy_uri)\n\n logger.debug(\"VMPoolManager: _init_(); vm_pools = %s\" % (str(self.VMPools)))",
"def init_data(my_data, rp):\n\n msg.bold(\"initializing the sedov problem...\")\n\n # make sure that we are passed a valid patch object\n if not isinstance(my_data, patch.CellCenterData2d):\n print(\"ERROR: patch invalid in sedov.py\")\n print(my_data.__class__)\n sys.exit()\n\n # get the density, momenta, and energy as separate variables\n dens = my_data.get_var(\"density\")\n xmom = my_data.get_var(\"x-momentum\")\n ymom = my_data.get_var(\"y-momentum\")\n ener = my_data.get_var(\"energy\")\n\n # initialize the components, remember, that ener here is rho*eint\n # + 0.5*rho*v**2, where eint is the specific internal energy\n # (erg/g)\n dens[:, :] = 1.0\n xmom[:, :] = 0.0\n ymom[:, :] = 0.0\n\n E_sedov = 2.0e-3\n\n r_init = rp.get_param(\"sedov.r_init\")\n\n gamma = rp.get_param(\"eos.gamma\")\n pi = math.pi\n\n xmin = rp.get_param(\"mesh.xmin\")\n xmax = rp.get_param(\"mesh.xmax\")\n\n ymin = rp.get_param(\"mesh.ymin\")\n ymax = rp.get_param(\"mesh.ymax\")\n\n xctr = 0.5*(xmin + xmax)\n yctr = 0.5*(ymin + ymax)\n\n # initialize the pressure by putting the explosion energy into a\n # volume of constant pressure. Then compute the energy in a zone\n # from this.\n nsub = rp.get_param(\"sedov.nsub\")\n\n dist = np.sqrt((my_data.grid.x2d - xctr)**2 +\n (my_data.grid.y2d - yctr)**2)\n\n p = 1.e-5\n ener[:, :] = p/(gamma - 1.0)\n\n for i, j in np.transpose(np.nonzero(dist < 2.0*r_init)):\n\n xsub = my_data.grid.xl[i] + (my_data.grid.dx/nsub)*(np.arange(nsub) + 0.5)\n ysub = my_data.grid.yl[j] + (my_data.grid.dy/nsub)*(np.arange(nsub) + 0.5)\n\n xx, yy = np.meshgrid(xsub, ysub, indexing=\"ij\")\n\n dist = np.sqrt((xx - xctr)**2 + (yy - yctr)**2)\n\n n_in_pert = np.count_nonzero(dist <= r_init)\n\n p = n_in_pert*(gamma - 1.0)*E_sedov/(pi*r_init*r_init) + \\\n (nsub*nsub - n_in_pert)*1.e-5\n\n p = p/(nsub*nsub)\n #\n # ener[i, j] = p/(gamma - 1.0)\n\n # W = 1\n rhoh = eos.rhoh_from_rho_p(gamma, dens[i, j], p)\n ener[i, j] = rhoh - p - dens[i, j]",
"def __init__(self, version=None):\n super(VirtualNetworkApplianceService, self).__init__(\n service_type='virtual-network-appliance',\n version=version\n )",
"def __init__(self, data):\n self.helper = Helper()\n self.CVE_DATA = {}\n self.DELETE_CVE_DATA = {}\n self.DELTA_FEED = {}\n self.utc_now = datetime.utcnow()\n self.today = self.utc_now.replace(hour=0, minute=0, second=0, microsecond=0)\n self.day = self.utc_now.weekday()\n\n # This is the offset days i.e how many days old data do we need to ingest.\n delta_feed_offset = int(os.environ.get('SNYK_DELTA_FEED_OFFSET', '1'))\n self.start_day = self.today - timedelta(days=delta_feed_offset)\n\n # If we want to selectively run the ingestion for an ecosystem.\n self.selective_eco_run = os.environ.get('SELECTIVE_ECOSYSTEM_SNYK_SYNC', '')\n if not data:\n self.snyk_data = self.helper.read_data_from_s3(self.utc_now.strftime('%d-%m-%Y'),\n \"snyk-feed/\")\n # For testing purpose we can use the sample feed\n \"\"\"with open('data/feed_sample.json', encoding='utf-8') as f:\n x = json.load(f)\n self.snyk_data = x\"\"\"\n else:\n self.snyk_data = data\n self.SNYK_REPORT = self._populate_default_report()",
"def dvs_vcenter_systest_setup(self):\n self.env.revert_snapshot(\"ready_with_5_slaves\")\n\n self.show_step(1)\n self.show_step(2)\n plugin.install_dvs_plugin(self.ssh_manager.admin_ip)\n\n self.show_step(3)\n cluster_id = self.fuel_web.create_cluster(\n name=self.__class__.__name__,\n mode=DEPLOYMENT_MODE,\n settings={\n \"net_provider\": 'neutron',\n \"net_segment_type\": NEUTRON_SEGMENT_TYPE\n }\n )\n plugin.enable_plugin(cluster_id, self.fuel_web)\n\n self.show_step(4)\n self.show_step(5)\n self.show_step(6)\n self.fuel_web.update_nodes(cluster_id,\n {'slave-01': ['controller'],\n 'slave-02': ['compute-vmware'],\n 'slave-03': ['compute'],\n 'slave-04': ['compute']})\n\n # Configure VMWare vCenter settings\n target_node_2 = self.node_name('slave-02')\n self.fuel_web.vcenter_configure(cluster_id,\n target_node_2=target_node_2,\n multiclusters=True)\n\n self.show_step(7)\n self.fuel_web.deploy_cluster_wait(cluster_id)\n\n self.show_step(8)\n self.fuel_web.run_ostf(cluster_id=cluster_id, test_sets=['smoke'])\n\n self.show_step(9)\n self.env.make_snapshot(\"dvs_vcenter_systest_setup\", is_make=True)",
"def _setup(self) -> None:\n self._api = get_api(\n self._password,\n self._host,\n self._username,\n self._port,\n self._ssl,\n )\n\n self._info = self._api.get_info()\n self.device_name = self._info.get(\"DeviceName\", DEFAULT_NAME)\n self.model = self._info.get(\"ModelName\")\n self.firmware_version = self._info.get(\"Firmwareversion\")\n\n for model in MODELS_V2:\n if self.model.startswith(model):\n self._method_version = 2",
"def init_VI():\n\n\tprint 'Setting VI'\n\tvi = UsbVehicleInterface(payload_format=\"json\")\n\n\treturn vi",
"def __init__(self):\n self.ipaddress = None\n self.typeInfo['ipaddress'] = 'string'\n \"\"\"The mode of the VSM (standalone/HA)\"\"\"\n self.vsmconfigmode = None\n self.typeInfo['vsmconfigmode'] = 'string'\n \"\"\"The Config State (Primary/Standby) of the VSM\"\"\"\n self.vsmconfigstate = None\n self.typeInfo['vsmconfigstate'] = 'string'\n \"\"\"control vlan id of the VSM\"\"\"\n self.vsmctrlvlanid = None\n self.typeInfo['vsmctrlvlanid'] = 'int'\n \"\"\"device id of the Cisco N1KV VSM device\"\"\"\n self.vsmdeviceid = None\n self.typeInfo['vsmdeviceid'] = 'string'\n \"\"\"device name\"\"\"\n self.vsmdevicename = None\n self.typeInfo['vsmdevicename'] = 'string'\n \"\"\"device state\"\"\"\n self.vsmdevicestate = None\n self.typeInfo['vsmdevicestate'] = 'string'\n \"\"\"The Device State (Enabled/Disabled) of the VSM\"\"\"\n self.vsmdevicestate = None\n self.typeInfo['vsmdevicestate'] = 'string'\n \"\"\"The VSM is a switch supervisor. This is the VSM's switch domain id\"\"\"\n self.vsmdomainid = None\n self.typeInfo['vsmdomainid'] = 'string'\n \"\"\"management vlan id of the VSM\"\"\"\n self.vsmmgmtvlanid = None\n self.typeInfo['vsmmgmtvlanid'] = 'string'\n \"\"\"packet vlan id of the VSM\"\"\"\n self.vsmpktvlanid = None\n self.typeInfo['vsmpktvlanid'] = 'int'\n \"\"\"storage vlan id of the VSM\"\"\"\n self.vsmstoragevlanid = None\n self.typeInfo['vsmstoragevlanid'] = 'int'",
"def startup(self):\n for v in self.virt_nodes:\n v.create()\n \n \"\"\" scan for nodes \"\"\"\n self.scan_for_nodes()\n \n \"\"\" connect to all nodes and call setup \"\"\"\n for n in self.scan_nodes:\n n.connect()\n \n ''' list of open addresses for the node '''\n oalist = []\n \n ''' if the multicast interface is defined use it as open address '''\n if self.mcast_interface != \"\":\n oalist.append(self.mcast_interface)\n \n ''' open the connection to the default address of the slave '''\n oalist.append(socket.gethostbyname(socket.gethostname()))\n \n ''' read the monitor node list '''\n monitor_list = open(os.path.join(self.workdir, \"monitor-nodes.txt\"), \"r\")\n for maddress in monitor_list.readlines():\n oalist.append(maddress.strip())\n \n ''' call the setup procedure '''\n n.setup(oalist)",
"def parameter_initialization(self):\n dictsize = settings.PARS.get('numBases')\n numClass = self.train_labels.shape[0] # number of objects\n Dinit = np.empty((self.train_feats.shape[0], 0)) # for C-Ksvd and D-Ksvd\n dictLabel = np.empty((numClass, 0), dtype=np.int)\n numPerClass = dictsize//numClass\n param1 = {\n 'mode': 2,\n 'K': settings.PARS.get('numBases'), # size of the dictionary\n 'lambda1': settings.PARS.get('lambda_'),\n 'lambda2': 0,\n 'iter': settings.PARS.get('iterationini')\n }\n param2 = {\n 'lambda1': settings.PARS.get('lambda_'),\n 'lambda2': 0,\n 'mode': 2\n }\n\n for classid in range(numClass):\n col_ids = np.array(np.nonzero(self.train_labels[classid, :] == 1)).ravel()\n # ensure no zero data elements are chosen\n data_ids = np.array(np.nonzero(np.sum(self.train_feats[:, col_ids]**2, axis=0) > 1e-6)).ravel()\n\n # Raising an error if any zero lement is found\n if col_ids.shape[0] != data_ids.shape[0]:\n raise DatasetZeroElementFound\n\n # Initilization for LC-KSVD (perform KSVD in each class)\n Dpart = self.train_feats[:, col_ids[np.random.choice(data_ids, numPerClass, replace=False)]]\n param1['D'] = Dpart # initial dictionary\n Dpart = trainDL(self.train_feats[:, col_ids[data_ids]], **param1)\n Dinit = np.c_[Dinit, Dpart]\n labelvector = np.zeros((numClass, 1), dtype=np.int)\n labelvector[classid] = 1\n dictLabel = np.c_[dictLabel, np.tile(labelvector, (1, numPerClass))]\n\n param1['D'] = np.asfortranarray(Dinit) # initial dictionary\n # RuntimeError: matrix arg 10 must be a 2d double Fortran Array\n self.train_feats = self.train_feats if np.isfortran(self.train_feats) else np.asfortranarray(self.train_feats)\n Dinit = trainDL(self.train_feats, **param1)\n Xinit = lasso(self.train_feats, Dinit, **param2)\n\n # learning linear classifier parameters\n tmp = np.linalg.inv([email protected]+np.eye(*([email protected]).shape))@Xinit\n Winit = [email protected]_labels.T\n Winit = Winit.T\n\n Q = np.zeros((dictsize, self.train_feats.shape[1])) # energy matrix\n\n for frameid in range(self.train_feats.shape[1]):\n label_training = self.train_labels[:, frameid]\n maxid1 = label_training.argmax(0)\n\n for itemid in range(Dinit.shape[1]):\n label_item = dictLabel[:, itemid]\n maxid2 = label_item.argmax(0)\n\n if maxid1 == maxid2:\n Q[itemid, frameid] = 1\n\n Tinit = [email protected]\n Tinit = Tinit.T\n\n return Dinit, Winit, Tinit, Q",
"def __init__(self, logical_services_node=None):\n super(ServicesNodeIpSecConfig, self).__init__()\n self.log = logger.setup_logging(self.__class__.__name__)\n self.schema_class = 'ipsec_config_schema.IpSecConfigSchema'\n\n if logical_services_node is not None:\n self.set_connection(logical_services_node.get_connection())\n\n self.set_create_endpoint(\"/lservices-nodes/\" + logical_services_node.id + \"/service-bindings/ipsec/config\")\n self.id = None",
"def __init__(self):\n self._lib_vscf_ecc = VscfEcc()\n self._c_impl = None\n self._ctx = None\n self.ctx = self._lib_vscf_ecc.vscf_ecc_new()",
"def create_vectors(self):\n self.localStatistics = []\n self.lastStatistics = []\n self.globalV = []\n self.estimate = []\n self.delta = []\n self.drift = []\n self.slack = [] # only for coordBased model",
"def initialize(self):\n if self.real:\n self.agent.connect(self)\n else:\n self.connect() # Connect python client to VREP\n self.agent.connect(self)"
] | [
"0.6000072",
"0.5982209",
"0.59133905",
"0.58482045",
"0.5846955",
"0.5802692",
"0.57983714",
"0.57908916",
"0.57861805",
"0.57234365",
"0.5716901",
"0.57001555",
"0.56514275",
"0.5569302",
"0.553498",
"0.5508897",
"0.5441034",
"0.54278976",
"0.5421931",
"0.5419489",
"0.54168147",
"0.5416671",
"0.5414111",
"0.5396242",
"0.5395286",
"0.5393877",
"0.539357",
"0.5392984",
"0.53839904",
"0.5381111"
] | 0.6732011 | 0 |
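The init_cloud_virtual_resources and init_VNFs_Services snippets above build CloudVirtualResource and VNFService objects; neither class is defined in this excerpt. A minimal sketch, assuming plain data holders whose constructors mirror the call sites, including the lists of related resource IDs:

class CloudVirtualResource:
    # Assumed data holder for a cloud virtual resource and the IDs of its related physical resources.
    def __init__(self, ID, name, info, IPAddress, URL, related_phys_rsrcIDs):
        self.ID = ID
        self.name = name
        self.info = info
        self.IPAddress = IPAddress
        self.URL = URL
        self.related_phys_rsrcIDs = related_phys_rsrcIDs

class VNFService:
    # Assumed data holder for a VNF or e2e service, linked to physical and cloud virtual resources.
    def __init__(self, ID, name, info, IPAddress, URL,
                 related_phys_rsrcIDs, related_cloudvirt_rsrcIDs):
        self.ID = ID
        self.name = name
        self.info = info
        self.IPAddress = IPAddress
        self.URL = URL
        self.related_phys_rsrcIDs = related_phys_rsrcIDs
        self.related_cloudvirt_rsrcIDs = related_cloudvirt_rsrcIDs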
Append an object to a list of strings and add a timestamp. | def append_to_list(self, string_to_append):
    if isinstance(string_to_append, str):
        current_time = datetime.now()
        self.__string_list.append(string_to_append)
        self.__timestamp_list.append(current_time)  # timestamp will have the same index as the string
    else:
        print("appended object must be a string, string_to_append=", string_to_append)
        sys.exit()  # stop the entire program, because the string MUST be correct | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add(self, timestamp):\n self.total_count += 1\n self.times.append(timestamp)",
"def append(self, object):\r\n raise NotImplementedError()",
"def append(self, obj):\r\n raise NotImplementedError",
"def append(self, ts: Union[str, datetime.datetime, None], attribute: Any, raise_errors: bool = False):\n if ts is None:\n ts = datetime.datetime.now(self.timezone)\n elif isinstance(ts, str):\n try:\n ts = datetime.datetime.fromisoformat(ts).astimezone(self.timezone)\n except ValueError:\n if raise_errors:\n raise\n ts = datetime.datetime.now(self.timezone)\n\n if len(self.history) > 0:\n latest_ts, = self.history[-1]\n if latest_ts > ts:\n if raise_errors:\n raise ValueError('Timestamp out of order {}'.format(ts.isoformat()))\n else:\n self.consistent = False\n\n self.latest = ts\n self.history.append((ts, self.set_current(attribute)))",
"def append_event(self, timestamp: datetime, event_type: EventType):",
"def add(self, *args):\n return _libsbml.ListWrapperDate_add(self, *args)",
"def append_new_object(some_list):\n\n new_list = some_list + [9]\n print(\"We make a new list, CATing onto the old list: new_list = some_list + [9]\")\n print(f\"new_list = {new_list}\")\n print(f\"some_list = {some_list}\")\n print(f\"id of new_list = {id(new_list)}\")\n print(f\"id of some_list = {id(some_list)}\")\n return \"done\"",
"def get_timestamped_strings(self):\n ret_list = []\n i = 0\n while i < len(self.__string_list):\n ret_list.append(self.__timestamp_list[i].strftime(\"%Y-%m-%d %H:%M:%S\")+\" \"+self.__string_list[i])\n i += 1\n return ret_list",
"def append(self, item):\n self.update([item])",
"def append(self, entry):\n self.strings.append(entry)",
"def append(self, path):\n self.paths.append(path)\n self.time += path.time",
"def hit(self, timestamp):\n self.l.append(timestamp)",
"def append(self, object):\n self.data['object'].append(object)\n self.data['id'].append(self.start_id)\n for col in self.cols:\n if col != 'object' and col != 'id':\n self.data[col].append(None)\n self.start_id += 1\n return self",
"def append(self, *args):\n self.add(*args)",
"def _add(object, name, value):\n self.__added__.append(name)\n setattr(object, name, value)",
"def add(self, name, value):\n assert isinstance(name, str)\n\n if isinstance(value, str):\n self.__getitem__(name).append(value)\n elif isinstance(value, Iterable):\n self.__getitem__(name).extend(value)\n elif isinstance(value, datetime):\n self.__getitem__(name).append(rfc1123_datetime_encode(value))\n else:\n self.__getitem__(name).append(str(value))",
"def add_elements(self, elements):\n timestamped_values = []\n for element in elements:\n if isinstance(element, TimestampedValue):\n timestamped_values.append(element)\n elif isinstance(element, WindowedValue):\n # Drop windows for elements in test stream.\n timestamped_values.append(\n TimestampedValue(element.value, element.timestamp))\n else:\n # Add elements with timestamp equal to current watermark.\n timestamped_values.append(\n TimestampedValue(element, self.current_watermark))\n self._add(ElementEvent(timestamped_values))\n return self",
"def add(self, elem):\n self.add_last(elem)",
"def add(self, message, time):\n if message not in self.results.keys():\n self.results[message] = [time]\n\n self.results[message].append(time)",
"def do_append(self, text):\n args = text.split()\n if len(args) == 1:\n try:\n self.list.append(int(args[0]))\n print(self.list, sep=', ')\n except ValueError:\n print('Error: invalid literal.')\n else:\n print('Error: append takes only one parameter.')",
"def append(self, string):\r\n self.word_list.append(string)",
"def append(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\n pass",
"def append(self, *args, **kwargs): # real signature unknown\n pass",
"def append(self, dat, aduc):\n self.datelist.append(dat)\n self.adulist.append(aduc)",
"def append(self, value):\n self.list.append(value)",
"def add(self, *items):",
"def append(self, obj):\r\n self.record_count += 1\r\n \r\n if type(obj) == dict:\r\n self._probe_record(obj)\r\n else:\r\n self._probe_row(obj)",
"def add(self, obj: T) -> None:\n self._items.append(obj)\n self._size += 1",
"def push(self, value):\n self.values.append((time.time(), value))",
"def __iadd__(self, obj):\n if not vedo.utils.is_sequence(obj):\n obj = [obj]\n for a in obj:\n if a:\n self.AddPart(a)\n return self"
] | [
"0.63131636",
"0.60031503",
"0.5990919",
"0.5893538",
"0.58589745",
"0.58204615",
"0.5716781",
"0.56838316",
"0.5644069",
"0.56173307",
"0.56059915",
"0.5589093",
"0.5588692",
"0.55833554",
"0.55797154",
"0.5551652",
"0.55227566",
"0.55158985",
"0.5504161",
"0.5495428",
"0.54863995",
"0.5482246",
"0.5463265",
"0.54515964",
"0.5451235",
"0.542525",
"0.54165304",
"0.54006946",
"0.5398305",
"0.5395257"
] | 0.78029543 | 0 |
return a list of strings with timestamps as prefixes (not showing microseconds). | def get_timestamped_strings(self):
ret_list = []
i = 0
while i < len(self.__string_list):
ret_list.append(self.__timestamp_list[i].strftime("%Y-%m-%d %H:%M:%S")+" "+self.__string_list[i])
i += 1
return ret_list | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_time_strs(self):\n\n log(\"Getting time strings starting at {}\".format(self._t0))\n tz = dt.timezone.utc\n mkdt = lambda n: dt.datetime.fromtimestamp(\n self._t0 - (self._delta * n),\n tz=tz\n )\n ns = range(self._frames, 0, -1)\n return [mkdt(n).strftime('%Y%m%d%H%M') for n in ns]",
"def _timestamp(self):\n\n retval = []\n\n if self.log_level >= _Log.DEBUG:\n retval.append('%f: ' % (time.time() - self.start_time,))\n\n return ''.join(retval)",
"def get_timestamped_metric_values_as_strings(self):\n ret_list = []\n i = 0\n while i < len(self.__metric_value_list):\n ret_list.append(self.__metric_value_list[i].timestamp.strftime(\"%Y-%m-%d %H:%M:%S\") + \" \" +\n str(self.__metric_value_list[i].value) +\n \"(\" + str(self.__metric_value_list[i].metric_def_ID) + \")\")\n i += 1\n return ret_list",
"def tickStrings(values, scale, spacing):\n # sending a list of values in format \"HH:MM:SS.SS\" generated from Total seconds.\n return [(int2dt(value).strftime(\"%H:%M:%S.%f\"))[:-4] for value in values]",
"def call_list_timestamp(timestamp):\n return datetime.datetime.utcfromtimestamp(timestamp).isoformat()",
"def timestamps():\n timestamps = ( # Index\n 1459516622.1, # 0\n 1459516622.2, # 1\n 1459516622.3, # 2\n 1459516623.0, # 3\n 1459516623.1, # 4\n 1459516623.3, # 5\n 1459516624.0, # 6\n )\n return timestamps",
"def timestamps(self) -> List[T]:\n return self._timestamps",
"def __call__(self, x: Sequence[datetime]) -> Sequence[str]:\n if self.tz is not None:\n x = [d.astimezone(self.tz) for d in x]\n return [d.strftime(self.fmt) for d in x]",
"def tickStrings(self, values, scale, spacing):\n ret = []\n if not values:\n return []\n\n if spacing >= 31622400: # 366 days\n fmt = \"%Y\"\n\n elif spacing >= 2678400: # 31 days\n fmt = \"%Y %b\"\n\n elif spacing >= 86400: # = 1 day\n fmt = \"%b/%d\"\n\n elif spacing >= 3600: # 1 h\n fmt = \"%b/%d-%Hh\"\n\n elif spacing >= 60: # 1 m\n fmt = \"%H:%M\"\n\n elif spacing >= 1: # 1s\n fmt = \"%H:%M:%S\"\n\n else:\n # less than 2s (show microseconds)\n # fmt = '%S.%f\"'\n fmt = '[+%fms]' # explicitly relative to last second\n\n for x in values:\n try:\n t = datetime.fromtimestamp(x)\n ret.append(t.strftime(fmt))\n except ValueError: # Windows can't handle dates before 1970\n ret.append('')\n\n return ret",
"def gmt(time):\n gmt = [0]*time.size\n for i in range(time.size):\n gmt[i]=datetime.utcfromtimestamp(time[i]).strftime('%Y-%m-%d %H:%M:%S')\n return gmt",
"def header_names(self) -> list[str]:\n return [*filter(lambda t: self.timestamped, [\"asctime\"]), \"levelname\"]",
"def getTimeStamps():\n\n # Initialize\n results = dict()\n\n # UT time\n ut = utils.getUT(pointing=True).split()\n results['utday'] = ut[0]\n results['ut'] = float(ut[1])\n\n # year/month/day/second\n utStamp = time.gmtime()\n utHour = maybeAddAZero(utStamp[3])\n utMin = maybeAddAZero(utStamp[4])\n utSec = maybeAddAZero(utStamp[5])\n results['timeLab'] = ''.join([commands.yearMonthDay(),'_',utHour,utMin,utSec])\n\n # Done\n return results",
"def _format_timestamps(self):\n epoch_pattern = \"\\d{13}\"\n iso_pattern = \"\\d{4}/\\d{2}/\\d{2}\"\n\n formatted_timestamps = []\n if re.match(epoch_pattern, self.timestamps[0]):\n for ts in self.timestamps:\n fmt_ts = pd.to_datetime(int(ts), unit=\"ms\").strftime(\"%Y/%m/%d\")\n formatted_timestamps.append(fmt_ts)\n elif re.match(iso_pattern, self.timestamps[0]):\n for ts in self.timestamps:\n y, m, d = ts.split(\"/\")\n fmt_ts = datetime(int(y), int(m), int(d)).strftime(\"%Y/%m/%d\")\n formatted_timestamps.append(fmt_ts)\n else:\n raise TimestampError\n\n return formatted_timestamps",
"def timestamp_encode(timestamps: List[int]) -> List[int]:\n return _encode(timestamps, Encoder, Encoding)",
"def timeStamps(dataset):\n \n timestamps = []\n \n for index, row in enumerate(dataset):\n try:\n timeObj = datetime.datetime.strptime(timeStampFix(row), '%y:%j:%H:%M:%S')\n except ValueError:\n print('Failed to create datetime object for ' + timeStampFix(row))\n timestamps.append(timeObj)\n \n return timestamps",
"def list_times(self, start: int = None, end: int = None) -> List:\n return [i.time for i in self.data[start:end]]",
"def timestamps(self) -> List[float]:\n return self._timestamps",
"def timestamps(self) -> List[float]:\n return self._timestamps",
"def get_times_list(binout):\r\n return sorted([float(\"{0:15.6f}\".format(t)) for t in\r\n binout.recordarray[\"totim\"]])",
"def get_times(my_vars):\n base_time = my_vars['base_time'].getValue()\n try:\n times=my_vars['time']\n except KeyError:\n times = my_vars['time_offset']\n\n ts = []\n for time in times:\n temp = datetime.utcfromtimestamp(base_time+time)\n if (temp.minute == 0) :\n ts.append(temp)\n return ts",
"def get_labels(self):\n return [\"00:00\", \"04:00\", \"08:00\", \"12:00\", \"16:00\", \"20:00\", \"00:00\"]",
"def get_labels(self):\n return [\"00:00\", \"04:00\", \"08:00\", \"12:00\", \"16:00\", \"20:00\", \"00:00\"]",
"def get_labels(self):\n return [\"00:00\", \"04:00\", \"08:00\", \"12:00\", \"16:00\", \"20:00\", \"00:00\"]",
"def dump_datetime(value):\n if value is None:\n return\n return [value.strftime(\"%Y-%m-%d\"), value.strftime(\"%H:%M:%S\")]",
"def timestamps_sorted_list(self) -> List[int]:\n if len(self._timestamps_sorted_list) == 0:\n # Need to sort\n self._timestamps_sorted_list = sorted(list(self.keys()))\n if len(self._timestamps_sorted_list) > 0:\n self._first_timestamp = self._timestamps_sorted_list[0]\n if len(self._timestamps_sorted_list) > 1:\n self._last_timestamp = self._timestamps_sorted_list[-1]\n return self._timestamps_sorted_list",
"def get_timescale_stringlist(self):\n return text_timescale",
"def get_timestamps( self, raster_pos=None ):\n if raster_pos is None:\n headers = self.time_specific_headers\n else:\n headers = self.get_raster_pos_headers( raster_pos )\n \n return [to_epoch( from_Tformat( h['DATE_OBS'] ) ) for h in headers]",
"def get_timestamps(self) -> List[datetime.datetime]:\n return [activity.timestamp for activity in self.activities]",
"def _get_timestamps(self, time_interval: RawTimeIntervalType | None, bbox: BBox) -> list[dt.datetime]:",
"def get_timestring_from_int(time_array, format=\"%H:%M:%S\"):\n list = []\n for value in time_array:\n list.append((value, int2dt(value, 1).strftime(format)))\n return list"
] | [
"0.7256951",
"0.6693242",
"0.6453684",
"0.6298663",
"0.62940955",
"0.61848104",
"0.6147845",
"0.6098919",
"0.6096851",
"0.6066497",
"0.6043481",
"0.603737",
"0.59519106",
"0.58646226",
"0.5857015",
"0.58160734",
"0.5814002",
"0.5814002",
"0.5812247",
"0.5737909",
"0.57244587",
"0.57244587",
"0.57244587",
"0.56416154",
"0.56149805",
"0.55726796",
"0.55446565",
"0.55314064",
"0.54951805",
"0.5474389"
] | 0.78146636 | 0 |
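The record above pairs stored timestamps with strings and renders them through strftime("%Y-%m-%d %H:%M:%S") so microseconds are dropped. A minimal, self-contained sketch of the same pattern follows; the class and attribute names here are illustrative stand-ins, not taken from the dataset.

from datetime import datetime

class TimestampedStringList:
    # Illustrative sketch: keep (timestamp, text) pairs and render them as prefixed strings.
    def __init__(self):
        self._entries = []  # list of (datetime, str) tuples

    def append(self, text):
        # Record the current time alongside the string.
        self._entries.append((datetime.now(), text))

    def get_timestamped_strings(self):
        # "%Y-%m-%d %H:%M:%S" omits microseconds, matching the record above.
        return [ts.strftime("%Y-%m-%d %H:%M:%S") + " " + text for ts, text in self._entries]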
Generic function to dump all Challenge Execution data in a CSV file. | def write_to_csv(self):
dump_list = []
# add rows one by one, each as a list, even if only 1 element
dump_list.append(["challenge execution ID",self.ID])
dump_list.append(["challenge execution name",self.name])
dump_list.append(["challenge definition ID",self.challenge_def_ID])
challenge_def_name = get_indexed_item_from_file(self.challenge_def_ID, FILE_CHALLENGE_DEFINITIONS)
dump_list.append(["challenge definition name",challenge_def_name])
if self.start_time != None:
dump_list.append(["challenge start time",self.start_time.strftime("%Y-%m-%d %H:%M:%S")])
if self.stop_time != None:
dump_list.append(["challenge stop time",self.stop_time.strftime("%Y-%m-%d %H:%M:%S")])
if self.log.length() > 0 :
dump_list.append(["Log:"])
for item in self.log.get_timestamped_strings():
dump_list.append([item])
if self.CLI_responses.length() > 0 :
dump_list.append(["CLI responses:"])
for item in self.CLI_responses.get_timestamped_strings():
dump_list.append([item])
if self.API_responses.length() > 0 :
dump_list.append(["API responses:"])
for item in self.API_responses.get_timestamped_strings():
dump_list.append([item])
try:
# output CSV file name: challDefExec + ID + start time + .csv
file_name = "challDefExec" + "{0:0=3d}".format(self.challenge_def_ID) + "-" + self.start_time.strftime("%Y-%m-%d-%H-%M-%S") + ".csv"
with open(file_name, "w", newline="") as file:
csv_file_writer = csv.writer(file)
csv_file_writer.writerows(dump_list)
except Exception as e:
print(type(e), e)
sys.exit() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write_to_csv(self):\n\n dump_list = []\n\n # add rows one by one, each as a list, even if only 1 element\n\n dump_list.append([\"test execution ID\",self.ID])\n dump_list.append([\"test execution name\",self.name])\n\n dump_list.append([\"test definition ID\",self.test_def_ID])\n test_def_name = get_indexed_item_from_file(self.test_def_ID, FILE_TEST_DEFINITIONS)\n dump_list.append([\"test definition name\",test_def_name])\n\n dump_list.append([\"associated challenge execution ID\",self.challenge_exec_ID])\n dump_list.append([\"user ID\",self.user_ID])\n\n if self.start_time != None:\n dump_list.append([\"test start time\",self.start_time.strftime(\"%Y-%m-%d %H:%M:%S\")])\n\n if self.finish_time != None:\n dump_list.append([\"test finish time\",self.finish_time.strftime(\"%Y-%m-%d %H:%M:%S\")])\n\n if self.challenge_start_time != None:\n dump_list.append([\"challenge stop time\",self.challenge_start_time.strftime(\"%Y-%m-%d %H:%M:%S\")])\n if self.restoration_detection_time != None:\n dump_list.append([\"restoration detection time\",self.restoration_detection_time.strftime(\"%Y-%m-%d %H:%M:%S\")])\n if self.recovery_time != None:\n if self.recovery_time.value != None:\n if type(self.recovery_time.value)==timedelta:\n # timedelta: days and seconds are attributes, total_seconds() is a method\n dump_list.append([\"MEASURED RECOVERY TIME (s)\",self.recovery_time.value.total_seconds()])\n rtday = self.recovery_time.value.days\n rthrs = self.recovery_time.value.seconds // 3600\n rtmin = (self.recovery_time.value.seconds % 3600) // 60\n rtsec = self.recovery_time.value.seconds % 60\n rtmil = self.recovery_time.value.microseconds\n dump_list.append([\"MEASURED RECOVERY TIME (days, hours, mins, seconds, microseconds)\",\n rtday, rthrs, rtmin, rtsec, rtmil])\n\n if self.associated_metric_values.length() > 0 :\n dump_list.append([\"Metric Values:\"])\n for item in self.associated_metric_values.get_timestamped_metric_values_as_strings():\n dump_list.append([item])\n\n if self.log.length() > 0 :\n dump_list.append([\"Log:\"])\n for item in self.log.get_timestamped_strings():\n dump_list.append([item])\n\n if self.CLI_responses.length() > 0 :\n dump_list.append([\"CLI responses:\"])\n for item in self.CLI_responses.get_timestamped_strings():\n dump_list.append([item])\n\n if self.API_responses.length() > 0 :\n dump_list.append([\"API responses:\"])\n for item in self.API_responses.get_timestamped_strings():\n dump_list.append([item])\n\n try:\n # output CSV file name: testDefExec + ID + start time + .csv\n file_name = \"testDefExec\" + \"{0:0=3d}\".format(self.test_def_ID) + \"-\" + self.start_time.strftime(\"%Y-%m-%d-%H-%M-%S\") + \".csv\"\n with open(file_name, \"w\", newline=\"\") as file:\n csv_file_writer = csv.writer(file)\n csv_file_writer.writerows(dump_list)\n except Exception as e:\n print(type(e), e)\n sys.exit()",
"def file(self):\n result = []\n completePath = CompletePath(self.path, self.filename) \n with open(completePath.path(), 'w', newline='') as csvfile:\n fieldnames = ['Activity', 'Points']\n writer = csv.DictWriter(csvfile, fieldnames = fieldnames)\n writer.writeheader()\n for i in range ( len( self.groupPriority.rows() ) ):\n tmp = self.groupPriority.rows()[i]\n self.log.info ( \"FinalCSV\", \"file\",\"data {0},{1}\".format( tmp.activity(), tmp.points() ) )\n writer.writerow({'Activity': tmp.activity(), 'Points': tmp.points()})\n self.log.info(\"FinalCSV\", \"file\", \"Elaborated file: {0}\".format ( completePath.path() ) )",
"def DumpCsv(data):\n \n raise Exception('TBI: Need standard container structure for this to work, cause its flat...')",
"def write_csv(self):\n with open(paths.CSV_FILE, 'w', newline='') as csv_file:\n writer = csv.writer(csv_file)\n assg = AssignmentConfig().get_assignment()\n writer.writerow([\"Student\"] + assg.get_test_list() + assg.get_programs_list() +\n [\"normalised_test_score\"] + [\"normalised_prog_score\"] + [\"total\"] + [\"total_rounded\"])\n\n for (submitter, submitter_data) in sorted(self.snapshot['results'].items()):\n total_score = submitter_data[\"normalised_test_score\"] + submitter_data[\"normalised_prog_score\"]\n total_rounded = round(total_score * 2) / 2 # total score rounded to nearest 0.5\n writer.writerow([submitter] +\n [submitter_data[\"tests\"][test] for test in sorted(submitter_data[\"tests\"])] +\n [submitter_data[\"progs\"][prog] for prog in sorted(submitter_data[\"progs\"])] +\n [submitter_data[\"normalised_test_score\"]] +\n [submitter_data[\"normalised_prog_score\"]] +\n [round(total_score, 2)] +\n [total_rounded])",
"def _csv_export(self, exppath):\n with open(exppath, 'w') as csvfile:\n csvwriter = csv.writer(csvfile, delimiter=',', skipinitialspace=True)\n csvwriter.writerow(['hexstr','dmc','name'])\n for clr in self.lookup_table:\n csvwriter.writerow([clr.hex.to_str(), clr.id, clr.name])",
"def write_csv(file_name, data):\n\n with open(file_name, \"w\") as fp:\n\n writer = RiscvInstructionTraceCsv(fp)\n writer.start_new_trace()\n\n for entry in data:\n writer.write_trace_entry(entry)",
"def output_csv(vk4_container, args, data):\n log.debug(\"Entering output_csv()\\n\\tData Layer: {}\".format(args.layer))\n\n out_file_name = output_file_name_maker(args) + '.csv'\n\n width = vk4_container.image_width\n height = vk4_container.image_height\n\n data = np.reshape(data, (height, width))\n log.debug(\"\\n\\tData:\\n\\t%r\".format(data))\n\n with open(out_file_name, 'w') as out_file:\n if args.type == 'hcsv':\n header = create_file_meta_data(vk4_container, args)\n np.savetxt(out_file, header, delimiter=',', fmt='%s')\n out_file.write('\\n')\n np.savetxt(out_file, data, delimiter=',', fmt='%d')\n\n log.debug(\"Exiting output_csv()\")",
"def generate_csv(self, output_file):\n try: # We are going to \"try\" something\n csv_file = open(output_file, 'w+') # open \"output_file\" as a writable file and return a handle called \"csv_file\"\n except OSError as err: # If something goes wrong with the open, we catch the exception\n fatal(\"{0}\".format(err), -1) # exit with something other than 0 so the shell knows something went wrong\n \n writer = csv.writer(csv_file) # create a CSV writing object that's pointing at our open file handle\n\n writer.writerow([\"Question\",\"Answers\"]) # Let's write the top row\n for k in self.questions.keys(): # Let's walk down the directory by key\n # write the \"key\" (which is the question) and then let's take the list of answers and create a comma delmited list.\n # this is likely totally wrong since you could have an answer in it that also has a comma...\n writer.writerow([k, \",\".join(self.questions[k].answers)]) # insert a key (which is the question) and then let's take the array of \n\n csv_file.close() # close the csv_file file handle",
"def dump_all_binaries_to_CSV():\n ## TODO\n timenow = datetime.now()",
"def csv_output(self):\r\n fh = open(\"output.csv\",'w')\r\n for i in range(len(self.population.columns)):\r\n if i != len(self.population.columns)-1:\r\n fh.write(str(self.population.columns[i]))\r\n fh.write(\",\")\r\n else:\r\n fh.write(str(self.population.columns[i]))\r\n fh.write(\"\\n\")\r\n\r\n for i in range(len(self.population.data)):\r\n for j in range(len(self.population.data[i])):\r\n if j != len(self.population.data[i])-1:\r\n fh.write(str(self.population.data[i][j]))\r\n fh.write(\",\")\r\n else:\r\n fh.write(str(self.population.data[i][j]))\r\n fh.write(\"\\n\")\r\n fh.close()",
"def export_results(n, dict_all_embeddings, dict_mission, our_initial, name, mission):\r\n csv_columns = [\"initial size\", \"embed algo\", \"regression\", \"test\", \"micro-f1\", \"macro-f1\", \"auc\", \"time\"]\r\n dict_data = create_dicts_for_results(dict_all_embeddings, dict_mission, our_initial, n)\r\n csv_file = os.path.join(\"..\", \"files\", \"{} {}.csv\".format(name, mission))\r\n try:\r\n with open(csv_file, 'w') as csvfile:\r\n writer = csv.DictWriter(csvfile, fieldnames=csv_columns)\r\n writer.writeheader()\r\n for data in dict_data:\r\n writer.writerow(data)\r\n except IOError:\r\n print(\"I/O error\")",
"def _write_csv(self):\n\n # add the label to the header\n if self.input_data.get_value(InputType.TIME_PERIOD) == 'all':\n self.header.append('Date')\n else:\n self.header.append('sample id')\n\n key_list = []\n\n for i, cube in enumerate(self.cube_list):\n if self.input_data.get_value(InputType.TIME_PERIOD) == 'all':\n self._write_sample_with_date(cube, i, key_list)\n else:\n self._write_sample(cube, i, key_list)\n\n output_data_file_path = self._get_full_file_name()\n self._write_data_dict(output_data_file_path, key_list)\n\n return [output_data_file_path]",
"def to_csv(self, dataset):\n save_as = filedialog.asksaveasfilename(defaultextension='.csv')\n try:\n with open(save_as, 'w', newline='') as file:\n scribe = csv.writer(file)\n scribe.writerow(HEADERS)\n for row in dataset:\n scribe.writerow(row.values())\n self.info_success(save_as)\n except IOError:\n self.info_error()\n return",
"def create_csv(self):\n try:\n # Convert List of Lists to DataFrame and write it to a CSV\n pd.DataFrame(self.data, columns=self.header) \\\n .to_csv(os.path.join(self.file_path, self.file_name), index=False)\n self.successful_run = True\n except:\n # TODO create Exception Handling\n raise",
"def export_data(self):\r\n \r\n \r\n output_file = 'export.csv'\r\n data = self.get_raw_data()\r\n \r\n if data != []:\r\n print('Writing to file', output_file)\r\n with open(output_file, 'w',) as csvfile:\r\n fluorescence_levels = csv.writer(csvfile)\r\n fluorescence_levels.writerow(['sensor_1','Time'])\r\n for i in data:\r\n fluorescence_levels.writerow(i)\r\n print('done')\r\n \r\n else:\r\n print('no recorded data')",
"def export_csv(self):\n outputfile = tkinter.filedialog.asksaveasfilename(\n defaultextension=\".csv\",\n filetypes=((\"comma seperated values\", \"*.csv\"),\n (\"All Files\", \"*.*\")))\n if outputfile:\n tabledata = self.tabs.window.aistracker.create_table_data()\n export.write_csv_file(tabledata, outputfile)\n else:\n raise ExportAborted('Export cancelled by user.')",
"def output_data(self):\n if not self.is_record:\n logging.error('Env: no record to output!')\n else:\n control_data = pd.DataFrame(self.control_data)\n control_data.to_csv(self.output_path + ('%s_%s_control.csv' % (self.name, self.agent)))",
"def to_csv(self, path):\n if os.path.isdir(path):\n shutil.rmtree(os.path.join(path))\n os.makedirs(path)\n\n for name, df in self.input_data.items():\n name += \".csv\"\n filename = os.path.join(path, name)\n df.to_csv(filename)\n logging.info(\"Scenario saved as csv-collection to %s\", path)",
"def to_csv(self, path):\n for table in ['datasets', 'dataruns', 'hyperpartitions', 'classifiers']:\n df = pd.read_sql('SELECT * FROM %s' % table, self.session.bind)\n df.to_csv(os.path.join(path, '%s.csv' % table), index=False)",
"def to_csv(self):\n if not self._fitted:\n self.fit()\n #self._message(\"Saving results into a csv (comma separated values) file.\")\n v=np.array([list(self.initialConcentration.values()),\n list(self.fitting_error.values()),\n list(self.k.values()),\n list(self.Fb.values()),\n list(self.slope.values())]).T\n k=list(self.initialConcentration.keys())\n d=pd.DataFrame(v,columns=['Initial Concentration','Fitting Error','k','Fb','Slope'],index=k)\n fn=get_valid_fname(self.ID)\n self.csvname=\"%s_initial_concentrations.csv\"%(fn)\n self.fullcsvname=\"%s/%s_initial_concentrations.csv\"%(self.info['resultsdir'],fn)\n self.info['csvname_initialConcentration']=self.csvname\n print(self.csvname)\n d.to_csv('%s/%s'%(self.info['resultsdir'],self.csvname))",
"def csv_file(data,output_dir,filename,order = [],head = True):\n with open(output_dir + filename + '.csv', 'w') as f:\n write = csv.writer(f)\n write.writerows(manip.dic_to_list(data,order,head),)\n return None",
"def write_to_csv(self, data_points):\n keys = data_points[0].keys()\n with open(self.report_path, 'w') as output_file:\n dict_writer = csv.DictWriter(output_file, keys)\n dict_writer.writeheader()\n dict_writer.writerows(data_points)",
"def store_as_csv(dataset, task_cls, path):\n dataset_path = str(Path.cwd()) + path\n\n # Create path if it doesn't exist\n Path(dataset_path).mkdir(parents=True, exist_ok=True)\n\n file = dataset_path + dataset.get('dataset_name') + '.csv'\n\n tasks = dataset.get('tasks')\n list_task_dicts = list()\n\n ordered_tasks = collections.OrderedDict(sorted(tasks.items()))\n\n for task_id, task in ordered_tasks.items():\n csv_dict = task_cls.to_csv(task)\n list_task_dicts.append(csv_dict)\n\n to_csv(list_task_dicts, file)",
"def _CsvFunc(self, obj=None, verbose=False, use_pager=None, to_file=None):\n if obj is not None:\n self._printed_variables.append(obj)\n lines = describe.GenerateLines(obj, verbose=verbose, recursive=False,\n format_name='csv')\n _WriteToStream(lines, use_pager=use_pager, to_file=to_file)",
"def write_results(results):\n with RESULTS_PATH.open(\"w\") as writer:\n csvwriter = csv.writer(writer)\n csvwriter.writerows(results)",
"def dump(self):\n try:\n _file = FileOps.join_path(TaskOps().step_path, \"reports.csv\")\n FileOps.make_base_dir(_file)\n data = self.all_records\n data_dict = {}\n for step in data:\n step_data = step.serialize().items()\n for k, v in step_data:\n if k in data_dict:\n data_dict[k].append(v)\n else:\n data_dict[k] = [v]\n\n data = pd.DataFrame(data_dict)\n data.to_csv(_file, index=False)\n _file = os.path.join(TaskOps().step_path, \".reports\")\n _dump_data = [ReportServer._hist_records, ReportServer.__instances__]\n with open(_file, \"wb\") as f:\n pickle.dump(_dump_data, f, protocol=pickle.HIGHEST_PROTOCOL)\n\n self.backup_output_path()\n except Exception:\n logging.warning(traceback.format_exc())",
"def export_csv(state, out_file=None):\n\n if out_file is None:\n csvfile = sys.stdout\n else:\n csvfile = open(out_file, 'w')\n\n try:\n writer = csv.writer(csvfile)\n for grade in state.grades:\n writer.writerow([grade.student_name(), grade.score(),\n grade.breakdown(state.user_name)])\n finally:\n if out_file is not None:\n csvfile.close()",
"def log_results(self, path):\n pd.DataFrame(self.results).to_csv(path)",
"def save_report_data(results):\n if os.path.isfile(FEED_DATA_FILE):\n pass\n\n csv_file = open(FEED_DATA_FILE, 'wt', encoding='utf-8')\n writer = csv.writer(csv_file, lineterminator='\\n')\n\n for report in results.get('reports', []):\n column_header = report.get('columnHeader', {})\n dimension_headers = column_header.get('dimensions', [])\n metric_headers = column_header.get(\n 'metricHeader', {},\n ).get('metricHeaderEntries', [])\n rows = report.get('data', {}).get('rows', [])\n\n header_row = []\n header_row.extend(dimension_headers)\n header_row.extend([mh['name'] for mh in metric_headers])\n\n logger.debug(header_row)\n writer.writerow(header_row)\n\n for row in rows:\n dimensions_data = row.get('dimensions', [])\n access_date = ''.join(dimensions_data[0])\n _date: date = datetime.strptime(access_date, '%Y%m%d').date()\n metrics_data = [m['values'] for m in row.get('metrics', [])][0]\n\n data_row: List[str] = [str(_date)]\n data_row.extend(metrics_data)\n logger.debug(data_row)\n writer.writerow(data_row)\n\n # Close the file.\n csv_file.close()",
"def write_output_csv(filename, **kwargs):\n import csv\n import time\n\n intermediate = kwargs.pop(\"intermediate\", False)\n\n keys = sorted(kwargs.keys())\n num_vars = len(keys)\n\n if intermediate:\n full_filename = filename + \"_interm\"\n else:\n dot_index = filename.rfind('.')\n if dot_index != -1:\n full_filename = (filename[:dot_index]\n + time.strftime(\"%Y-%m-%d-%H.%M.%S\")\n + filename[dot_index:])\n else:\n full_filename = filename + time.strftime(\"%Y-%m-%d-%H.%M.%S\")\n\n # add current time to filename as an identifier\n with open(full_filename, 'w', newline='') as csvfile:\n\n writer = csv.writer(csvfile)\n\n # write header\n writer.writerow(keys)\n\n num_entries = len(kwargs[keys[0]])\n for i in range(num_entries):\n writer.writerow(kwargs[keys[j]][i] for j in range(num_vars))"
] | [
"0.67913586",
"0.6477758",
"0.6425062",
"0.62495226",
"0.6236619",
"0.6227798",
"0.6211598",
"0.61715055",
"0.6133314",
"0.61221945",
"0.6114023",
"0.6110165",
"0.60603863",
"0.6050782",
"0.6025989",
"0.5951282",
"0.5934519",
"0.5933892",
"0.59329176",
"0.59259146",
"0.59179616",
"0.5915663",
"0.59051377",
"0.58855027",
"0.5841706",
"0.58340806",
"0.5833926",
"0.58331996",
"0.58121496",
"0.5781988"
] | 0.75044805 | 0 |
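The write_to_csv record above uses one pattern throughout: accumulate rows as lists (even single-element ones), then hand the whole list to csv.writer().writerows(). A reduced sketch of that pattern, with placeholder field names and file name:

import csv

def dump_execution_summary(exec_id, name, start_time, log_lines,
                           file_name="execution-summary.csv"):
    """Illustrative sketch: start_time is a datetime; fields and file name are placeholders."""
    # Every row is a list, even when it carries a single value, so one
    # writerows() call can emit the whole summary, as in the record above.
    rows = [["execution ID", exec_id],
            ["execution name", name],
            ["start time", start_time.strftime("%Y-%m-%d %H:%M:%S")]]
    if log_lines:
        rows.append(["Log:"])
        rows.extend([line] for line in log_lines)
    with open(file_name, "w", newline="") as f:
        csv.writer(f).writerows(rows)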
Append a metric value (MetricValue) to the list. MetricValue already has a timestamp attribute. | def append_to_list(self, metric_value_to_append):
if type(metric_value_to_append)==MetricValue:
self.__metric_value_list.append(metric_value_to_append)
else:
print("appended object must be a MetricValue, metric_value_to_append=",metric_value_to_append)
sys.exit() # stop entire program, because metric_value_to_append MUST be correct | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def append(self, value):\n self.values.append(value)\n return value",
"def log(self, metric_name: str, value: float) -> None:\n if metric_name in self.metrics:\n self.metrics[metric_name].append(value)\n else:\n self.metrics[metric_name] = [value]",
"def append(self, val):\n self._values.push(val)",
"def _append_value(self, stream, value):\n if FLAGS.timestamp:\n x_val = float(time.time())\n stream['x'].append(x_val)\n\n y_val = float(value)\n stream['y'].append(y_val)",
"def push(self, value):\n self.values.append((time.time(), value))",
"def append(self, value):\n self.list.append(value)",
"def append(self, value):\n\n list.append(self, value)\n self.changed()",
"def append(self, value):\n self.__list += [value]\n return self.__list",
"def append_value(self, value):\n self.value += value",
"def add(self, key, timestamp, value, \n retentionSecs=None, labels={}):\n params = [key, timestamp, value]\n self.appendRetention(params, retentionSecs)\n self.appendLabels(params, labels)\n\n return self.execute_command(self.ADD_CMD, *params)",
"def put(self, metric, values, timestamp=None):\n if timestamp is None:\n timestamp = time.time()\n now_date = datetime.datetime.fromtimestamp(timestamp)\n\n if self.last is None:\n self.last = timestamp\n return\n\n self.last = timestamp\n\n values = [str(d) for d in [now_date, timestamp]+values]\n\n with open(self.filename, \"at\") as df:\n df.write(\"{}\\n\".format(\",\".join(values)))",
"def add(self, value):\n self._resolve_copies()\n self.data.append(value)",
"def value(self,value):\n if math.isnan(value):\n return\n self.__append(value)",
"def append(self, value) -> None:\n key = getattr(value, self.keyattr)\n if callable(key):\n key = key()\n if key not in self.data:\n self.data[key] = []\n self.data[key].append(value)\n self.size += 1",
"def add(self, key, values):\n self.watchlists[key] = list(enumerate(values))",
"def append(self, value):\n self.__field.validate_element(value)\n return list.append(self, value)",
"def write_metric(self, metric_name: str, metric_value: Union[float, int]):\n self._metrics.append(Metric(metric_name, metric_value))",
"def append(self, value: object) -> None:\n self.data.append(value)",
"def add_value(self, key, value):\r\n if key in self:\r\n # We already have this key on the item.\r\n if not isinstance(self[key], list):\r\n # The key isn't already a list, take its current value and\r\n # convert it to a list with the only member being the\r\n # current value.\r\n self[key] = [self[key]]\r\n # Add the new value to the list.\r\n self[key].append(value)\r\n else:\r\n # This is a new attribute, just set it.\r\n self[key] = value",
"def insert(self, value):\n current_timestamp = time.time()\n self.timestamps.append(current_timestamp)\n self.dataBuffer.append(value)\n if (self.timestamps[0] < (current_timestamp - self.seconds_back)):\n self.dataBuffer.pop(0)\n self.timestamps.pop(0)",
"def append(self, val):\n self.val.append(val)",
"def add(self, value):\n pass",
"def add(self, value):\n self.arr.append(value)",
"def append(self, value):\n self.__field.validate_element(value)\n return list.append(self, value)",
"def add_metric(self, metric_name: str, metric_val: typing.Any):\n self.add_metrics({metric_name: metric_val})",
"def AddSample(self, machine, timestamp, value):\n self.machine_data.setdefault(machine, list()).append([timestamp, value])\n if len(self.cluster_total) == 0 or timestamp > self.cluster_total[-1][0]:\n self.cluster_total.append([timestamp, 0])\n self.cluster_avg.append([timestamp, 0])\n self.cluster_total[-1][1] += value\n self.cluster_avg[-1][1] = self.cluster_total[-1][1] / float(len(self.machine_data))",
"def addXValue(self,value):\n self.__XValue.append(value)\n self.updateValue()",
"def add_metric(self, metric):\n self.metrics.append(metric)\n self.estimate()",
"def add(self, value):\n ind = self._ind % self.shape[0]\n self._values[ind] = value\n self._ind += 1\n self._cached = False",
"def append(self, value) :\r\n global TypeRanges, NumericToArray\r\n a = self.impl\r\n if self.complex : # complex, append twice!\r\n if type(value) == complex :\r\n a.append(value.real)\r\n a.append(value.imag)\r\n else :\r\n a.append(float(value))\r\n a.append(0.0)\r\n \r\n else :\r\n a.append(self._crop(value))"
] | [
"0.6807644",
"0.67213684",
"0.6580341",
"0.65766835",
"0.6570106",
"0.6512985",
"0.6349774",
"0.62712246",
"0.62586963",
"0.6248036",
"0.6227115",
"0.62080497",
"0.6151301",
"0.6121657",
"0.6113878",
"0.60947984",
"0.6074452",
"0.60592484",
"0.6024408",
"0.6023302",
"0.6000479",
"0.5948073",
"0.5930859",
"0.59064037",
"0.5902631",
"0.5898476",
"0.58792424",
"0.5867347",
"0.5859045",
"0.58554953"
] | 0.7759538 | 0 |
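The append_to_list record above guards the element type and calls sys.exit() on a mismatch. A hedged alternative is to raise an exception instead of stopping the whole program; the MetricValue dataclass below is a stand-in with the same three fields, not the dataset's own class.

from dataclasses import dataclass
from datetime import datetime
from typing import Any, List

@dataclass
class MetricValue:
    # Stand-in for the record's MetricValue: timestamp, value and metric definition ID.
    timestamp: datetime
    value: Any
    metric_def_ID: int

class MetricValueList:
    def __init__(self):
        self._metric_value_list: List[MetricValue] = []

    def append_to_list(self, item: MetricValue) -> None:
        # Reject anything that is not a MetricValue instead of exiting the interpreter.
        if not isinstance(item, MetricValue):
            raise TypeError(f"expected MetricValue, got {type(item).__name__}")
        self._metric_value_list.append(item)

# Example usage (assumed values):
# mvl = MetricValueList(); mvl.append_to_list(MetricValue(datetime.now(), 0.93, metric_def_ID=1))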
Return a list of strings with metric values and timestamps as prefixes (not showing microseconds). Also show the metric def ID in parentheses. | def get_timestamped_metric_values_as_strings(self):
ret_list = []
i = 0
while i < len(self.__metric_value_list):
ret_list.append(self.__metric_value_list[i].timestamp.strftime("%Y-%m-%d %H:%M:%S") + " " +
str(self.__metric_value_list[i].value) +
"(" + str(self.__metric_value_list[i].metric_def_ID) + ")")
i += 1
return ret_list | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_metric_list(self) -> List[str]:\n ...",
"def get_metric(ms):\n\treturn '['+','.join(str(m) for m in ms)+']'",
"def __str__(self):\n columns = list(self.metrics.keys())\n columns.sort()\n out = '%s\\n' % ','.join(columns)\n values = [str(self.metrics[c]) for c in columns]\n out += '%s\\n' % ','.join(values)\n return out",
"def tickStrings(values, scale, spacing):\n # sending a list of values in format \"HH:MM:SS.SS\" generated from Total seconds.\n return [(int2dt(value).strftime(\"%H:%M:%S.%f\"))[:-4] for value in values]",
"def list_metrics(self):\n results = []\n if self.r.exists(self.metrics_key):\n keys = self.r.smembers(self.metrics_key)\n for k in keys:\n # metric_key, metric_type, metric_name, metric_help = keys.split(\" \", 3)\n results.append(k.split(\" \", 3))\n return results",
"def supported_metrics(cls) -> List[str]:\n ...",
"def get_labels(self):\n return [\"00:00\", \"04:00\", \"08:00\", \"12:00\", \"16:00\", \"20:00\", \"00:00\"]",
"def get_labels(self):\n return [\"00:00\", \"04:00\", \"08:00\", \"12:00\", \"16:00\", \"20:00\", \"00:00\"]",
"def get_labels(self):\n return [\"00:00\", \"04:00\", \"08:00\", \"12:00\", \"16:00\", \"20:00\", \"00:00\"]",
"def emit_metric(ts, data):\n clean = [cleaner(d) for d in data]\n # print clean\n # check that clean[0] do not start with a number\n (n0,v0) = clean[0]\n if n0 is not None:\n # print 'error: do not understand metric' \n return\n\n if len(clean) == 2:\n (n1,v1) = clean[1]\n return '{0}.{1} {2} {3}'.format(v0, v1, ts, n1)\n elif len(clean) == 3:\n (n1,v1) = clean[1]\n (n2,v2) = clean[2]\n return '{0}.{1}.{2} {3} {4}'.format(v0, v1, v2, ts, n2)",
"def log_metric(name, values, tags={}):\n value_list = []\n for key in sorted(values.keys()):\n value = values[key]\n value_list.append(f\"{key}:{value:7.3f}\")\n values = \", \".join(value_list)\n tag_list = []\n for key, tag in tags.items():\n tag_list.append(f\"{key}:{tag}\")\n tags = \", \".join(tag_list)\n print(\"{name:30s} - {values} ({tags})\".format(name=name, values=values, tags=tags))",
"def tracked_metrics(self) -> list:\n metric_names = [\"loss\"]\n if self.metrics_map is not None:\n metric_names.extend([key for key in self.metrics_map.keys()])\n return metric_names",
"def get_influx_DB_write_string_from_metric_data(metric, metric_vals_at_bins, bin_times):\n # vals_at_bins = [ [ (val, groups), (val, groups), ...], ... ] where groups = {'Owner':'trjones',...}\n measurement = metric[MetricsFields.MEASUREMENT_NAME]\n metric_string = \"\"\n for i in range(len(metric_vals_at_bins)):\n for pair in metric_vals_at_bins[i]:\n val = pair[0]\n groups = pair[1]\n tag_segment = ','.join([label + '=' + groups[label] for label in groups])\n line = measurement + \",\" + tag_segment + \" value=\" + str(val) + \" \" + str(bin_times[i])\n metric_string += line + \"\\n\"\n return metric_string[:-1] # remove trailing newline",
"def list_metrics(self):\n pass",
"def get_timestamped_strings(self):\n ret_list = []\n i = 0\n while i < len(self.__string_list):\n ret_list.append(self.__timestamp_list[i].strftime(\"%Y-%m-%d %H:%M:%S\")+\" \"+self.__string_list[i])\n i += 1\n return ret_list",
"def _metric_tags(self):\r\n tags = [\r\n u'{}.{}:{}'.format(self.__class__.__name__, attr, self[attr])\r\n for attr in self.metric_tag_fields\r\n if attr in self.attributes\r\n ]\r\n tags.append(u'model_class:{}'.format(self.__class__.__name__))\r\n return tags",
"def get_list_data(self):\n key = 'timer'\n if self.repeated:\n key += '_repeat'\n return '%s %s' % (key, self.data.get_list_data())",
"def metrics(self):\n if not self.df:\n return []\n\n column_metric_strings = [col.split(self.sep)[0] for col in self.df.columns]\n\n metrics = set()\n for colstring in column_metric_strings:\n try:\n metrics.add(Metric(colstring))\n except ValueError:\n continue\n\n return sorted(list(set(metrics)))",
"def list_definition(self):\n return self._get(path='metrics')",
"def get_metric_fn_and_keys():\n\n def normalize_value(inst: dict):\n val = int(inst[\"output_layer\"][0])\n return tuple([val]) # returns a tuple.\n\n return normalize_value, [\"val\"] # key order must match.",
"def report_metrics(prefix, metrics):\n series = []\n\n now = time.time()\n for key, value in metrics.iteritems():\n metric = '{prefix}.{key}'.format(prefix=prefix, key=key)\n point = [(now, value)]\n series.append({'metric':metric, 'points':point})\n\n if len(series) > 0:\n print u\"Sending {}\".format(series)\n dog_http_api.metrics(series)",
"def get_time_strs(self):\n\n log(\"Getting time strings starting at {}\".format(self._t0))\n tz = dt.timezone.utc\n mkdt = lambda n: dt.datetime.fromtimestamp(\n self._t0 - (self._delta * n),\n tz=tz\n )\n ns = range(self._frames, 0, -1)\n return [mkdt(n).strftime('%Y%m%d%H%M') for n in ns]",
"def build_metrics_gauge_data(gauge_metrics):\n return [{'name': name, 'value': value} for name, value in iteritems(gauge_metrics)]",
"def build_metrics_times_data(time_metrics):\n return [{'name': name, 'latencies': latencies.get_latencies()}\n for name, latencies in iteritems(time_metrics)]",
"def perfcounter_to_str(val):\n return f\"{math.floor(val / 60)}m {math.floor(val % 60)}s {math.floor((val % 1) * 1000)}ms\"",
"def __str__(self):\n return \"\"\"TimeSeries(%s)\"\"\" % \",\".join([str(entry) for entry in self._timeseriesData])",
"def _timestamp(self):\n\n retval = []\n\n if self.log_level >= _Log.DEBUG:\n retval.append('%f: ' % (time.time() - self.start_time,))\n\n return ''.join(retval)",
"def metric_results_to_string(list_scores=None, list_cutoffs=None, split_str=', '):\n list_str = []\n for i in range(len(list_scores)):\n list_str.append('nDCG@{}:{:.4f}'.format(list_cutoffs[i], list_scores[i]))\n return split_str.join(list_str)",
"def get_timescale_stringlist(self):\n return text_timescale",
"def output_metrics(self):\n print('')\n for key in sorted(self.metrics):\n print('{}:'.format(key), end='')\n for k, v in self.metrics[key].items():\n if type(v[-1]) is list:\n print('\\t' + k + ': ' + ''.join('{:5.3f} '.format(vs) for vs in v[-1]), end='')\n else:\n print('\\t{}: {:5.3f}'.format(k, v[-1]), end='')\n print('\\n', end='')"
] | [
"0.7170015",
"0.7163434",
"0.614878",
"0.598899",
"0.5964182",
"0.5915469",
"0.5894196",
"0.5894196",
"0.5894196",
"0.58587414",
"0.58564055",
"0.5797499",
"0.5731772",
"0.5715355",
"0.57042557",
"0.56763",
"0.5675049",
"0.56671524",
"0.565569",
"0.5654457",
"0.5632019",
"0.5623292",
"0.56080914",
"0.559845",
"0.5598101",
"0.5580598",
"0.55702",
"0.55635744",
"0.555327",
"0.5537504"
] | 0.73370224 | 0 |
Generic function to dump all Test Execution data in a CSV file. | def write_to_csv(self):
dump_list = []
# add rows one by one, each as a list, even if only 1 element
dump_list.append(["test execution ID",self.ID])
dump_list.append(["test execution name",self.name])
dump_list.append(["test definition ID",self.test_def_ID])
test_def_name = get_indexed_item_from_file(self.test_def_ID, FILE_TEST_DEFINITIONS)
dump_list.append(["test definition name",test_def_name])
dump_list.append(["associated challenge execution ID",self.challenge_exec_ID])
dump_list.append(["user ID",self.user_ID])
if self.start_time != None:
dump_list.append(["test start time",self.start_time.strftime("%Y-%m-%d %H:%M:%S")])
if self.finish_time != None:
dump_list.append(["test finish time",self.finish_time.strftime("%Y-%m-%d %H:%M:%S")])
if self.challenge_start_time != None:
dump_list.append(["challenge stop time",self.challenge_start_time.strftime("%Y-%m-%d %H:%M:%S")])
if self.restoration_detection_time != None:
dump_list.append(["restoration detection time",self.restoration_detection_time.strftime("%Y-%m-%d %H:%M:%S")])
if self.recovery_time != None:
if self.recovery_time.value != None:
if type(self.recovery_time.value)==timedelta:
# timedelta: days and seconds are attributes, total_seconds() is a method
dump_list.append(["MEASURED RECOVERY TIME (s)",self.recovery_time.value.total_seconds()])
rtday = self.recovery_time.value.days
rthrs = self.recovery_time.value.seconds // 3600
rtmin = (self.recovery_time.value.seconds % 3600) // 60
rtsec = self.recovery_time.value.seconds % 60
rtmil = self.recovery_time.value.microseconds
dump_list.append(["MEASURED RECOVERY TIME (days, hours, mins, seconds, microseconds)",
rtday, rthrs, rtmin, rtsec, rtmil])
if self.associated_metric_values.length() > 0 :
dump_list.append(["Metric Values:"])
for item in self.associated_metric_values.get_timestamped_metric_values_as_strings():
dump_list.append([item])
if self.log.length() > 0 :
dump_list.append(["Log:"])
for item in self.log.get_timestamped_strings():
dump_list.append([item])
if self.CLI_responses.length() > 0 :
dump_list.append(["CLI responses:"])
for item in self.CLI_responses.get_timestamped_strings():
dump_list.append([item])
if self.API_responses.length() > 0 :
dump_list.append(["API responses:"])
for item in self.API_responses.get_timestamped_strings():
dump_list.append([item])
try:
# output CSV file name: testDefExec + ID + start time + .csv
file_name = "testDefExec" + "{0:0=3d}".format(self.test_def_ID) + "-" + self.start_time.strftime("%Y-%m-%d-%H-%M-%S") + ".csv"
with open(file_name, "w", newline="") as file:
csv_file_writer = csv.writer(file)
csv_file_writer.writerows(dump_list)
except Exception as e:
print(type(e), e)
sys.exit() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Dump():\n with open(path.join(MAIN_PATH, INST), \"wb\") as f:\n writer = csv.writer(f, delimiter=\",\")\n\n for inst in instances:\n writer.writerow(inst)\n \n with open(path.join(MAIN_PATH, \"test_instances.csv\"), \"wb\") as f:\n writer = csv.writer(f, delimiter=\",\")\n\n for inst in test_instances:\n writer.writerow(inst)",
"def test_to_file(self):\n with TemporaryDirectory() as tmp:\n df_test = make_simple_dataframe()\n Base = BaseDataClass.from_object(df_test)\n fp_save = os.path.join(tmp, \"test_save.csv\")\n Base.to_file(fp_save)\n assert os.path.exists(fp_save)",
"def dump_all_binaries_to_CSV():\n ## TODO\n timenow = datetime.now()",
"def print(self):\n df = self.gen_test()\n # print(df)\n df.to_csv('some_dated_file.csv', index=False)\n return df",
"def DumpCsv(data):\n \n raise Exception('TBI: Need standard container structure for this to work, cause its flat...')",
"def all(config_file):\n with open(config_file) as f:\n config = json.load(f)\n scenes = get_realsense_scenes(config['realsense_dir'])\n all_dfs = []\n for scene in scenes:\n scene_data = get_data_from_scene(scene)\n logger.info(\"Evaluating - %s\", scene['scene_name'])\n df = run_test_on_scene(scene_data, config)\n all_dfs.append(df)\n\n df = pd.concat(all_dfs, axis=0)\n df = df.reset_index()\n print(df)\n df.to_csv(config['save_csv'])",
"def log_evaluation(tester, name, description):\r\n\tfor dataset, output in tester.preds.items():\r\n\t\tresults = pandas.DataFrame.from_dict(output)\r\n\t\tpath = os.path.join(\r\n\t\t\tEXPERIMENT_PATH, tester.config[\"name\"] + '-' + dataset)\r\n\t\twith open(path + \".csv\", \"w\") as f:\r\n\t\t\tresults.to_csv(f, sep=\"\\t\", encoding='utf-8',\r\n\t\t\t\tfloat_format='%.3f', index=False)",
"def write_csv(self):\n with open(paths.CSV_FILE, 'w', newline='') as csv_file:\n writer = csv.writer(csv_file)\n assg = AssignmentConfig().get_assignment()\n writer.writerow([\"Student\"] + assg.get_test_list() + assg.get_programs_list() +\n [\"normalised_test_score\"] + [\"normalised_prog_score\"] + [\"total\"] + [\"total_rounded\"])\n\n for (submitter, submitter_data) in sorted(self.snapshot['results'].items()):\n total_score = submitter_data[\"normalised_test_score\"] + submitter_data[\"normalised_prog_score\"]\n total_rounded = round(total_score * 2) / 2 # total score rounded to nearest 0.5\n writer.writerow([submitter] +\n [submitter_data[\"tests\"][test] for test in sorted(submitter_data[\"tests\"])] +\n [submitter_data[\"progs\"][prog] for prog in sorted(submitter_data[\"progs\"])] +\n [submitter_data[\"normalised_test_score\"]] +\n [submitter_data[\"normalised_prog_score\"]] +\n [round(total_score, 2)] +\n [total_rounded])",
"def create_csv(self):\n try:\n # Convert List of Lists to DataFrame and write it to a CSV\n pd.DataFrame(self.data, columns=self.header) \\\n .to_csv(os.path.join(self.file_path, self.file_name), index=False)\n self.successful_run = True\n except:\n # TODO create Exception Handling\n raise",
"def export_data(self, pth):\n self.cleanup_allowed = False\n self.train_df.to_csv(os.path.join(pth, \"train.csv\"))\n self.valid_df.to_csv(os.path.join(pth, \"valid.csv\"))\n self.test_df.to_csv(os.path.join(pth, \"test.csv\"))",
"def dump(self):\n try:\n _file = FileOps.join_path(TaskOps().step_path, \"reports.csv\")\n FileOps.make_base_dir(_file)\n data = self.all_records\n data_dict = {}\n for step in data:\n step_data = step.serialize().items()\n for k, v in step_data:\n if k in data_dict:\n data_dict[k].append(v)\n else:\n data_dict[k] = [v]\n\n data = pd.DataFrame(data_dict)\n data.to_csv(_file, index=False)\n _file = os.path.join(TaskOps().step_path, \".reports\")\n _dump_data = [ReportServer._hist_records, ReportServer.__instances__]\n with open(_file, \"wb\") as f:\n pickle.dump(_dump_data, f, protocol=pickle.HIGHEST_PROTOCOL)\n\n self.backup_output_path()\n except Exception:\n logging.warning(traceback.format_exc())",
"def write_to_csv(self):\n\n dump_list = []\n\n # add rows one by one, each as a list, even if only 1 element\n\n dump_list.append([\"challenge execution ID\",self.ID])\n dump_list.append([\"challenge execution name\",self.name])\n\n dump_list.append([\"challenge definition ID\",self.challenge_def_ID])\n challenge_def_name = get_indexed_item_from_file(self.challenge_def_ID, FILE_CHALLENGE_DEFINITIONS)\n dump_list.append([\"challenge definition name\",challenge_def_name])\n\n if self.start_time != None:\n dump_list.append([\"challenge start time\",self.start_time.strftime(\"%Y-%m-%d %H:%M:%S\")])\n if self.stop_time != None:\n dump_list.append([\"challenge stop time\",self.stop_time.strftime(\"%Y-%m-%d %H:%M:%S\")])\n\n if self.log.length() > 0 :\n dump_list.append([\"Log:\"])\n for item in self.log.get_timestamped_strings():\n dump_list.append([item])\n\n if self.CLI_responses.length() > 0 :\n dump_list.append([\"CLI responses:\"])\n for item in self.CLI_responses.get_timestamped_strings():\n dump_list.append([item])\n\n if self.API_responses.length() > 0 :\n dump_list.append([\"API responses:\"])\n for item in self.API_responses.get_timestamped_strings():\n dump_list.append([item])\n\n try:\n # output CSV file name: challDefExec + ID + start time + .csv\n file_name = \"challDefExec\" + \"{0:0=3d}\".format(self.challenge_def_ID) + \"-\" + self.start_time.strftime(\"%Y-%m-%d-%H-%M-%S\") + \".csv\"\n with open(file_name, \"w\", newline=\"\") as file:\n csv_file_writer = csv.writer(file)\n csv_file_writer.writerows(dump_list)\n except Exception as e:\n print(type(e), e)\n sys.exit()",
"def test_csv_writes(self):\n counter = testdata.get_counter()\n csvfile = testdata.create_csv({\n \"foo\": counter,\n \"bar\": testdata.get_words,\n })\n\n for row in csvfile:\n for k in [\"foo\", \"bar\"]:\n self.assertTrue(k in row)\n self.assertTrue(row[k])",
"def create_test_csv():\n if os.path.exists(args.test):\n print(\"--Traffic input for analysis found: \", args.test)\n #quick and dirty create csv file\n headers = os.system(\"echo idorigh,idresph,origbytes,respbytes,origpkts,resppkts,duration > test.csv\")\n brocut = os.system(\"cat \"+str(args.test)+\"| bro-cut id.orig_h id.resp_h orig_bytes resp_bytes orig_pkts resp_pkts duration | sed 's/\t/\\,/g' | sed '/-/d'>> test.csv\")\n \n else:\n print(\"Bro testing data input \"+str(args.test)+\" not found - needs to be in working directory\")\n exit()",
"def file(self):\n result = []\n completePath = CompletePath(self.path, self.filename) \n with open(completePath.path(), 'w', newline='') as csvfile:\n fieldnames = ['Activity', 'Points']\n writer = csv.DictWriter(csvfile, fieldnames = fieldnames)\n writer.writeheader()\n for i in range ( len( self.groupPriority.rows() ) ):\n tmp = self.groupPriority.rows()[i]\n self.log.info ( \"FinalCSV\", \"file\",\"data {0},{1}\".format( tmp.activity(), tmp.points() ) )\n writer.writerow({'Activity': tmp.activity(), 'Points': tmp.points()})\n self.log.info(\"FinalCSV\", \"file\", \"Elaborated file: {0}\".format ( completePath.path() ) )",
"def _export_data_to_csv(self, source, target):\n self.log.info(f\"Dumping data into {target}\")\n source.to_csv(target, index=False)",
"def log_results(self, path):\n pd.DataFrame(self.results).to_csv(path)",
"def write_test_data(sql):\n for fname in sorted(glob.glob(\"mock_data/*.csv\")):\n print(fname)\n with open(fname, 'r', encoding='utf8') as csvfile:\n reader = csv.reader(csvfile, delimiter=\",\", quoting=csv.QUOTE_MINIMAL)\n i = 0\n for row in reader:\n if i == 0:\n if row != '' and ''.join(row) != '':\n sql.write(\"INSERT INTO \" + \"_\".join(fname.split('_')[2:])[:-4] + commajoin(row, [], 0) + \" VALUES\\n\")\n else:\n sql.write(\"INSERT INTO \" + \"_\".join(fname.split('_')[2:])[:-4] + \" VALUES\\n\")\n i += 1\n continue\n if row == '' or ''.join(row) == '':\n continue\n if i > 1:\n sql.write(\",\\n\")\n sql.write(commajoin(row, list(range(len(row))), 4))\n i += 1\n sql.write(\";\\n\\n\")",
"def test_write_race_results_to_csv():\n number = random.randint(1, 3)\n f1.write_race_results_to_csv(number)\n with open(f\"race_{number}_results.csv\", newline='') as csvfile:\n reader = csv.reader(csvfile, delimiter=\",\")\n for row in reader:\n assert len(row) == 7",
"def dump_to_csv(self, function, path, kwargs=None):\n if not kwargs:\n kwargs = {}\n df_to_dump = function(self.df, **kwargs)\n df_to_dump.to_csv(path)",
"def test_53_export_task_runs_csv(self):\r\n Fixtures.create()\r\n # First test for a non-existant app\r\n uri = '/app/somethingnotexists/tasks/export'\r\n res = self.app.get(uri, follow_redirects=True)\r\n assert res.status == '404 NOT FOUND', res.status\r\n # Now get the tasks in CSV format\r\n uri = \"/app/somethingnotexists/tasks/export?type=tas&format=csv\"\r\n res = self.app.get(uri, follow_redirects=True)\r\n assert res.status == '404 NOT FOUND', res.status\r\n\r\n # Now with a real app\r\n uri = '/app/%s/tasks/export' % Fixtures.app_short_name\r\n res = self.app.get(uri, follow_redirects=True)\r\n heading = \"<strong>%s</strong>: Export All Tasks and Task Runs\" % Fixtures.app_name\r\n assert heading in res.data, \"Export page should be available\\n %s\" % res.data\r\n # Now get the tasks in CSV format\r\n uri = \"/app/%s/tasks/export?type=task_run&format=csv\" % Fixtures.app_short_name\r\n res = self.app.get(uri, follow_redirects=True)\r\n csv_content = StringIO.StringIO(res.data)\r\n csvreader = unicode_csv_reader(csv_content)\r\n app = db.session.query(App)\\\r\n .filter_by(short_name=Fixtures.app_short_name)\\\r\n .first()\r\n exported_task_runs = []\r\n n = 0\r\n for row in csvreader:\r\n if n != 0:\r\n exported_task_runs.append(row)\r\n n = n + 1\r\n err_msg = \"The number of exported task runs is different \\\r\n from App Tasks Runs\"\r\n assert len(exported_task_runs) == len(app.task_runs), err_msg",
"def to_csv(self, path):\n results = self.all()\n if self.stop_check is not None and self.stop_check():\n return\n results.to_csv(path)",
"def etl_operations():\n tap = SQLTaps(db_type='mysql',\n username='root',\n password='',\n host='localhost',\n db_name='ETLtestDb')\n\n conn = tap.get_connection()\n\n query = 'SELECT id, filename, student_xml FROM StudentsData'\n\n rows = tap.get_rows(conn, query)\n\n rows_json = tap.covert_ResultProxy_to_JSON(rows)\n\n result_list = rows_json.get('result')\n converter = Convert()\n\n csv_row_list = list()\n\n headers = list()\n\n for row in result_list:\n xml_content = base64.b64decode(row.get('student_xml').encode())\n csv_content = converter.xml_to_csv(xml_content)\n headers = csv_content.get('columns')\n csv_row_list.append(csv_content.get('values'))\n\n csv_target('students.csv', csv_row_list, headers)",
"def dump(self, filename=None):\n if filename is None:\n current_datetime = datetime.datetime.now()\n filename = current_datetime.strftime(\"results_%Y%m%d%H%M%S.csv\")\n print(\"Writing results to \\\"{}\\\"\".format(filename))\n identifiers = \"\"\n values = \"\"\n for entry in self.log.items():\n identifiers += ((\";\" if len(identifiers) > 0 else \"\")\n + str(entry[0]))\n values += \";\" + str(entry[1]) if len(values) > 0 else str(entry[1])\n with open(filename, 'a') as f:\n f.write(identifiers + \"\\n\")\n f.write(values + \"\\n\")",
"def test_model(det_model, path='data/test'):\n result = []\n i = 0\n tests = sorted(os.listdir(path))\n for tst_file in tests:\n print(tst_file)\n test_data = pd.read_csv(os.path.join(path, tst_file), index_col=0)\n res = det_model.test(test_data)\n result.append((i, res,))\n i += 1\n\n with open('output/{0}.csv'.format(det_model), 'w+') as output:\n writer = csv.writer(output, delimiter=',',\n quotechar='|', quoting=csv.QUOTE_MINIMAL)\n writer.writerows(result)",
"def save_csv(self, filename: str, type='n', **args):\n if type == 'n':\n df = self.export_nodes()\n else:\n df = self.export_edges()\n df.to_csv(filename, index=False)",
"def save_to_csv(self):\n path = partial(os.path.join, 'datasets')\n save_name = self.name.lower().replace(' ', '_')\n self.df['values'].sum(axis=1).to_csv(path('{0}_values.csv'.format(save_name)))\n self.df['allocations'].to_csv(path('{0}_allocations.csv'.format(save_name)))\n self.df['returns'].to_csv(path('{0}_returns.csv'.format(save_name)))\n self.trades.to_csv(path('{0}_trades.csv'.format(save_name)))",
"def run_tests():\n with open(FILENAME) as file:\n\n # Loads the test hyper-parameters as dictionaries.\n tests = yaml.safe_load(file)\n \n # create a dataframe to keep the results\n test_dict = tests['Tests']\n results = pd.DataFrame(test_dict)\n results[\"Episode\"] = \"\"\n results['Max average score'] = \"\"\n\n for i, test in enumerate(tests['Tests']):\n\n env = gym.make(test['env'])\n env.reset()\n\n actor_critic = ActorCritic(env, test['episodes'], test['max_score'], \n test['hidden_size'], test['gamma'], test['save'])\n\n ## run training \n best_score, episode, rew_hist = actor_critic.train()\n\n results.loc[i,'Episode'] = episode\n results.loc[i,'Max average score'] = best_score\n\n plot_graphs(test, rew_hist)\n\n # save results to csv file\n filename = 'results/' + 'test_table.csv'\n results.to_csv(filename)\n\n return results",
"def genScheduleCSV():\r\n try: \r\n printSchedule()\r\n save_class_list()\r\n print(\"\\nSchedule generated, check working directory\")\r\n except Exception as e:\r\n print(\"Exception found\" + str(e))",
"def to_csv(self, path):\n for table in ['datasets', 'dataruns', 'hyperpartitions', 'classifiers']:\n df = pd.read_sql('SELECT * FROM %s' % table, self.session.bind)\n df.to_csv(os.path.join(path, '%s.csv' % table), index=False)"
] | [
"0.67293197",
"0.6513849",
"0.6487891",
"0.633611",
"0.62388986",
"0.6189851",
"0.61873883",
"0.6097543",
"0.608892",
"0.6053703",
"0.60277325",
"0.5961769",
"0.5952618",
"0.5923491",
"0.59027004",
"0.588059",
"0.58773726",
"0.5876296",
"0.5853088",
"0.583685",
"0.5753217",
"0.5750302",
"0.57436323",
"0.5723493",
"0.5721366",
"0.5713844",
"0.57068485",
"0.5702311",
"0.5695403",
"0.56835353"
] | 0.7351433 | 0 |
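The test-execution record above decomposes a timedelta recovery time into days, hours, minutes, seconds and microseconds. That arithmetic can be checked in isolation; the function below is illustrative, with variable names mirroring the record.

from datetime import timedelta

def split_recovery_time(rt: timedelta):
    # days, seconds and microseconds are attributes of timedelta;
    # total_seconds() is a method, as the record's comment notes.
    rtday = rt.days
    rthrs = rt.seconds // 3600
    rtmin = (rt.seconds % 3600) // 60
    rtsec = rt.seconds % 60
    rtmil = rt.microseconds
    return rt.total_seconds(), (rtday, rthrs, rtmin, rtsec, rtmil)

# Sanity check: 1 day, 2 hours, 3 minutes, 4 seconds
assert split_recovery_time(timedelta(days=1, hours=2, minutes=3, seconds=4))[1] == (1, 2, 3, 4, 0)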
Save the codes and configuration file. During the training, we may modify the codes. It will be problematic when we try to extract embeddings using the old model and the new code. So we save the codes when we train the model and use the saved codes to extract embeddings. | def save_codes_and_config(cont, model, config):
if cont:
# If we want to continue the model training, we need to check the existence of the checkpoint.
if not os.path.isdir(os.path.join(model, "nnet")) or not os.path.isdir(os.path.join(model, "codes")):
sys.exit("To continue training the model, nnet and codes must be existed in %s." % model)
# Simply load the configuration from the saved model.
tf.logging.info("Continue training from %s." % model)
params = Params(os.path.join(model, "nnet/config.json"))
else:
# Save the codes in the model directory so that it is more convenient to extract the embeddings.
# The codes would be changed when we extract the embeddings, making the network loading impossible.
# When we want to extract the embeddings, we should use the code in `model/codes/...`
if os.path.isdir(os.path.join(model, "nnet")):
# Backup the codes and configuration in .backup. Keep the model unchanged.
tf.logging.info("Save backup to %s" % os.path.join(model, ".backup"))
if os.path.isdir(os.path.join(model, ".backup")):
tf.logging.warn("The dir %s exisits. Delete it and continue." % os.path.join(model, ".backup"))
shutil.rmtree(os.path.join(model, ".backup"))
os.makedirs(os.path.join(model, ".backup"))
if os.path.exists(os.path.join(model, "codes")):
shutil.move(os.path.join(model, "codes"), os.path.join(model, ".backup/"))
if os.path.exists(os.path.join(model, "nnet")):
shutil.move(os.path.join(model, "nnet"), os.path.join(model, ".backup/"))
# if os.path.exists(os.path.join(model, "log")):
# copy_tree(os.path.join(model, "log"), os.path.join(model, ".backup/"))
if os.path.exists(os.path.join(model, "lib")):
shutil.move(os.path.join(model, "lib"), os.path.join(model, ".backup/"))
# `model/codes` is used to save the codes and `model/nnet` is used to save the model and configuration
if os.path.isdir(os.path.join(model, "codes")):
shutil.rmtree(os.path.join(model, "codes"))
if os.path.isdir(os.path.join(model, "lib")):
shutil.rmtree(os.path.join(model, "lib"))
os.makedirs(os.path.join(model, "codes"))
# We need to set the home directory of the tf-kaldi-speaker (TF_KALDI_ROOT).
if not os.environ.get('TF_KALDI_ROOT'):
tf.logging.error("TF_KALDI_ROOT should be set before training. Refer to path.sh to set the value manually. ")
quit()
copy_tree(os.path.join(os.environ['TF_KALDI_ROOT'], "dataset"), os.path.join(model, "codes/dataset/"))
copy_tree(os.path.join(os.environ['TF_KALDI_ROOT'], "model"), os.path.join(model, "codes/model/"))
copy_tree(os.path.join(os.environ['TF_KALDI_ROOT'], "misc"), os.path.join(model, "codes/misc/"))
copy_tree(os.path.join(os.getcwd(), "nnet/lib"), os.path.join(model, "lib"))
if not os.path.isdir(os.path.join(model, "nnet")):
os.makedirs(os.path.join(model, "nnet"))
shutil.copyfile(config, os.path.join(model, "nnet", "config.json"))
tf.logging.info("Train the model from scratch.")
params = Params(config)
return params | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def saveOutput(self,code):\r\n\t\tCodeSaver().save(code,self.savePath)",
"def save_model(self):\n filename=self.name + '_words'\n file_write(filename, self.words)\n\n filename2=self.name+'_word_lengths'\n file_write(filename2, self.word_lengths)\n\n filename3=self.name+'_stems'\n file_write(filename3, self.stems)\n\n filename4=self.sentence_lengths+'_sentence_lengths'\n file_write(filename4, self.sentence_lengths)\n\n filename5= self.endings+'_endings'\n file_write(filename5, self.endings)",
"def save_model(self):\n filename = self.name + '_words'\n f = open(filename, 'w') \n f.write(str(self.words)) \n f.close()\n \n filename2 = self.name + '_word_lengths'\n f = open(filename2, 'w') \n f.write(str(self.word_lengths)) \n f.close()\n \n filename3 = self.name + '_stems'\n f = open(filename3, 'w') \n f.write(str(self.stems)) \n f.close()\n \n filename4 = self.name + '_sentence_lengths'\n f = open(filename4, 'w') \n f.write(str(self.sentence_lengths)) \n f.close()\n \n filename5 = self.name + '_punctuation'\n f = open(filename5, 'w') \n f.write(str(self.punctuation)) \n f.close()",
"def save(self):\n pickle.dump([self.word2vec, self.img2sentence, self.word_freq, self.num_words, self.word2idx, self.idx2word], open(self.save_file, 'wb'), protocol=4)",
"def save_model(self):\n save_folder = os.path.join(self.log_path, \"models\", \"weights_{}\".format(self.epoch))\n if not os.path.exists(save_folder):\n os.makedirs(save_folder)\n\n for model_name in [\"encoder\", \"decoder\"]:\n save_path = os.path.join(save_folder, \"{}.pth\".format(model_name))\n if model_name == 'encoder':\n to_save = self.encoder.state_dict()\n # save the sizes - these are needed at prediction time\n to_save['height'] = self.opt.height\n to_save['width'] = self.opt.width\n else:\n to_save = self.decoder.state_dict()\n torch.save(to_save, save_path)\n\n save_path = os.path.join(save_folder, \"{}.pth\".format(\"adam\"))\n torch.save(self.optimizer.state_dict(), save_path)",
"def save_model(self):\r\n jeff = self.name + '_words'\r\n f = open(jeff, 'w')\r\n f.write(str(self.words))\r\n f.close()\r\n \r\n jeph = self.name + '_word_lengths'\r\n f = open(jeph, 'w')\r\n f.write(str(self.word_lengths))\r\n f.close()\r\n \r\n geoff = self.name + '_stems'\r\n f = open(geoff, 'w')\r\n f.write(str(self.stems))\r\n f.close()\r\n \r\n joeff= self.name + '_sentence_lengths'\r\n f = open(joeff, 'w')\r\n f.write(str(self.sentence_lengths))\r\n f.close()\r\n \r\n geoph = self.name + '_punctuation'\r\n f = open(geoph, 'w')\r\n f.write(str(self.punctuation))\r\n f.close()",
"def save_model(self):\n print(\"\\nModels are integrated to be multi scale.\\nSaving to disk.\")\n self.column_names = [ \"x_\" + str(x) for x in range(self.embedding.shape[1])]\n self.embedding = pd.DataFrame(self.embedding, columns = self.column_names)\n self.embedding.to_csv(self.args.output, index = None)",
"def save(self, path):\n save_dict = {\n 'model': {\n 'vocabulary': self.vocabulary,\n 'max_sequence_length': self.max_sequence_length\n },\n 'decorator': {\n 'params': self.network.get_params(),\n 'state': self.network.state_dict()\n }\n }\n torch.save(save_dict, path)",
"def save(self, path: utils.URLPath):\n save_somclassifier_config(self.config, path / \"config.json\")\n self.model.save(str(path / \"model.h5\"))\n io_functions.save_joblib(self.binarizer, path / \"binarizer.joblib\")\n\n io_functions.save_json(self.data_ids[\"validation\"], path / \"ids_validate.json\")\n io_functions.save_json(self.data_ids[\"train\"], path / \"ids_train.json\")",
"def save_model(self, output_dir):\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n logger.info('Saving model')\n dst_config_file = os.path.join(output_dir, self.CONFIG_FILE)\n if self.fullpath_input_configfile != dst_config_file:\n shutil.copy(self.fullpath_input_configfile, dst_config_file)\n\n pickle.dump(self.word_det_rfc,\n open(os.path.join(output_dir, self.WORD_DET_RFC), 'wb'))\n pickle.dump(self.reg_coeffs, open(\n os.path.join(output_dir, self.REGRESSION_PARAMS), 'wb'))",
"def save(self, filename):\n model_dict = {'model_state_dict': self.state_dict(),\n 'init_args': {\"vocab_size\": self.vocab_size,\n \"embeddings_size\": self.embeddings_size,\n \"hidden_size\": self.hidden_size,\n \"mlp_hidden_size\": self.mlp_hidden_size,\n \"dropout\": self.dropout}}\n torch.save(model_dict, filename)",
"def saveSettings(self):\n self.genFiles.applyData()\n self.genGraph.applyData()",
"def save(self) -> None:\n self.saver.save_model_and_weights(self.model)\n self.saver.save_data_shuffle_indices(\n self.data.eval_shuffler.ds_inds\n )\n self.saver.save_input_scaler(self.data.x.scaler)",
"def save(self, filepath):\n save_ckpt = {\n 'ae': self.state_dict(),\n 'optimizer': self.optimizer.state_dict()\n }\n try:\n torch.save(save_ckpt, os.path.join(filepath, 'ckpt_ae.pth'))\n except:\n print('Cannot save autoencoder.')",
"def save_model(self):\n # words dictionary\n filename = self.name + \"_words\"\n f = open(filename, 'w')\n f.write(str(self.words))\n f.close()\n\n # word_lengths dictionary\n filename = self.name + \"_word_lengths\"\n f = open(filename, 'w')\n f.write(str(self.word_lengths))\n f.close()\n\n # stems dictionary\n filename = self.name + \"_stems\"\n f = open(filename, 'w')\n f.write(str(self.stems))\n f.close()\n\n # sentence_lengths dictionary\n filename = self.name + \"_sentence_lengths\"\n f = open(filename, 'w')\n f.write(str(self.sentence_lengths))\n f.close()\n\n # ten most common words\n filename = self.name + \"_common_word\"\n f = open(filename, 'w')\n f.write(str(self.common_word))\n f.close()",
"def save(self):\n if self.loaded:\n full_file_name = self.resource_manager.get_dataset(self.corpus, self.embeddings.vsm_name)\n logging.info('Saving dataset to [%s]', full_file_name)\n with lzma.open(full_file_name, 'wb') as f:\n pickle.dump(self, f)\n else:\n logging.error('Dataset not loaded, call \"build\" method first!')",
"def save_model(self):\n torch.save(self.get_params(), 'code/lr-model.pt')",
"def save_embedding(embedding, save_path=PROJECT_DIR / \"outputs/models/embedding.pkl\"):\n\n make_dir_if_not_exist(save_path.parent)\n with open(save_path, \"wb\") as out:\n pickle.dump(embedding, out)",
"def save(self):\n if self.loaded:\n list_embeddingNames = [self.embeddings.vsm_name, self.synset_embeddings.vsm_name, self.imagined_embeddings.vsm_name]\n full_file_name = self.resource_manager.get_multimodal_dataset(self.corpus, list_embeddingNames)\n logging.info('Saving dataset to [%s]', full_file_name)\n with lzma.open(full_file_name, 'wb') as f:\n pickle.dump(self, f)\n else:\n logging.error('Dataset not loaded, call \"build\" method first!')",
"def save(self):\n data = (\n self.Joints,\n self.Links,\n self.joint_syms,\n self.global_syms,\n self.name,\n self.sym_prefix,\n )\n cloudpickle.dump(data, open(self.save_filename, \"wb\"))",
"def _save_processed_data(self, train_examples, dev_examples, word_vocab, char_vocab):\n with open(os.path.join(self._data_root_path, self._processed_train_data_file_name),\n 'w') as f:\n json.dump(train_examples, f)\n\n with open(os.path.join(self._data_root_path, self._processed_dev_data_file_name), 'w') as f:\n json.dump(dev_examples, f)\n\n with open(os.path.join(self._data_root_path, self._word_vocab_file_name), 'w') as f:\n f.write(word_vocab.to_json())\n\n with open(os.path.join(self._data_root_path, self._char_vocab_file_name), 'w') as f:\n f.write(char_vocab.to_json())",
"def save_model(self):\n f = open(self.name + '_' + 'words', 'w')\n f.write(str(self.words))\n f.close\n\n f = open(self.name + '_' + 'word_lengths', 'w')\n f.write(str(self.word_lengths))\n f.close\n\n f = open(self.name + '_' + 'sentence_lengths', 'w')\n f.write(str(self.sentence_lengths))\n f.close\n\n f = open(self.name + '_' + 'stems', 'w')\n f.write(str(self.stems))\n f.close\n\n f = open(self.name + '_' + 'commas_per_sentence', 'w')\n f.write(str(self.commas_per_sentence))\n f.close",
"def save_model(self):\n\n self.check_model()\n\n with open(self.filename, 'wb') as file:\n pickle.dump({'model': self.model, 'vec': self.vectorizer, 'vec_data': self.vectorized_data,\n 'df': self.df_topic_keywords}, file)",
"def _save_trained_params(self):\n self.trained_model_params = self.sess_train.run([self.ent_emb, self.rel_emb])",
"def save(self,sess):\n self.saver.save(sess,\"./Models/\" + self.mod_name + \".ckpt\")",
"def save_model(self):\n save_folder = os.path.join(self.log_path, \"models\", \"weights_{}\".format(self.epoch))\n if not os.path.exists(save_folder):\n os.makedirs(save_folder)\n\n for model_name, model in self.models.items():\n print(\"MODEL NAME = {}\".format(model_name))\n save_path = os.path.join(save_folder, \"{}.pth\".format(model_name))\n to_save = model.state_dict()\n if model_name == 'encoder':\n # save the sizes - these are needed at prediction time\n to_save['height'] = self.height\n to_save['width'] = self.width\n torch.save(to_save, save_path)\n\n save_path = os.path.join(save_folder, \"{}.pth\".format(\"adam\"))\n torch.save(self.model_optimizer.state_dict(), save_path)",
"def save(self, save_dir='models'):\n with open(os.path.join(save_dir, 'model_expert_predictor.pkl'), 'wb') as f:\n pickle.dump(self.model, f)\n with open(os.path.join(save_dir, 'vectorizer_expert_predictor.pkl'), 'wb') as f:\n pickle.dump(self.vectorizer, f)\n with open(os.path.join(save_dir, 'userid2name.pkl'), 'wb') as f:\n pickle.dump(self.userid2name, f)\n with open(os.path.join(save_dir, 'name2userid.pkl'), 'wb') as f:\n pickle.dump(self.name2userid, f)",
"def save_model(self, name: str):\n\n # Saving the current config\n self.cM.create_config(name + \".cfg\")\n\n # Saving all Vocabs\n pickle.dump(self.output_field.vocab, open(name + \".out_voc\", \"wb\"))\n pickle.dump(self.input_field.vocab, open(name + \".in_voc\", \"wb\"))\n\n # Saving the actual network\n if os.path.exists(name + \".auto\"):\n # If auto saving found, simply rename it\n logging.info(f\"Autostopper STOP\")\n os.rename(name + \".auto\", name + \".ph\")\n else:\n self.network.save_model(name + \".ph\")",
"def save_model(self):\n self.pred_net.save((self.save_path / \"iqn_pred_net\").absolute().as_posix())\n self.target_net.save((self.save_path / \"iqn_target_net\").absolute().as_posix())",
"def save_model(cls, vocab, path, filename):\n return super().save_model(vocab, path, filename)"
] | [
"0.66678596",
"0.65298474",
"0.65280056",
"0.65230376",
"0.65037274",
"0.64659584",
"0.6451233",
"0.6426465",
"0.6415432",
"0.63984126",
"0.6397235",
"0.6397206",
"0.6392341",
"0.63780564",
"0.63757557",
"0.6353628",
"0.63431454",
"0.62933993",
"0.627987",
"0.6270623",
"0.6253274",
"0.6232726",
"0.6230188",
"0.62235737",
"0.6186911",
"0.6176143",
"0.61683375",
"0.61632633",
"0.61471945",
"0.61389214"
] | 0.7381167 | 0 |
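For context, a minimal sketch of how the save_codes_and_config function above might be called from a training entry point. The import path, argument names, and directory layout are assumptions for illustration only and are not part of the dataset record.

# Hypothetical training entry point; the import path is an assumption.
import argparse
from misc.utils import save_codes_and_config

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--cont", action="store_true", help="continue training from an existing model directory")
    parser.add_argument("config", help="path to the config.json used for training")
    parser.add_argument("model", help="model directory; nnet/, codes/ and lib/ are created inside")
    args = parser.parse_args()

    # Returns the Params object, loaded from model/nnet/config.json when continuing,
    # or from the given config when training from scratch.
    params = save_codes_and_config(args.cont, args.model, args.config)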
Get the pretrained model and copy it to the target model directory as the initial version. | def get_pretrain_model(pretrain_model, target_model, checkpoint='-1'):
if not os.path.isfile(os.path.join(pretrain_model, "checkpoint")):
sys.exit("[ERROR] Cannot find checkpoint in %s." % pretrain_model)
ckpt = tf.train.get_checkpoint_state(pretrain_model)
model_checkpoint_path = ckpt.model_checkpoint_path
all_model_checkpoint_paths = ckpt.all_model_checkpoint_paths
if not ckpt or not model_checkpoint_path:
sys.exit("[ERROR] Cannot read checkpoint %s." % os.path.join(pretrain_model, "checkpoint"))
steps = [int(c.rsplit('-', 1)[1]) for c in all_model_checkpoint_paths]
steps = sorted(steps)
if checkpoint == "last":
tf.logging.info("Load the last saved model.")
checkpoint = steps[-1]
else:
checkpoint = int(checkpoint)
if checkpoint == -1:
tf.logging.info("Load the best model according to valid_loss")
min_epoch = -1
min_loss = 1e10
with open(os.path.join(pretrain_model, "valid_loss")) as f:
for line in f.readlines():
epoch, loss, eer = line.split(" ")
epoch = int(epoch)
loss = float(loss)
if loss < min_loss:
min_loss = loss
min_epoch = epoch
# Add 1 to min_epoch since epoch is 0-based
config_json = os.path.join(pretrain_model, "config.json")
params = Params(config_json)
checkpoint = (min_epoch + 1) * params.num_steps_per_epoch
assert checkpoint in steps, "The checkpoint %d not in the model directory" % checkpoint
pretrain_model_checkpoint_path = model_checkpoint_path.rsplit("-", 1)[0] + "-" + str(checkpoint)
tf.logging.info("Copy the pre-trained model %s as the fine-tuned initialization" % pretrain_model_checkpoint_path)
import glob
for filename in glob.glob(pretrain_model_checkpoint_path + "*"):
bas = os.path.basename(filename).split("-", 1)[0]
ext = os.path.basename(filename).rsplit(".", 1)[1]
shutil.copyfile(filename, os.path.join(target_model, bas + "-0." + ext))
with open(os.path.join(target_model, "checkpoint"), "w") as f:
f.write("model_checkpoint_path: \"%s\"\n" % os.path.join(target_model, os.path.basename(model_checkpoint_path).rsplit("-", 1)[0] + "-0"))
f.write("all_model_checkpoint_paths: \"%s\"\n" % os.path.join(target_model, os.path.basename(model_checkpoint_path).rsplit("-", 1)[0] + "-0"))
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_pretrained_model(destination):\n url = \"https://storage.googleapis.com/download.magenta.tensorflow.org/models/ \\\n arbitrary_style_transfer.tar.gz\"\n\n os.system(\"curl -o arbitrary_style_transfer.tar.gz {0}\".format(url))\n with tarfile.open(\"arbitrary_style_transfer.tar.gz\") as tar:\n if not os.path.exists(destination):\n os.makedirs(destination)\n tar.extractall(destination)",
"def load_model(self):\n if self.ckpt_flag:\n LOG('Skip Loading Pre-trained Model......')\n else:\n if self.params.pre_trained_from is not None and os.path.exists(self.params.pre_trained_from):\n try:\n LOG('Loading Pre-trained Model at %s' % self.params.pre_trained_from)\n pretrain = torch.load(self.params.pre_trained_from)\n self.network.load_state_dict(pretrain)\n LOG('Pre-trained Model Loaded!')\n except:\n WARNING('Cannot load pre-trained model. Start training......')\n else:\n WARNING('Pre-trained model do not exits. Start training......')",
"def load_model(model, model_index, device=\"cpu\"):\n with open(\"trained_local_model\"+str(model_index), \"rb\") as f_:\n model.load_state_dict(torch.load(f_))\n model.to(device)\n return model",
"def loadModel(self):\n self.model.load_state_dict(torch.load(os.path.join(self.model_save_dir, '{}_trained.pt'.format(self.model_name)), map_location=torch.device(device)))\n return self.model",
"def _get_fallback_model(self) -> BertModel:\n if not self._model_fallback:\n self._model_fallback = BertModel.from_pretrained(\n self._model_directory\n ).eval()\n return self._model_fallback",
"def load_pretrained_model(\n init_param: str,\n model: torch.nn.Module,\n ignore_init_mismatch: bool,\n map_location: str = \"cpu\",\n):\n sps = init_param.split(\":\", 4)\n if len(sps) == 4:\n path, src_key, dst_key, excludes = sps\n elif len(sps) == 3:\n path, src_key, dst_key = sps\n excludes = None\n elif len(sps) == 2:\n path, src_key = sps\n dst_key, excludes = None, None\n else:\n (path,) = sps\n src_key, dst_key, excludes = None, None, None\n if src_key == \"\":\n src_key = None\n if dst_key == \"\":\n dst_key = None\n\n if dst_key is None:\n obj = model\n else:\n\n def get_attr(obj: Any, key: str):\n \"\"\"Get an nested attribute.\n\n >>> class A(torch.nn.Module):\n ... def __init__(self):\n ... super().__init__()\n ... self.linear = torch.nn.Linear(10, 10)\n >>> a = A()\n >>> assert A.linear.weight is get_attr(A, 'linear.weight')\n\n \"\"\"\n if key.strip() == \"\":\n return obj\n for k in key.split(\".\"):\n obj = getattr(obj, k)\n return obj\n\n obj = get_attr(model, dst_key)\n\n src_state = torch.load(path, map_location=map_location)\n if excludes is not None:\n for e in excludes.split(\",\"):\n src_state = {k: v for k, v in src_state.items() if not k.startswith(e)}\n\n if src_key is not None:\n src_state = {\n k[len(src_key) + 1 :]: v\n for k, v in src_state.items()\n if k.startswith(src_key)\n }\n\n dst_state = obj.state_dict()\n if ignore_init_mismatch:\n src_state = filter_state_dict(dst_state, src_state)\n dst_state.update(src_state)\n obj.load_state_dict(dst_state)",
"def load_pretrained_model(\n init_param: str,\n model: torch.nn.Module,\n map_location: str = \"cpu\",\n):\n sps = init_param.split(\":\", 4)\n if len(sps) == 4:\n path, src_key, dst_key, excludes = sps\n elif len(sps) == 3:\n path, src_key, dst_key = sps\n excludes = None\n elif len(sps) == 2:\n path, src_key = sps\n dst_key, excludes = None, None\n else:\n (path,) = sps\n src_key, dst_key, excludes = None, None, None\n if src_key == \"\":\n src_key = None\n if dst_key == \"\":\n dst_key = None\n\n if dst_key is None:\n obj = model\n else:\n\n def get_attr(obj: Any, key: str):\n \"\"\"Get an nested attribute.\n\n >>> class A(torch.nn.Module):\n ... def __init__(self):\n ... super().__init__()\n ... self.linear = torch.nn.Linear(10, 10)\n >>> a = A()\n >>> assert A.linear.weight is get_attr(A, 'linear.weight')\n\n \"\"\"\n if key.strip() == \"\":\n return obj\n for k in key.split(\".\"):\n obj = getattr(obj, k)\n return obj\n\n obj = get_attr(model, dst_key)\n\n src_state = torch.load(path, map_location=map_location)\n if excludes is not None:\n for e in excludes.split(\",\"):\n src_state = {k: v for k, v in src_state.items() if not k.startswith(e)}\n\n if src_key is not None:\n src_state = {\n k[len(src_key) + 1 :]: v\n for k, v in src_state.items()\n if k.startswith(src_key)\n }\n\n # tts.dec.feat_out,tts.dec.prob_out\n\n dst_state = obj.state_dict()\n\n for key in list(src_state.keys()):\n if src_state[key].shape != dst_state[key].shape:\n src_shape = src_state[key].shape\n dst_shape = dst_state[key].shape\n print(f'\"{key}\" shapes do not match:', src_shape, dst_shape)\n if src_shape[0] < dst_shape[0] and src_shape[1:] == dst_shape[1:]:\n print(f'doing partial override of \"{key}\"')\n dst_state[key][:src_shape[0]] = src_state[key]\n del src_state[key]\n\n dst_state.update(src_state)\n obj.load_state_dict(dst_state)",
"def get_model(cls):\n if cls.model == None:\n with open(os.path.join(model_path, 'vdok3_rf.pkl'), 'rb') as inp:\n cls.model = pickle.load(inp)\n return cls.model",
"def model_from_pretrained(model_name,\n model_head = None,\n causal_attention = False,\n silent = False,\n **kwargs):\n checkpoint_manager = CheckpointManager()\n class_name = checkpoint_manager.get_class(model_name)\n if not class_name in name_to_class:\n raise ValueError(f'{class_name} is not a valid Transformer class.')\n \n cls = name_to_class[class_name][0]\n config = ModelConfig(checkpoint_manager.get_config_path(model_name))\n model = cls(config = config, model_head = model_head, causal_attention = causal_attention, **kwargs)\n \n checkpoint_path = checkpoint_manager.get_checkpoint_path(model_name)\n model.load_checkpoint(checkpoint_path, silent = silent)\n \n return model",
"def initialize_trained_model(cls, device):\n model = cls(device=device)\n model_path = os.path.join(_FILE_PREFIX, 'weights/translator_weights_16')\n model.load_state_dict(torch.load(model_path, map_location=device), strict=True)\n print('Loaded model from {}'.format(model_path))\n return model",
"def get_or_create_model(self) -> Model:\n assert self.model_name\n\n print(\"Check if Model exists.\")\n if self.model_name in self.models:\n print(\"Model does exists.\")\n # if get_model(self.model_name).tags['train_py_hash'] == self.get_file_md5(\n # self.source_directory + \"/\" + self.script):\n model = Model(self, name=self.model_name)\n if not os.path.isdir(\"outputs\"):\n model.download(\"outputs\", exist_ok=True)\n return model\n print(\"Model does not exists.\")\n model = self.train_model()\n\n assert model\n if self.show_output:\n print(model.name, model.version, model.url, sep=\"\\n\")\n return model",
"def __pull_model(self):\n\n model = ArmModeler().get(self.name)\n\n if model:\n logger.debug(\"model creating...\")\n self.alpha = model[\"alpha\"]\n self.a = model[\"a\"]\n self.q = model[\"q\"]\n self.d = model[\"d\"]\n self.dh_params = model[\"dh_params\"]\n self.tf_matrices_list = model[\"transform_matrices\"]\n self.jacobian_matrix = model[\"jacobian_matrix\"]\n\n else:\n ArmModeler().create(self.name)\n self.__pull_model()",
"def load(self):\n try:\n if self.model.is_cuda:\n self.model.load_state_dict(torch.load(os.path.join(self.save_path, \"save_point.pth\")))\n else:\n self.model.load_state_dict(torch.load(os.path.join(self.save_path, \\\n \"save_point.pth\"), map_location=\"cpu\"))\n except:\n sys.exit(\"Unable to load previous model\")",
"def _load_model_from_trained_params(self):\n self.ent_emb = tf.constant(self.trained_model_params[0])\n self.rel_emb = tf.constant(self.trained_model_params[1])",
"def from_pretrained(cls,\n model_name_or_path: str,\n revision: Optional[str] = DEFAULT_MODEL_REVISION,\n cfg_dict: Config = None,\n device: str = None,\n **kwargs):\n prefetched = kwargs.get('model_prefetched')\n if prefetched is not None:\n kwargs.pop('model_prefetched')\n\n if osp.exists(model_name_or_path):\n local_model_dir = model_name_or_path\n else:\n if prefetched is True:\n raise RuntimeError(\n 'Expecting model is pre-fetched locally, but is not found.'\n )\n local_model_dir = snapshot_download(model_name_or_path, revision)\n logger.info(f'initialize model from {local_model_dir}')\n if cfg_dict is not None:\n cfg = cfg_dict\n else:\n cfg = Config.from_file(\n osp.join(local_model_dir, ModelFile.CONFIGURATION))\n task_name = cfg.task\n if 'task' in kwargs:\n task_name = kwargs.pop('task')\n model_cfg = cfg.model\n if hasattr(model_cfg, 'model_type') and not hasattr(model_cfg, 'type'):\n model_cfg.type = model_cfg.model_type\n model_cfg.model_dir = local_model_dir\n for k, v in kwargs.items():\n model_cfg[k] = v\n if device is not None:\n model_cfg.device = device\n model = build_model(\n model_cfg, task_name=task_name, default_args=kwargs)\n else:\n model = build_model(\n model_cfg, task_name=task_name, default_args=kwargs)\n\n # dynamically add pipeline info to model for pipeline inference\n if hasattr(cfg, 'pipeline'):\n model.pipeline = cfg.pipeline\n\n if not hasattr(model, 'cfg'):\n model.cfg = cfg\n\n model.name = model_name_or_path\n return model",
"def load(self):\r\n # self.model.load_state_dict(torch.load(os.path.join(self.ckpt_dir, 'best_model_state_dict.pt')))\r\n if torch.cuda.is_available():\r\n self.model = torch.load(os.path.join(self.ckpt_dir, 'best_model_INN.pt'))\r\n else:\r\n self.model = torch.load(os.path.join(self.ckpt_dir, 'best_model_INN.pt'), map_location=torch.device('cpu'))",
"def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):\n\n config = kwargs.pop(\"config\", None)\n state_dict = kwargs.pop(\"state_dict\", None)\n cache_dir = kwargs.pop(\"cache_dir\", None)\n from_tf = kwargs.pop(\"from_tf\", False)\n from_hf = kwargs.pop(\"from_hf\", False)\n output_loading_info = kwargs.pop(\"output_loading_info\", False)\n default_gpu = kwargs.pop(\"default_gpu\", True)\n\n # Load config\n assert config is not None\n model_kwargs = kwargs\n\n # Load model\n if pretrained_model_name_or_path in cls.pretrained_model_archive_map:\n archive_file = cls.pretrained_model_archive_map[pretrained_model_name_or_path]\n elif os.path.isdir(pretrained_model_name_or_path):\n if from_tf:\n # Directly load from a TensorFlow checkpoint\n archive_file = os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + \".index\")\n else:\n archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)\n else:\n if from_tf:\n # Directly load from a TensorFlow checkpoint\n archive_file = pretrained_model_name_or_path + \".index\"\n else:\n archive_file = pretrained_model_name_or_path\n # redirect to the cache, if necessary\n try:\n resolved_archive_file = cached_path(archive_file, cache_dir=cache_dir)\n except EnvironmentError:\n if pretrained_model_name_or_path in cls.pretrained_model_archive_map:\n logger.error(\"Couldn't reach server at '{}' to download pretrained weights.\".format(archive_file))\n else:\n logger.error(\n \"Model name '{}' was not found in model name list ({}). \"\n \"We assumed '{}' was a path or url but couldn't find any file \"\n \"associated to this path or url.\".format(\n pretrained_model_name_or_path, \", \".join(cls.pretrained_model_archive_map.keys()), archive_file)\n )\n return None\n if default_gpu:\n if resolved_archive_file == archive_file:\n logger.info(\"loading weights file {}\".format(archive_file))\n else:\n logger.info(\"loading weights file {} from cache at {}\".format(archive_file, resolved_archive_file))\n\n # Instantiate model.\n model = cls(config, *model_args, **model_kwargs)\n\n if state_dict is None and not from_tf:\n state_dict = torch.load(resolved_archive_file, map_location=\"cpu\")\n if from_tf:\n # Directly load from a TensorFlow checkpoint\n return cls.load_tf_weights(model, config, resolved_archive_file[:-6]) # Remove the '.index'\n\n # Convert old format to new format if needed from a PyTorch state_dict\n old_keys = []\n new_keys = []\n for key in state_dict.keys():\n new_key = None\n if \"gamma\" in key:\n new_key = key.replace(\"gamma\", \"weight\")\n if \"beta\" in key:\n new_key = key.replace(\"beta\", \"bias\")\n if new_key:\n old_keys.append(key)\n new_keys.append(new_key)\n for old_key, new_key in zip(old_keys, new_keys):\n state_dict[new_key] = state_dict.pop(old_key)\n\n # Rename Bert parameters for our framework\n # NB: Assume 1 Bert layer is mapped to 1 layer only (cannot be used to init multiple layers)\n old_keys = []\n new_keys = []\n nums = []\n for key in state_dict.keys():\n new_key = None\n if \".layer.\" in key and from_hf:\n num = int(key.split(\".layer.\")[-1].split(\".\")[0])\n if \".attention.\" in key:\n new_key = key.replace(\".layer.%d.attention.\" % num,\n \".layer.%d.attention_\" % config.bert_layer2attn_sublayer.get(str(num), num))\n elif \".intermediate.\" in key:\n new_key = key.replace(\".layer.%d.intermediate.\" % num,\n \".layer.%d.intermediate.\" % config.bert_layer2ff_sublayer.get(str(num), num))\n elif \".output.\" in key:\n new_key = key.replace(\".layer.%d.output.\" 
% num,\n \".layer.%d.output.\" % config.bert_layer2ff_sublayer.get(str(num), num))\n if new_key:\n old_keys.append(key)\n new_keys.append(new_key)\n nums.append(num)\n for old_key, new_key, _ in sorted(zip(old_keys, new_keys, nums), key=lambda x: x[2], reverse=True):\n state_dict[new_key] = state_dict.pop(old_key)\n\n # Load from a PyTorch state_dict\n missing_keys = []\n unexpected_keys = []\n error_msgs = []\n # copy state_dict so _load_from_state_dict can modify it\n metadata = getattr(state_dict, \"_metadata\", None)\n state_dict = state_dict.copy()\n if metadata is not None:\n state_dict._metadata = metadata\n\n def load(module, prefix=\"\"):\n local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})\n module._load_from_state_dict(\n state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs,\n )\n for name, child in module._modules.items():\n if child is not None:\n load(child, prefix + name + \".\")\n\n # Make sure we are able to load base models as well as derived models (with heads)\n start_prefix = \"\"\n model_to_load = model\n if not hasattr(model, cls.base_model_prefix) and any(\n s.startswith(cls.base_model_prefix) for s in state_dict.keys()\n ):\n start_prefix = cls.base_model_prefix + \".\"\n if hasattr(model, cls.base_model_prefix) and not any(\n s.startswith(cls.base_model_prefix) for s in state_dict.keys()\n ):\n model_to_load = getattr(model, cls.base_model_prefix)\n\n logger.info(start_prefix)\n load(model_to_load, prefix=start_prefix)\n if len(missing_keys) > 0 and default_gpu:\n logger.info(\n \"Weights of {} not initialized from pretrained model: {}\".format(model.__class__.__name__, missing_keys)\n )\n if len(unexpected_keys) > 0 and default_gpu:\n logger.info(\n \"Weights from pretrained model not used in {}: {}\".format(model.__class__.__name__, unexpected_keys)\n )\n if len(error_msgs) > 0 and default_gpu:\n raise RuntimeError(\n \"Error(s) in loading state_dict for {}:\\n\\t{}\".format(model.__class__.__name__, \"\\n\\t\".join(error_msgs))\n )\n\n if hasattr(model, \"tie_weights\"):\n model.tie_weights() # make sure word embedding weights are still tied\n\n # Set model in evaluation mode to desactivate DropOut modules by default\n model.eval()\n\n if output_loading_info:\n loading_info = {\n \"missing_keys\": missing_keys,\n \"unexpected_keys\": unexpected_keys,\n \"error_msgs\": error_msgs,\n }\n return model, loading_info\n\n return model",
"def load_model(self):\n with open(self.args.trained_model, 'rb') as handle:\n self.model_hash = hashlib.sha224(handle.read()).hexdigest()\n\n self.model.load(self.args.trained_model)\n self.logger.debug('Loaded model from %s', self.args.trained_model)\n return",
"def custom_model():\n\t# initialize the model\n\t# load weights from path\n\t# returns model\n\tmodel = mlp.get_training_model()\n\tmodel.load_state_dict(torch.load(\"model_wt.pth\"))\n\treturn model",
"def load_pretrained_model(model, pretrained_model_path, verbose=False):\n\n if isinstance(pretrained_model_path, str):\n if not os.path.exists(pretrained_model_path):\n raise IOError(\n \"Can't find pretrained model: {}\".format(pretrained_model_path)\n )\n\n print(\"Loading checkpoint from '{}'\".format(pretrained_model_path))\n pretrained_state = torch.load(pretrained_model_path)[\"state_dict\"]\n else:\n # incase pretrained model weights are given\n pretrained_state = pretrained_model_path\n\n print(len(pretrained_state), \" keys in pretrained model\")\n\n current_model_state = model.state_dict()\n print(len(current_model_state), \" keys in current model\")\n pretrained_state = {\n key: val\n for key, val in pretrained_state.items()\n if key in current_model_state and val.size() == current_model_state[key].size()\n }\n\n print(\n len(pretrained_state),\n \" keys in pretrained model are available in current model\",\n )\n current_model_state.update(pretrained_state)\n model.load_state_dict(current_model_state)\n\n if verbose:\n non_available_keys_in_pretrained = [\n key\n for key, val in pretrained_state.items()\n if key not in current_model_state\n or val.size() != current_model_state[key].size()\n ]\n non_available_keys_in_current = [\n key\n for key, val in current_model_state.items()\n if key not in pretrained_state or val.size() != pretrained_state[key].size()\n ]\n\n print(\n \"not available keys in pretrained model: \", non_available_keys_in_pretrained\n )\n print(\"not available keys in current model: \", non_available_keys_in_current)\n\n return model",
"def load(self):\n if self.model is None:\n raise Exception(\"Build the model first.\")\n\n print(\"Loading model checkpoint {} ...\\n\".format(self.config[\"model\"][\"restore_model\"]))\n self.model.load_weights(self.config[\"model\"][\"restore_model\"])\n print(\"Model loaded!\")",
"def load(self):\n if self.model is None:\n raise Exception(\"Build the model first.\")\n\n print(\"Loading model checkpoint {} ...\\n\".format(self.config[\"model\"][\"restore_model\"]))\n self.model.load_weights(self.config[\"model\"][\"restore_model\"])\n print(\"Model loaded!\")",
"def load_pre_trained_model_state(saved_state_file_name):\n saved_state_file_name = saved_state_file_name if saved_state_file_name else \"my_checkpoint.pth\"\n loaded_state = torch.load(saved_state_file_name)\n\n pre_trained_model = models.vgg16(pretrained=True)\n pre_trained_model.name = \"vgg16\"\n\n for param in pre_trained_model.parameters():\n param.requires_grad = False\n\n pre_trained_model.class_to_idx = loaded_state['class_to_idx']\n pre_trained_model.classifier = loaded_state['classifier']\n pre_trained_model.load_state_dict(loaded_state['state_dict'])\n \n return pre_trained_model",
"def get_trained_model(self, Model, X, y, params=None):\n assert self.is_trained, \"You need to train the models before getting them\"\n return self.model_save",
"def load_model():\n global model_tok, model_mlm, model, model_cls\n if model is None:\n model_name_or_path = os.getenv('TRANSFORMER_MODEL', default='distilbert-base-multilingual-cased')\n # 'bert-base-multilingual-cased'\n model_tok = AutoTokenizer.from_pretrained(model_name_or_path)\n model_mlm = AutoModelForMaskedLM.from_pretrained(model_name_or_path)\n model_mlm.eval()\n model = model_mlm.base_model\n\n if isinstance(model_mlm, BertPreTrainedModel):\n model_cls = model_mlm.cls\n elif isinstance(model_mlm, DistilBertPreTrainedModel):\n model_cls = nn.Sequential(\n model_mlm.vocab_transform,\n nn.GELU(),\n model_mlm.vocab_layer_norm,\n model_mlm.vocab_projector\n )\n else:\n raise ValueError(f'{model_name_or_path} is not supported yet. try one of '\n f'{\", \".join(list(AvailableModels.__members__.keys()))}')\n model.to(device)\n model_mlm.to(device)\n # model_tok.to(device)\n model_cls.to(device)",
"def initiate(self):\n # if self.opt.checkpoint_encoder:\n # self.load(self.opt.checkpoint_encoder, self.opt.checkpoint_decoder)\n # else:\n # start fresh.\n self.model = Transformer(\n self.opt.src_vocab_size,\n self.opt.tgt_vocab_size,\n self.opt.max_token_seq_len,\n tgt_emb_prj_weight_sharing=self.opt.proj_share_weight,\n emb_src_tgt_weight_sharing=self.opt.embs_share_weight,\n d_k=self.opt.d_k,\n d_v=self.opt.d_v,\n d_model=self.opt.d_model,\n d_word_vec=self.opt.d_word_vec,\n d_inner=self.opt.d_inner_hid,\n n_layers=self.opt.layers,\n n_head=self.opt.n_head,\n dropout=self.opt.dropout).to(self.device)\n \n for p in self.model.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)",
"def load_trained_model(filename = 'pricing_model.p'):\n # with ZipFile(\"model.zip\",\"r\") as w:\n # w.extractall()\n \n with open(filename, 'rb') as model:\n pricingmodel = pickle.load(model)\n \n # pricingmodel.Model_made = tf.keras.models.load_model(\"Model_made.h5\")\n # pricingmodel.Model_claim = tf.keras.models.load_model(\"Model_claim.h5\")\n \n \n return pricingmodel",
"def restore(self, model_file, head_i=0, trunk=False):\n if trunk:\n self.model_trunk.load_weights(model_file)\n else:\n self.models[head_i].load_weights(model_file)\n self.model = self.models[head_i]",
"def _predict_preproc_model(self, model_cfg, model,):\n model = self._make_model(model_cfg['model_name'], databunch=self._data)\n model.model_param = model_cfg['model_param']\n model.wrapper_params = model_cfg['wrapper_params']\n return(model)",
"def load_model(self):\n self.pred_net.load((self.save_path / \"iqn_pred_net\").absolute().as_posix())\n self.target_net.load((self.save_path / \"iqn_target_net\").absolute().as_posix())"
] | [
"0.6822754",
"0.6722586",
"0.6549904",
"0.6507627",
"0.6432653",
"0.64292246",
"0.6409875",
"0.6334059",
"0.6267043",
"0.6259309",
"0.6258022",
"0.6249631",
"0.6218196",
"0.6192777",
"0.618744",
"0.6157598",
"0.6148145",
"0.6140525",
"0.61373776",
"0.6136138",
"0.6130443",
"0.6130443",
"0.61270684",
"0.61258173",
"0.61182684",
"0.61168486",
"0.61075497",
"0.60953087",
"0.60939044",
"0.6081079"
] | 0.69689536 | 0 |
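A short, hedged sketch of the fine-tuning flow that get_pretrain_model supports: copy a selected checkpoint from a pre-trained directory into a fresh model directory as step 0 before training resumes. The directory names and import path are assumptions.

# Hypothetical fine-tuning setup; import path and directory layout are assumptions.
import os
from misc.utils import get_pretrain_model

pretrain_nnet = "exp/xvector_pretrain/nnet"
finetune_nnet = "exp/xvector_finetune/nnet"

if not os.path.isdir(finetune_nnet):
    os.makedirs(finetune_nnet)

# checkpoint may be "last", "-1" (best epoch according to valid_loss) or an explicit step number.
get_pretrain_model(pretrain_nnet, finetune_nnet, checkpoint="-1")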
Load learning rate from a saved file | def load_lr(filename):
learning_rate_array = []
with open(filename, "r") as f:
for line in f.readlines():
_, lr = line.strip().split(" ")
learning_rate_array.append(float(lr))
return learning_rate_array | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load(self, filename):\n param_dict = pickle.load(open('%s' % filename, 'rb'))\n self.learningrate = param_dict['learningrate']\n self.verbose = param_dict['verbose']\n self._loadsize = param_dict['loadsize']\n self._batchsize = param_dict['batchsize']\n self.momentum = param_dict['momentum']\n self.epochcount = param_dict['epochcount']\n self._momentum_batchcounter = param_dict['momentum_batchcounter']\n for param_name in param_dict['incs'].keys():\n for p in self._params:\n if p.name == param_name:\n self._incs[p].set_value(param_dict['incs'][param_name])\n if self.rmsprop is not None:\n for param_name in param_dict['avg_grad_sqrs'].keys():\n for p in self._params:\n if p.name == param_name:\n self._avg_grad_sqrs[p].set_value(param_dict['avg_grad_sqrs'][param_name])\n self._numbatches = self._loadsize // self._batchsize\n if self._inputs_type != 'function':\n self._numloads = self._inputs.shape[0] // self._loadsize\n if self._inputs_type == 'h5':\n self._inputs_theano.set_value(\n self._inputs.read(stop=self._loadsize))\n else:\n self._inputs_theano.set_value(self._inputs[:self._loadsize])",
"def load(self, filename, path=\".\"):\n if filename is None:\n if self.verbose:\n print(\"Neural Network Model Class - Save Function: No file name\")\n return -1\n\n #trn_params\n self.trn_params = NeuralNetworkParams()\n self.trn_params.load('%s_trn_params.pickle'%(filename),path=path)\n\n #model\n json_file = open(\"%s/%s_model.json\"%(path,filename), 'r')\n loaded_model_json = json_file.read()\n json_file.close()\n loaded_model = model_from_json(loaded_model_json)\n loaded_model.load_weights(\"%s/%s_model.h5\"%(path,filename))\n self.model = loaded_model\n self.trained = True\n #trn_desc\n self.trn_desc = None\n self.trn_desc = pickle.load(open(\"%s/%s_trn_desc.pickle\"%(path,filename), \"rb\"))",
"def load(self, filename):\n self.model.load_weights(filename)",
"def load_nn(self, filename):\n self.weights_and_biases = (np.load(filename, allow_pickle=True)).tolist()\n print('Weights and biases are loaded')",
"def load_model(self, file_name):\n with open(file_name, 'rb') as file:\n self.lin_reg = pickle.load(file)",
"def load(self, filename):\n with open(filename, 'rb') as f:\n data = pickle.load(f)\n # Set biases and weights\n self.W_input_to_hidden = data['wi']\n self.W_hidden_to_output = data['wo']",
"def _load_flexible_state_dict(model: Module, path: str) -> float:\n\n checkpoint = torch.load(path)\n\n learning_rate = checkpoint.get(\"learning_rate\", 1.0)\n # can get learning rate from optimizer state_dict?\n\n if \"module.\" in next(iter(checkpoint)):\n if isinstance(model, nn.DataParallel):\n model.load_state_dict(checkpoint)\n else:\n model = nn.DataParallel(model)\n model.load_state_dict(checkpoint)\n model = model.module\n else:\n if isinstance(model, nn.DataParallel):\n model = model.module\n model.load_state_dict(checkpoint)\n model = nn.DataParallel(model)\n else:\n model.load_state_dict(checkpoint)\n\n return learning_rate",
"def load(self, filename):\n\n c = torch.load(filename)\n\n if type(c) is dict:\n sd = c['state_dict']\n self.net.load_state_dict(sd)\n if 'monitors' in c: # Remove the branching eventually\n self.monitors = c['monitors']\n else:\n self.monitors = {'loss_train': c['train_monitor'], 'loss_val': c['val_monitor'],\n 'accu_train': MetricHistory(), 'accu_val': MetricHistory()}\n if 'optimizer' in c: # Remove the branching eventually\n self.optimizer.load_state_dict(c['optimizer'])\n else:\n raise RuntimeError('Unsupported checkpoint. (Not a dict)')\n\n self.parent = filename\n self.last_checkpoint = filename\n self.start_epoch = self.monitors['loss_train'].num_epochs",
"def load_model(self):\n if os.stat('code/lr-model.pt').st_size == 0:\n return\n params = torch.load('code/lr-model.pt')\n self.set_params(params)",
"def load(self, filename):\n with open(filename, 'rb') as f:\n data = pickle.load(f)\n # Set biases and weights\n self.W_input_to_hidden = data['wi']\n self.W_hidden_to_hidden = data['wh']\n self.W_hidden_to_output = data['wo']",
"def learning_rate(epoch):\n self.lr = self.lr / 1.00000001\n return self.lr",
"def load(self, path, nr_of_saves, test_it=-1):\n with self.graph.as_default():\n print(\"Loading networks...\")\n checkpoint_dir = os.path.join(os.environ['APPROXIMATOR_HOME'], path, \"network-\"+str(test_it))\n self.saver = tf.train.Saver(max_to_keep=nr_of_saves+1)\n try:\n self.saver.restore(self.sess, checkpoint_dir)\n print(\"Loaded: {}\".format(checkpoint_dir))\n except Exception:\n if test_it <= 0:\n # Initialize the variables\n self.sess.run(tf.global_variables_initializer())\n print(\"Failed! Initializing the network variables...\")\n else:\n raise",
"def train(self, trainfile):",
"def load_checkpoint(self, path: str = '', train: bool = True) -> int:\n\n if not path:\n dir_ = os.path.dirname(os.path.realpath('__file__'))\n path = os.path.join(dir_, 'model.pt')\n\n try:\n ckpt = torch.load(path)\n except FileNotFoundError:\n return 0\n else:\n print('Loaded model at epoch: ', end='')\n\n self.load_state_dict(ckpt['model_state_dict'])\n self.actor_optimizer.load_state_dict(ckpt['ac_optim_dict'])\n self.critic_optimizer.load_state_dict(ckpt['critic_optim_dict'])\n epoch = ckpt['epoch']\n\n print(epoch)\n\n if not train:\n self.eval()\n else:\n self.train()\n\n return epoch",
"def load(self, sess, file_path, verbose=True):\n if(verbose): print(\"Loading model from: \" + str(file_path))\n self.tf_saver.restore(sess, file_path)\n if(verbose): print(\"Done!\")",
"def load_model(self, ckpt_fn):\n checkpoint = torch.load(ckpt_fn)\n self.net_.load_state_dict(checkpoint[\"model\"])\n self.optimizer_.load_state_dict(checkpoint[\"optimizer\"])\n self.epoch_ = checkpoint[\"epoch\"]\n self.global_step_ = checkpoint[\"global_step\"]\n self.model_samples_ = deque(checkpoint[\"model_samples\"])\n self.sampler.load_state_dict(checkpoint[\"sampler_state\"])\n self.ais_loss.load_state_dict(checkpoint[\"ais_state\"])\n self.replay_prob = checkpoint[\"replay_prob\"]\n self.max_replay = checkpoint[\"max_replay\"]",
"def load_from_checkpoint(self, path):\n print(f'# loading trainer state from {path}')\n checkpoint = torch.load(path)\n self.load(checkpoint)",
"def load_weights(self, path=None):\n\n if path is None:\n path = self.checkpoints_dir\n\n self.model.load_weights(tf.train.latest_checkpoint(path))\n logging.info(f'\\tWeights loaded from {path}')",
"def _load_training_data(self):\n self._save_training_data()",
"def load_weights(self, path: str):\n self.load_state_dict(torch.load(path))",
"def load_weights(self, path: str):\n self.load_state_dict(torch.load(path))",
"def load_weights(self, filepath):\n self.model.load_weights(filepath)",
"def _load_next_file(self):\n\n gains = super()._load_next_file()\n self._time_ptr = 0\n\n return gains",
"def load_examples(filename):\r\n data = np.load(filename)\r\n return data['examples'], int(data['srate'])",
"def load_examples(filename):\r\n data = np.load(filename)\r\n return data['examples'], int(data['srate'])",
"def load(self, filename):\n with open(filename, 'r') as f:\n self.pca.set_params(pickle.load(f))\n self.fit = True",
"def load(self, filename):\n with open(filename, 'r') as f:\n self.max_val = pickle.load(f)",
"def load_checkpoint(self, file):\n \"\"\"Load \"\"\"\n chkpnt = torch.load(file)\n self.load_state_dict(chkpnt['model_state_dict'])",
"def load_model(self, filename):\r\n pass",
"def load_training_data(file_path):\n return load_data(file_path)"
] | [
"0.67811286",
"0.6583575",
"0.6550467",
"0.6278531",
"0.62552196",
"0.619063",
"0.61676574",
"0.6159735",
"0.61502033",
"0.614148",
"0.61373883",
"0.61225915",
"0.61004525",
"0.6077979",
"0.605477",
"0.59805137",
"0.59794164",
"0.5961897",
"0.5954225",
"0.5944981",
"0.5944981",
"0.5936739",
"0.5934484",
"0.5926107",
"0.5926107",
"0.5888774",
"0.58833677",
"0.5883207",
"0.58789134",
"0.5877967"
] | 0.71505105 | 0 |
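load_lr above expects one "step learning_rate" pair per line. A minimal sketch of a matching writer, under the assumption that the training loop appends one line per checkpoint; the file name and call site are illustrative.

# Hypothetical counterpart writer for the file parsed by load_lr.
def save_lr(filename, step, lr):
    # Append one "step lr" pair per line, the format load_lr parses.
    with open(filename, "a") as f:
        f.write("%d %.8f\n" % (step, lr))

# Example usage:
# save_lr("exp/xvector/nnet/learning_rate", 10000, 0.001)
# lr_history = load_lr("exp/xvector/nnet/learning_rate")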
Load valid loss from a saved file | def load_valid_loss(filename):
min_loss = ValidLoss()
with open(filename, "r") as f:
for line in f.readlines():
epoch, loss = line.strip().split(" ")[:2]
epoch = int(epoch)
loss = float(loss)
if loss < min_loss.min_loss:
min_loss.min_loss = loss
min_loss.min_loss_epoch = epoch
return min_loss | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_checkpoint(self, file):\n \"\"\"Load \"\"\"\n chkpnt = torch.load(file)\n self.load_state_dict(chkpnt['model_state_dict'])",
"def load_network(self, sess, filename):\n dir_path = os.path.dirname(os.path.realpath(__file__))\n dir_path += '/Models/'\n dir_path += filename\n saver = tf.train.Saver()\n saver.restore(sess, dir_path)",
"def load(self, sess, file_path, verbose=True):\n if(verbose): print(\"Loading model from: \" + str(file_path))\n self.tf_saver.restore(sess, file_path)\n if(verbose): print(\"Done!\")",
"def load_checkpoint(filename: str) -> CheckpointData:\n return torch.load(filename)",
"def load(self, filename):\n\n c = torch.load(filename)\n\n if type(c) is dict:\n sd = c['state_dict']\n self.net.load_state_dict(sd)\n if 'monitors' in c: # Remove the branching eventually\n self.monitors = c['monitors']\n else:\n self.monitors = {'loss_train': c['train_monitor'], 'loss_val': c['val_monitor'],\n 'accu_train': MetricHistory(), 'accu_val': MetricHistory()}\n if 'optimizer' in c: # Remove the branching eventually\n self.optimizer.load_state_dict(c['optimizer'])\n else:\n raise RuntimeError('Unsupported checkpoint. (Not a dict)')\n\n self.parent = filename\n self.last_checkpoint = filename\n self.start_epoch = self.monitors['loss_train'].num_epochs",
"def load(self):\n self.word2vec, self.img2sentence, self.word_freq, self.num_words, self.word2idx, self.idx2word = pickle.load(open(self.save_file, 'rb'))",
"def load_checkpoint(filename, from_gpu=True):\r\n assert os.path.exists(filename)\r\n if from_gpu:\r\n return torch.load(filename)\r\n else:\r\n return torch.load(filename, map_location=lambda storage, loc: storage)",
"def load_eval(saver, session, load_dir):\n saver.restore(session, load_dir)\n print('model loaded successfully')\n return extract_step(load_dir)",
"def load_model(self, ckpt_fn):\n checkpoint = torch.load(ckpt_fn)\n self.net_.load_state_dict(checkpoint[\"model\"])\n self.optimizer_.load_state_dict(checkpoint[\"optimizer\"])\n self.epoch_ = checkpoint[\"epoch\"]\n self.global_step_ = checkpoint[\"global_step\"]\n self.model_samples_ = deque(checkpoint[\"model_samples\"])\n self.sampler.load_state_dict(checkpoint[\"sampler_state\"])\n self.ais_loss.load_state_dict(checkpoint[\"ais_state\"])\n self.replay_prob = checkpoint[\"replay_prob\"]\n self.max_replay = checkpoint[\"max_replay\"]",
"def load_model(save_folder, filename):\n print(\"Warning: Make sure older models with this name have been trained on the same features! Otherwise,\"\n \"if the lengths of the features the model has been trained on, differ, an error will occur!\")\n import pickle\n path = save_folder + filename\n with open(path, 'rb') as handle:\n return pickle.load(handle)",
"def load_model(self, filename):\r\n pass",
"def load_checkpoint(self, label):\n model_dir = os.path.join(\n config.results_dir, config.experiment_name, 'checkpoints')\n model_file = os.path.join(model_dir, '{}_net.pth.tar'.format(label))\n\n print(\"Loading model from {}\".format(model_file))\n model_dict = torch.load(model_file)\n\n self.use_cuda = model_dict['use_cuda']\n if self.use_cuda:\n self.net.cuda()\n\n self.net.load_state_dict(model_dict['net_state_dict'])",
"def save_loss(self, path='./loss'):\n np.save(path, self.critic_losses)",
"def load(self, filename, path=\".\"):\n if filename is None:\n if self.verbose:\n print(\"Neural Network Model Class - Save Function: No file name\")\n return -1\n\n #trn_params\n self.trn_params = NeuralNetworkParams()\n self.trn_params.load('%s_trn_params.pickle'%(filename),path=path)\n\n #model\n json_file = open(\"%s/%s_model.json\"%(path,filename), 'r')\n loaded_model_json = json_file.read()\n json_file.close()\n loaded_model = model_from_json(loaded_model_json)\n loaded_model.load_weights(\"%s/%s_model.h5\"%(path,filename))\n self.model = loaded_model\n self.trained = True\n #trn_desc\n self.trn_desc = None\n self.trn_desc = pickle.load(open(\"%s/%s_trn_desc.pickle\"%(path,filename), \"rb\"))",
"def load_checkpoint(model, save_path):\n model.load_state_dict(torch.load(save_path))",
"def load_model(self, file_name):\n with open(file_name, 'rb') as file:\n self.lin_reg = pickle.load(file)",
"def load_nn(self, filename):\n self.weights_and_biases = (np.load(filename, allow_pickle=True)).tolist()\n print('Weights and biases are loaded')",
"def load(self, filename):\n self.model.load_weights(filename)",
"def load_model(model_file, model_step_file, loss):\n logging.log(logging.INFO, f\"Loading model: {model_file}\")\n\n model = tf.keras.models.load_model(\n model_file, custom_objects={\"loss\": loss})\n\n model_epochs, global_step, metric, val_score, best_score = file_io.read_csv(\n model_step_file)[0]\n\n model_epochs = int(model_epochs)\n global_step = int(global_step)\n val_score = float(val_score)\n best_score = float(best_score)\n\n logging.log(\n logging.INFO,\n f\"Model trained for {model_epochs} epochs ({global_step} steps)\")\n logging.log(\n logging.INFO,\n f\"Validation: current {metric}: {val_score:.5f}, previous best \"\n f\"{metric}: {best_score:.5f}\")\n\n return model, (global_step, model_epochs, val_score, best_score)",
"def load_gold(train_gold_file):\n with codecs.open(train_gold_file, 'r', 'utf-8') as f_in:\n lines = [line.strip().split('\\t') for line in f_in]\n\n train_gold = { (w1, w2) : {} for (w1, w2, paraphrase, score) in lines }\n for w1, w2, paraphrase, score in lines:\n train_gold[(w1, w2)][paraphrase] = float(score)\n\n return train_gold",
"def load(self, filename):\n with open(filename, 'rb') as f:\n data = pickle.load(f)\n # Set biases and weights\n self.W_input_to_hidden = data['wi']\n self.W_hidden_to_output = data['wo']",
"def load_game(self):\n print('Game loaded!')\n return pickle.load(open(\"save.dat\", 'rb'))",
"def load_checkpoint(self, filename, load_optim=True):\n extra_state, optim_history, last_optim_state = \\\n utils.load_model_state(filename, self.get_model())\n\n if last_optim_state is not None:\n # rebuild optimizer after loading model, since params may have changed\n #self.optimizer = optim.build_optimizer(self.args, self.model.parameters())\n self.lr_scheduler = lr_scheduler.build_lr_scheduler(self.args, self.optimizer)\n\n if load_optim:\n self._optim_history = optim_history\n # only reload optimizer and lr_scheduler if they match\n last_optim = self._optim_history[-1]\n if last_optim['criterion_name'] == self.criterion.__class__.__name__:\n self.lr_scheduler.load_state_dict(last_optim['lr_scheduler_state'])\n if last_optim['optimizer_name'] == self.optimizer.__class__.__name__:\n self.optimizer.load_state_dict(last_optim_state)\n\n self._num_updates = last_optim['num_updates']\n\n return extra_state",
"def load_model(self, win_len, axis):\n\n print('Load model')\n if 'ouisir' not in self.name_dataset.lower():\n self.feature_extractor = resnet2D(\n False, 0, 0, stride=2, feature_generator=True)\n else:\n self.feature_extractor = resnet2D(\n False, 0, 0, stride=2, feature_generator=True)\n self.feature_extractor.build((None, win_len, axis, 1))\n self.feature_extractor.load_weights(\n self.path_save_model + self.name_model + '.h5', by_name=True)\n\n print('Load mean and std')\n self.mean = np.load(self.path_save_model + 'mean.npy')\n self.std = np.load(self.path_save_model + 'std.npy')",
"def _load_restored(self, dataset_path):\n for group in ['knowledge', 'source', 'target']:\n if getattr(self, group + '_format') != 'none':\n text_data = load_restored(dataset_path, group + '.', ignore_file='vocab')[0]\n setattr(self, group + '_text_data', text_data)\n idx2token, token2idx = load_restored(dataset_path, ignore_file='data')\n setattr(self, 'idx2token', idx2token)\n setattr(self, 'token2idx', token2idx)\n self.max_vocab_size = len(self.idx2token)\n self.logger.info(\"Restore finished!\")",
"def load(self, filename):\n with open(filename, 'rb') as f:\n data = pickle.load(f)\n # Set biases and weights\n self.W_input_to_hidden = data['wi']\n self.W_hidden_to_hidden = data['wh']\n self.W_hidden_to_output = data['wo']",
"def load_model(self):\n self.pred_net.load((self.save_path / \"iqn_pred_net\").absolute().as_posix())\n self.target_net.load((self.save_path / \"iqn_target_net\").absolute().as_posix())",
"def load_checkpoint(self, model):\n print(f\"load model {self.save_model_path}\")\n model.load_state_dict(torch.load(self.save_model_path))",
"def test_load():\n\n # make a model, train then save it\n model, X, y, Xval, yval = make_small_model()\n loss = tf.keras.losses.CategoricalCrossentropy(\n from_logits=False, reduction=tf.losses.Reduction.NONE\n )\n model.compile(loss=loss, optimizer=None)\n model.fit(X, y, validation_data=(Xval, yval), epochs=EPOCHS, batch_size=20)\n model.save(\"keras_save.tf\")\n\n # won't load with invalid names\n ok, _ = safekeras.load_safe_keras_model()\n assert ok is False, \"can't load with no model file name\"\n\n ok, _ = safekeras.load_safe_keras_model(\"keras_save.h5\")\n assert ok is False, \"can only load from .tf file\"\n\n # should load fine with right name\n ok, reloaded_model = safekeras.load_safe_keras_model(\"keras_save.tf\")\n assert ok is True\n ypred = \"over-write-me\"\n ypred = reloaded_model.predict(X)\n assert isinstance(ypred, np.ndarray)\n\n cleanup_file(\"keras_save.tf\")\n cleanup_file(\"tfsaves\")",
"def load(self, load_file, quiet=False):\n if not os.path.isfile(load_file):\n print(\"ERROR: File does not exist\")\n exit(1)\n else:\n state = torch.load(load_file)\n self.model.load_state_dict(state['model'])\n self.optimizer.load_state_dict(state['optimizer'])\n if not quiet: print(\"Model and optimizer states loaded successfully!\")"
] | [
"0.6473382",
"0.63657707",
"0.61991316",
"0.6178769",
"0.6170918",
"0.59643334",
"0.5960667",
"0.5960353",
"0.5944361",
"0.59310794",
"0.5908972",
"0.5874426",
"0.58355534",
"0.5824624",
"0.5812557",
"0.5805543",
"0.580154",
"0.57972693",
"0.5791554",
"0.57738537",
"0.57725614",
"0.57508767",
"0.57445204",
"0.57417053",
"0.5725475",
"0.5721334",
"0.57191974",
"0.57141906",
"0.5701896",
"0.5697237"
] | 0.75367886 | 0 |
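load_valid_loss above parses lines of the form "epoch loss eer" and keeps the minimum loss. A hedged usage sketch follows; the file path is an assumption.

# Hypothetical usage of load_valid_loss; the path is an assumption.
best = load_valid_loss("exp/xvector/nnet/valid_loss")
print("Best validation loss %.4f at epoch %d" % (best.min_loss, best.min_loss_epoch))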
Compute pairwise EER using cosine similarity. The EER is estimated with interp1d and brentq, so it is not an exact value and may differ slightly between runs. | def compute_cos_pairwise_eer(embeddings, labels, max_num_embeddings=1000):
embeddings /= np.sqrt(np.sum(embeddings ** 2, axis=1, keepdims=True) + 1e-12)
num_embeddings = embeddings.shape[0]
if num_embeddings > max_num_embeddings:
# Downsample the embeddings and labels
        step = num_embeddings // max_num_embeddings  # integer stride so it can be used in range()
embeddings = embeddings[range(0, num_embeddings, step), :]
labels = labels[range(0, num_embeddings, step)]
num_embeddings = embeddings.shape[0]
score_mat = np.dot(embeddings, np.transpose(embeddings))
    scores = np.zeros(num_embeddings * (num_embeddings - 1) // 2)
    keys = np.zeros(num_embeddings * (num_embeddings - 1) // 2)
index = 0
for i in range(num_embeddings - 1):
for j in range(i + 1, num_embeddings):
scores[index] = score_mat[i, j]
keys[index] = 1 if labels[i] == labels[j] else 0
index += 1
fpr, tpr, thresholds = metrics.roc_curve(keys, scores, pos_label=1)
eer = brentq(lambda x: 1. - x - interp1d(fpr, tpr)(x), 0., 1.)
# thresh = interp1d(fpr, thresholds)(eer)
with open("test.txt", "w") as f:
        for i in range(len(scores)):
            if keys[i] == 1:
                f.write("%f target\n" % scores[i])
            else:
                f.write("%f nontarget\n" % scores[i])
return eer | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def compute_EER(self, FAR, FRR):\r\n print('Computing EER')\r\n distance = abs(FAR - FRR)\r\n min_distance = min(distance)\r\n idx = np.where(distance == min_distance)\r\n return np.mean((FAR[idx] + FRR[idx]) / 2)",
"def similarity(self, e1, e2):\n\t\tpass",
"def E(q, r0, x, y):\n den = np.hypot(x - r0[0], y - r0[1]) ** 3\n return q * (x - r0[0]) / den, q * (y - r0[1]) / den",
"def cos_anneal(e0, e1, t0, t1, e):\n alpha = max(0, min(1, (e - e0) / (e1 - e0))) # what fraction of the way through are we\n alpha = 1.0 - math.cos(alpha * math.pi / 2) # warp through cosine\n t = alpha * t1 + (1 - alpha) * t0 # interpolate accordingly\n return t",
"def _EStep(x, centers):\n nbitem = x.shape[0]\n z = - np.ones(nbitem).astype(np.int_)\n mindist = np.inf * np.ones(nbitem)\n k = centers.shape[0]\n for q in range(k):\n dist = np.sum((x - centers[q]) ** 2, 1)\n z[dist < mindist] = q\n mindist = np.minimum(dist, mindist)\n J = mindist.sum()\n return z, J",
"def uncertainty_ee(self,e1,e2):\n # reco\n unc = (self._eleRecoWeight[(e1.pt(),e1.eta())][1]/self._eleRecoWeight[(e1.pt(),e1.eta())][0] + \\\n self._eleRecoWeight[(e2.pt(),e2.eta())][1]/self._eleRecoWeight[(e2.pt(),e2.eta())][0])**2\n # id-isolation\n unc += (self._eleIdIsoWeight[(e1.pt(),e1.eta())][1]/self._eleIdIsoWeight[(e1.pt(),e1.eta())][0] + \\\n self._eleIdIsoWeight[(e2.pt(),e2.eta())][1]/self._eleIdIsoWeight[(e2.pt(),e2.eta())][0])**2\n # trigger (approximate)\n unc += (abs(self._ele8TrgWeight[(e1.pt(),e1.eta())][0]*self._ele17TrgWeight[(e2.pt(),e2.eta())][1]+ \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][1]*self._ele8TrgWeight[(e2.pt(),e2.eta())][0]- \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][1]*self._ele17TrgWeight[(e2.pt(),e2.eta())][0]- \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][0]*self._ele17TrgWeight[(e2.pt(),e2.eta())][1])/ \\\n (self._ele8TrgWeight[(e1.pt(),e1.eta())][0]*self._ele17TrgWeight[(e2.pt(),e2.eta())][0]+ \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][0]*self._ele8TrgWeight[(e2.pt(),e2.eta())][0]- \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][0]*self._ele17TrgWeight[(e2.pt(),e2.eta())][0]))**2\n unc += ((self._ele8TrgWeight[(e1.pt(),e1.eta())][1]*self._ele17TrgWeight[(e2.pt(),e2.eta())][0]+ \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][0]*self._ele8TrgWeight[(e2.pt(),e2.eta())][1])/ \\\n (self._ele8TrgWeight[(e1.pt(),e1.eta())][0]*self._ele17TrgWeight[(e2.pt(),e2.eta())][0]+ \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][0]*self._ele8TrgWeight[(e2.pt(),e2.eta())][0]- \\\n self._ele17TrgWeight[(e1.pt(),e1.eta())][0]*self._ele17TrgWeight[(e2.pt(),e2.eta())][0]))**2\n #outcome\n return sqrt(unc)",
"def extended_euclidean(self):\n self.a = gmpy2.invert(self.e1, self.e2)\n self.b = (float(self.gcd(self.e1, self.e2)-(self.a*self.e1)))/float(self.e2)",
"def pair_energy(self,e, s, r):\n return 4.0*e*((s/r)**12-(s/r)**6)",
"def qe(np_points, np_centers):\n a1 = np.sum(np.power(np_points, 2), axis=1)\n a2 = np.dot(np_points, np_centers.T)\n a3 = np.sum(np.power(np_centers, 2), axis=1)\n dist = - 2*a2 + a3[np.newaxis, :]\n mindist = np.min(dist, axis=1) + a1\n error = np.sum(mindist)\n return error",
"def dist_eccen(self, e):\r\n\r\n return self.uniform(e, self.erange)",
"def energy_func(self):\n i = self.inl[0].to_flow()\n o = self.outl[0].to_flow()\n\n T_m = (T_mix_ph(i, T0=self.inl[0].T.val_SI) +\n T_mix_ph(o, T0=self.outl[0].T.val_SI)) / 2\n\n iam = (\n 1 - self.iam_1.val * abs(self.aoi.val) -\n self.iam_2.val * self.aoi.val ** 2)\n\n return (i[0] * (o[2] - i[2]) -\n self.A.val * (\n self.E.val * self.eta_opt.val * self.doc.val ** 1.5 * iam -\n (T_m - self.Tamb.val_SI) * self.c_1.val -\n self.c_2.val * (T_m - self.Tamb.val_SI) ** 2))",
"def evaluate(self, X1, X2):\r\n\r\n \"\"\"YOUR CODE HERE FOR Q1.1\"\"\"\r\n # raise NotImplementedError()\r\n n1, d = X1.shape\r\n n2, _ = X2.shape\r\n D = euclidean_dist_squared(X1, X2)\r\n return np.exp(-D / (2 * self.sigma**2))",
"def test_equivalence():\n\t\n\tfrom . import spectra as sp\n\t\n\t#analytic\n\tp_dict = {'Bfield':15000,'rb85frac':1,'Btheta':0*np.pi/180,'Bphi':0*np.pi/180,'lcell':1e-3,'T':84,'Dline':'D2','Elem':'Rb'}\n\tchiL1,chiR1,chiZ1 = sp.calc_chi([-18400],p_dict)\n\tRotMat1, n11, n21 = solve_diel(chiL1,chiR1,chiZ1,0,150,force_numeric=False)\n\t\n\t#numeric\n\tchiL2, chiR2, chiZ2 = chiL1, chiR1, chiZ1\n\t#chiL2,chiR2,chiZ2 = sp.calc_chi([-18400],p_dict)\n\tRotMat2, n12, n22 = solve_diel(chiL2,chiR2,chiZ2,0,150,force_numeric=True)\n\t\n\tprint('RM 1')\n\tprint(RotMat1)\n\n\tprint('RM 2')\n\tprint(RotMat2)\t\n\t\n\tprint('n1_1 (analytic)')\n\tprint(n11)\n\tprint('n1_2')\n\tprint(n12)\n\tprint('n2_1 (analytic)')\n\tprint(n21)\n\tprint('n2_2')\n\tprint(n22)\n\t\n\tprint('chi1')\n\tprint((chiL1, chiR1, chiZ1))\n\n\tprint('chi2')\n\tprint((chiL2, chiR2, chiZ2))",
"def euc_dist(self, squared=True):",
"def quick_e_score(self, n1, n2):\n if n1.needs_update:\n n1._update()\n if n2.needs_update:\n n2._update()\n dists = cdist(n1.mat, n2.mat,'cosine')\n return -np.max(dists)",
"def _inv_epsilon_eval(z, A, ord=2):\n z=np.array(z)\n A=np.array(A)\n zc = complex(z[0], z[1])\n try :\n iep = spl.norm(spl.inv(zc*np.eye(*A.shape)-A),ord=ord)\n except TypeError:\n if ord==\"svd\":\n iep = 1/np.min(spl.svdvals(zc*np.eye(*A.shape)-A))\n else: raise Exception(\"invalid method\")\n return iep",
"def test_eccentric_anomaly(ecc, manom, tol=1e-8):\n # 2012-10-15 21:46 IJMC: Created, for my own curiosity.\n from time import time\n\n e0 = np.zeros(manom.size)\n e1 = np.zeros(manom.size)\n e2 = np.zeros(manom.size)\n e3 = np.zeros(manom.size)\n\n tic = time()\n for ii,element in enumerate(manom):\n def kep(e): return element - e + ecc*sin(e)\n e0[ii] = optimize.brentq(kep, element-1, element+1, xtol=tol, disp=False)\n toc0 = time() - tic\n\n tic = time()\n for ii,element in enumerate(manom):\n def kep(e): return element - e + ecc*sin(e)\n e1[ii] = optimize.newton(kep, ecc, tol=tol)\n toc1 = time() - tic\n\n tic = time() \n guessfactor = np.pi * (ecc+0.01) / 0.81 # guess=pi for ecc=0.8\n for ii,element in enumerate(manom): # Explicit Newton's method\n err = tol*10\n val = guessfactor\n while np.abs(err) > tol:\n err = (element + ecc*np.sin(val) - val) / (1. - ecc*np.cos(val))\n val += err\n e2[ii] = val\n toc2 = time() - tic\n \n tic = time()\n for ii,element in enumerate(manom): # simple iteration:\n err = tol*10\n oldval = 0.\n while np.abs(err) > tol:\n val = element + ecc * np.sin(oldval)\n err = val - oldval\n oldval = val\n e3[ii] = val\n toc3 = time() - tic\n \n print \"SciPy BrentQ: [%1.6f, %1.6f, ....] -- %1.4f s\" % (e0[0], e0[1], toc0)\n print \"SciPy Newton: [%1.6f, %1.6f, ....] -- %1.4f s\" % (e1[0], e1[1], toc1)\n print \"Explicit Newton: [%1.6f, %1.6f, ....] -- %1.4f s\" % (e2[0], e2[1], toc2)\n print \"Simple iteration: [%1.6f, %1.6f, ....] -- %1.4f s\" % (e3[0], e3[1], toc3)\n return",
"def _epsilon_eval(z, A, ord=2):\n z=np.array(z)\n A=np.array(A)\n zc = complex(z[0], z[1])\n try :\n ep = 1/spl.norm(spl.inv(zc*np.eye(*A.shape)-A),ord=ord)\n # ep = spl.norm(zc*np.eye(*A.shape)-A,ord=ord)\n except TypeError:\n if ord==\"svd\":\n ep = np.min(spl.svdvals(zc*np.eye(*A.shape)-A))\n else: raise Exception(\"invalid method\")\n return ep",
"def pairwise_energy(R,V,M,G):\r\n d = util.pairwise_enod(R) # relative distances between all bodies\r\n v = util.pairwise_enod(V) # relative velocities between all bodies\r\n \r\n N = R.shape[0]\r\n E = np.zeros(int(N*(N-1)/2))\r\n pair_index = 0\r\n for n in range(0,N-1):\r\n for nn in range (n+1, N):\r\n E[pair_index] = -(G*M[n]*M[nn] / d[pair_index]) + 0.5 * (M[n]*M[nn])/(M[n]+M[nn]) * (v[pair_index]**2)\r\n pair_index = pair_index+1 \r\n return E",
"def calculate_E0(self) -> float:\n noisy = self.kernel_eigenvectors_[-1].copy()\n np.random.shuffle(noisy)\n\n kernel_eigenvectors = self.kernel_eigenvectors_[:-1]\n kernel_eigenvectors.append(noisy)\n\n eigenvectors_matrix = scipy.sparse.csr_matrix(\n np.column_stack([eigenvector for eigenvector in kernel_eigenvectors])\n )\n\n if len(kernel_eigenvectors) == 2:\n ev0 = kernel_eigenvectors[0]\n ev1 = kernel_eigenvectors[1]\n _, Gamma, _ = scipy.sparse.linalg.svds(\n ev0.T @ ev1, k=self.n_jointly_smooth_functions, which=\"LM\"\n )\n else:\n _, Gamma, _ = scipy.sparse.linalg.svds(\n eigenvectors_matrix, k=self.n_jointly_smooth_functions, which=\"LM\"\n )\n\n Gamma.sort()\n gamma2 = Gamma[-2]\n E0 = (1 + gamma2) / 2\n return E0",
"def calculate_E(self):\n \n E = 0\n for i in xrange(self.size):\n Ei = self.h[i]\n Ei += 0.5*sum((1 if self.spins[j] else -1)*self.J[i,j] for j in self.adjacency[i])\n if not self.spins[i]:\n Ei *= -1\n E += Ei\n \n return E",
"def _calc_Em(self):\n return (self.parameters.E0 +\n self.x * sqrt2 * self.parameters.sigma * self.mt)",
"def eccentricity(self):\n return self.b / self.a",
"def E_0_t(x_r, x_r_prime, w_n_1, candidate, d_prev):\n temp = (1. / SIGMA_V ** 2.) * \\\n w_n_1[x_r_prime] * \\\n linalg.norm(candidate - d_prev[x_r_prime]) ** 2.\n\n return temp",
"def _E(self, chi, eta, iota, gamma, vphi, gamma2, u1, u3, math_v, gamma3):\n c0 = gamma2 * (1 + u1 * np.cos(iota) * np.sin(eta) + u3 * np.sin(iota) * np.sin(eta))\n #c1 = -gamma2 * u1 + (1 + gamma2**2 * u1**2 / (1 + gamma2)) * np.cos(iota) * np.sin(eta) + \\\n # (gamma2 **2 * u1 * u3 / (1 + gamma2)) * np.sin(iota) * np.sin(eta)\n c3 = -gamma2 * u1 + (gamma2 **2 * u1 * u3 / (1 + gamma2)) * np.cos(iota) * np.sin(eta) + \\\n (1 + gamma2**2 * u3**2 / (1 + gamma2)) * np.sin(iota) * np.sin(eta)\n\n b0 = gamma3 * (c0 + math_v * c3)\n b3 = gamma3 * math_v * c0 + (1 + gamma3**2 * math_v**2 / (1 + gamma3)) * c3\n\n a0 = gamma * (b0 + vphi * b3)\n\n return chi * a0",
"def rae(self) -> float:\n return float(np.sum(self._ae()) / (np.sum(np.abs(self.true - np.mean(self.true))) + EPS))",
"def energy_func(self):\n return (\n self.inl[0].m.val_SI * (\n self.outl[0].h.val_SI - self.inl[0].h.val_SI) +\n self.inl[1].m.val_SI * (\n self.outl[1].h.val_SI - self.inl[1].h.val_SI))",
"def E(self, dstrct):\n rep_votes = dstrct.rep_votes + self.properties['sen_red']\n dem_votes = dstrct.dem_votes + self.properties['sen_blue']\n\n thresh = threshold(rep_votes+dem_votes)\n rep_wasted = wasted_votes(rep_votes, thresh)\n dem_wasted = wasted_votes(dem_votes, thresh)\n gap = (rep_wasted - dem_wasted)/(rep_votes + dem_votes)\n score = 1-abs(gap)\n\n self.E_ = self.w_E * score\n return self.E_",
"def evse(self, data, *args, **kwargs):\n darr = np.array(data)\n d = darr if len(darr.shape) == 1 else darr[0] / darr[1]\n return (d - self.evs(darr, *args, **kwargs))**2",
"def find_euc_dist(list_par1, list_par2):\r\n\r\n # First find the squared Euclidian distance\r\n temp_euc_dist = np.subtract(list_par1, list_par2)\r\n euc_dist = np.sum(np.square(temp_euc_dist))\r\n return pow(euc_dist, .5)"
] | [
"0.61746013",
"0.58410585",
"0.56296",
"0.55749977",
"0.55027765",
"0.545726",
"0.5402459",
"0.5398751",
"0.5388317",
"0.5339811",
"0.533653",
"0.5312333",
"0.5311478",
"0.52783436",
"0.5232789",
"0.522407",
"0.52214503",
"0.52191997",
"0.5212163",
"0.52099633",
"0.52089626",
"0.5193176",
"0.51908416",
"0.51712036",
"0.5154567",
"0.5146644",
"0.51301706",
"0.5114794",
"0.5106409",
"0.5106004"
] | 0.5987465 | 1 |
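A minimal usage sketch for compute_cos_pairwise_eer above, assuming the imports the snippet relies on (numpy as np, sklearn metrics, scipy's brentq and interp1d); the toy labels and embedding sizes are made up for illustration, and the function also writes the raw pair scores to test.txt as a side effect.

# Usage sketch: toy embeddings with a class-dependent offset (illustrative only).
import numpy as np
from sklearn import metrics
from scipy.optimize import brentq
from scipy.interpolate import interp1d

rng = np.random.RandomState(0)
labels = np.repeat(np.arange(4), 10)              # 4 speakers, 10 segments each
embeddings = rng.randn(40, 16) + labels[:, None]  # separable toy embeddings

eer = compute_cos_pairwise_eer(embeddings.astype(np.float64), labels)
print("pairwise cosine EER: %.4f" % eer)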
Check whether any string in the list appears as a substring of s. | def substring_in_list(s, varlist):
if varlist is None:
return False
is_sub = False
for v in varlist:
if v in s:
is_sub = True
break
return is_sub | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_string(s, strings):\n for string in strings:\n if string not in s:\n return False\n return True",
"def check(s,l):\n if len(s)==1:\n if s[0] in l:\n return False\n else:\n return True\n else:\n if s[0] in l:\n return False\n else:\n l.append(s[0])\n return check(s[1:],l)",
"def find(ss, list_seq):\n\tfor item in list_seq:\n\t\tif item in ss:\n\t\t\treturn True\n\treturn False",
"def check_word_in_list_in_string(list, string):\n stuff = [string for word in list if(word in string)]\n return stuff",
"def list_has_substring(substring, l):\n found_substring = False\n for item in l:\n if substring in item:\n found_substring = True\n break\n\n return found_substring",
"def contains(s, v):\n head = s\n while not empty(head):\n if head.first == v:\n return True\n head = head.rest\n return False",
"def contained(self,s):\n\n if s in self.symbols:\n return True\n else:\n return False",
"def check(self, s: str, mem: dict):\n dp = [False for _ in range(len(s)+1)]\n dp[0] = True\n for i in range(1, len(s)+1):\n for j in range(i):\n if dp[j] and s[j:i] in mem:\n dp[i] = True\n return dp[-1]",
"def contains(self, searchstr: str):\n for x in self.sa:\n if searchstr in x:\n return True\n pass",
"def fn(p, s):\n ss = iter(s)\n return all(ch in ss for ch in p)",
"def part_exists(requested_part:str, parts:list):\n return requested_part.lower() in parts",
"def contains(s, v):\n if empty(s):\n return False\n elif s.first == v:\n return True\n else:\n return contains(s.rest, v)",
"def issubstring(substring, string):\n return substring in string",
"def has_prefix_some(s, prefix_set):\n\tfor prefix in prefix_set:\n\t\tif s.find(prefix, 0) != -1:\n\t\t\treturn True\n\treturn False",
"def match(self, s):\n if self.re.match(s):\n self.list.append(s)\n return True\n else: return False",
"def isValid(self, s):\n for valid in self.validTargets:\n if (valid[0] in s):\n return True\n return False",
"def log_contains(self, s: str) -> bool:\n return len(list(filter(lambda str: s in str, self.logs))) > 0",
"def search_for_string(lst_str, stringy):\n if stringy in lst_str:\n return \"Found string\"\n\n else:\n return \"string not found\"",
"def dz_is_in(dz_string, substring):\n if substring not in dz_string:\n return 0\n else:\n return 1",
"def check_the_list_for_matching(checked_list: list, phrase_to_match: str) -> bool:\n for word in checked_list:\n if phrase_to_match.startswith(word):\n return True\n return False",
"def hasSubstring(self, s):\n node, off = self.followPath(s)\n return node is not None",
"def check_for_strings(text, strings):\n for string in strings:\n if text.find(string) >= 0:\n return True\n return False",
"def contains(self, searchstr: str):\n index = mybinsearch(self.sarray, searchstr, self.comp)\n if index < 0:\n return False\n return True",
"def CompareNodes(self,s,list):\n return s in list",
"def property_3(string):\n pairs = [ 'ab', 'cd', 'pq', 'xy' ]\n for pair in pairs:\n if pair in string:\n return False\n return True",
"def property_2(string):\n for letter in al:\n pair = letter + letter\n if pair in string:\n return True\n return False",
"def isValid(self, s: str) -> bool:\n st = []\n\n for char in s:\n if (len(st) != 0):\n e = st[-1]\n if (self.isValidPair(e,char)):\n st.pop()\n continue\n st.append(char)\n return (len(st)==0)",
"def search(self, word: str) -> bool:\n # Checking if the word is present in the list.\n return word in self.mylist",
"def question1(s, t):\n if type(t) == str and type(s) == str and len(s) >= len(t):\n for letter in t:\n if letter not in s:\n return False\n return True\n else:\n return False",
"def exists(self, string) -> bool:\n if string in self.index:\n return(True)\n else:\n return(False)"
] | [
"0.72143257",
"0.71113634",
"0.7025717",
"0.6971482",
"0.6968255",
"0.6905735",
"0.6830441",
"0.68250275",
"0.6770632",
"0.6740407",
"0.67399365",
"0.66369486",
"0.6623817",
"0.65804917",
"0.657662",
"0.6519753",
"0.64646107",
"0.6462173",
"0.6444658",
"0.6384393",
"0.63813186",
"0.63525367",
"0.635026",
"0.6280081",
"0.6241611",
"0.62255704",
"0.62172556",
"0.62126315",
"0.6204341",
"0.61856806"
] | 0.72698176 | 0 |
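A short usage sketch for the helper above; the names and strings are illustrative only. The loop is equivalent to any(v in s for v in varlist) with an extra None guard.

# Usage sketch (illustrative values only).
banned = ["bias", "batch_norm"]
print(substring_in_list("resnet/conv1/bias:0", banned))    # True  ("bias" occurs in s)
print(substring_in_list("resnet/conv1/kernel:0", banned))  # False
print(substring_in_list("anything", None))                 # False (None list)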
Create a summary for activations given the endpoints. | def activation_summaries(endpoints):
    summaries = []
    with tf.name_scope('summaries'):
        for act in endpoints.values():
            tensor_name = act.op.name
            summaries.append(tf.summary.histogram(tensor_name + '/activations', act))
            # summaries.append(tf.summary.scalar(tensor_name + '/sparsity', tf.nn.zero_fraction(act)))
    return tf.summary.merge(summaries) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _activation_summary(x):\n tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)\n tf.summary.histogram(tensor_name + '/activations', x)\n tf.summary.scalar(tensor_name + '/sparsity', tf.nn.zero_fraction(x))",
"def _activation_summary(x):\n # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training\n # session. This helps the clarity of presentation on tensorboard.\n tensor_name = re.sub('%s_[0-9]*/' % 'tower', '', x.op.name)\n tf.summary.histogram(tensor_name + '/activations', x)\n tf.summary.scalar(tensor_name + '/sparsity',\n tf.nn.zero_fraction(x))",
"def _activation_summary(x):\n # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training\n # session. This helps the clarity of presentation on tensorboard.\n tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)\n tf.summary.histogram(tensor_name + '/activations', x)\n tf.summary.scalar(tensor_name + '/sparsity', tf.nn.zero_fraction(x))",
"def _activation_summary(x):\n # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training\n # session. This helps the clarity of presentation on tensorboard.\n tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)\n tf.summary.histogram(tensor_name + '/activations', x)\n tf.summary.scalar(tensor_name + '/sparsity',\n tf.nn.zero_fraction(x))",
"def _activation_summary(x):\n # session. This helps the clarity of presentation on tensorboard.\n tf.summary.histogram(x.op.name + '/activations', x)\n tf.summary.scalar(x.op.name + '/sparsity', tf.nn.zero_fraction(x))",
"def _activation_summary(x):\n\n tf.summary.histogram(x.op.name + '/activations', x)\n tf.summary.scalar(x.op.name + '/sparsity', tf.nn.zero_fraction(x))",
"def _activation_summary(x):\n # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training\n # session. This helps the clarity of presentation on tensorboard.\n tensor_name = x.op.name\n # tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)\n tf.summary.histogram(tensor_name + '/activations', x)\n\n tf.summary.scalar(tensor_name + '/sparsity', tf.nn.zero_fraction(x))",
"def _activation_summary(x):\n # Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training\n # session. This helps the clarity of presentation on tensorboard.\n tensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)\n tf.histogram_summary(tensor_name + '/activations', x)\n tf.scalar_summary(tensor_name + '/sparsity', tf.nn.zero_fraction(x))",
"def _activation_summary(x):\n\t# Remove 'tower_[0-9]/' from the name in case this is a multi-GPU training\n\t# session. This helps the clarity of presentation on tensorboard.\n\ttensor_name = re.sub('%s_[0-9]*/' % TOWER_NAME, '', x.op.name)\n\ttf.summary.histogram(tensor_name + '/activations', x)\n\ttf.summary.scalar(tensor_name + '/sparsity', tf.nn.zero_fraction(x))",
"def present_summary(services, methods, count, backup):\n print_heading(\"Summary\")\n if backup is not None:\n writer(f\"Backup: {backup}\")\n writer(f\"Showing {count[0]}/{len(services)} Services\")\n writer(f\"Showing {count[1]}/{len(methods)} Methods\\n\")",
"def summary(app):\n click.echo(get_summary(app))",
"def describe_endpoint(EndpointName=None):\n pass",
"def create_summary_statistics(forward_accuracy, backward_accuracy, merged_accuracy):\n summary_statistics = open(f'summary_statistics.txt', 'a')\n summary_statistics.write(f'The forward model has an accuracy of: {forward_accuracy}\\n')\n summary_statistics.write(f'The backward model has an accuracy of: {backward_accuracy}\\n')\n summary_statistics.write(f'The merged model has an accuracy of: {merged_accuracy}\\n')\n summary_statistics.close()",
"def summary(self):\n for i,layer in enumerate(self.chain):\n x = Input([2])\n y = layer.forward(x)\n Model(x,y,name=f'layer_{i}_summary').summary()",
"def add_action_summaries(actions, action_specs, name=\"action\"):\n action_specs = tf.nest.flatten(action_specs)\n actions = tf.nest.flatten(actions)\n\n for i, (action, action_spec) in enumerate(zip(actions, action_specs)):\n if len(action_spec.shape) > 1:\n continue\n\n if tensor_spec.is_discrete(action_spec):\n histogram_discrete(\n name=\"%s/%s\" % (name, i),\n data=action,\n bucket_min=action_spec.minimum,\n bucket_max=action_spec.maximum)\n else:\n if len(action_spec.shape) == 0:\n action_dim = 1\n else:\n action_dim = action_spec.shape[-1]\n action = tf.reshape(action, (-1, action_dim))\n\n def _get_val(a, i):\n return a if len(a.shape) == 0 else a[i]\n\n for a in range(action_dim):\n # TODO: use a descriptive name for the summary\n histogram_continuous(\n name=\"%s/%s/%s\" % (name, i, a),\n data=action[:, a],\n bucket_min=_get_val(action_spec.minimum, a),\n bucket_max=_get_val(action_spec.maximum, a))",
"def summary(self, summary: str):\n return self.swag({\n 'summary': summary\n })",
"def endpoint_list(self):\n _, body = self.request('/v1.1/endpoints', 'GET')\n return body",
"def detail(self):\n info = self.info()\n info[u'services'] = {}\n for item in self.get_endpoints():\n try:\n info[u'services'][item.service].append(item.endpoint)\n except:\n info[u'services'][item.service] = [item.endpoint]\n return info",
"def request_endpoints(self):\n\n endpoints_url = self.std[\"api\"]\n endpoints_paramd = {\n \"access_token\": self.std[\"access_token\"]\n }\n\n endpoints_response = requests.get(url=endpoints_url, params=endpoints_paramd)\n print endpoints_response\n self.endpointd = endpoints_response.json()[0]",
"def build_summary(self):\n for k, v in self.metrics.items():\n tf.summary.scalar(k, v)\n \n self.summary_op = tf.summary.merge_all()",
"def get_overview(entities=None):\n \n url = \"{ep}/views/overview\".format(ep=endpoint)\n \n if entities is not None:\n qs = {}\n for e in entities:\n qs.update({'entityId': e})\n \n r = requests.get(url, headers=headers, params=qs)\n else:\n r = requests.get(url, headers=headers)\n \n return r.json()",
"def _forward_summary(self, summaries):\n p = self.params\n for summary_key, summary_value in summaries.items():\n logging.info((summary_key, summary_value))\n summary_type = base_layer.get_summary_type_from_key(summary_key)\n assert summary_value.shape[0] == p.num_stages\n if p.unpack_summaries:\n # unstack summary_value\n unstacked_values = jnp.split(summary_value, p.num_stages)\n for i, v in enumerate(unstacked_values):\n base_layer.add_summary(f'{summary_key}/{i}', v, summary_type)\n else:\n base_layer.add_summary('{summary_key}', summary_value, summary_type)",
"def setup_summary():\n episode_total_reward = tf.Variable(0.)\n\n tf.summary.scalar('Total_Reward/Episode', episode_total_reward)\n\n summary_vars = [episode_total_reward]\n\n summary_placeholders = [tf.placeholder(tf.float32) for _ in range(len(summary_vars))]\n\n update_ops = [summary_vars[i].assign(summary_placeholders[i]) for i in range(len(summary_vars))]\n\n summary_op = tf.summary.merge_all()\n return summary_placeholders, update_ops, summary_op",
"def print_summary(metrics_list, labels_list):\n for metric, name in zip(metrics_list, labels_list):\n print('*' * 108)\n print(name)\n mean_inc_acc = []\n for i in range(metric.shape[0]):\n print('\\t', end='')\n for j in range(metric.shape[1]):\n print('{:5.2f}% '.format(100 * metric[i, j]), end='')\n if np.trace(metric) == 0.0:\n if i > 0:\n avg = 100 * metric[i, :i].mean()\n mean_inc_acc += [avg]\n print('\\tAvg.:{:5.2f}% '.format(avg), end='')\n else:\n avg = 100 * metric[i, :i + 1].mean()\n mean_inc_acc += [avg]\n print('\\tAvg.:{:5.2f}% '.format(avg), end='')\n print()\n print()\n\n # Computing AIA across all incremental states (thus excluding the first non-incremental state)\n print('\\tMean Incremental Acc.: {:5.2f}%'.format(np.mean(mean_inc_acc[1:])))\n print('*' * 108)",
"def account_summary(self):\n pass",
"def _activity_endpoint(self, athlete, filename):\n return '{host}{athlete}/activity/{filename}'.format(\n host=self.host,\n athlete=quote_plus(athlete),\n filename=filename\n )",
"def build_summaries(self):\n\n # Loss summary.\n tf.summary.scalar('loss', self.loss)\n\n merged = tf.summary.merge_all()\n self.summary_op = merged\n tf.logging.info('summary op set')",
"def build_summaries(self):\n\n # Loss summary.\n tf.summary.scalar('loss', self.loss)\n\n merged = tf.summary.merge_all()\n self.summary_op = merged\n tf.logging.info('summary op set')",
"def get_summary(self, **kwargs):\n authorized_args = [\n 'begin', 'end', 'tenant_id', 'service', 'groupby', 'all_tenants']\n if kwargs.get('groupby', None):\n kwargs['groupby'] = ','.join(kwargs['groupby'])\n url = self.get_url('summary', kwargs, authorized_args)\n return self.api_client.get(url).json()",
"def endpoints(self, endpoints):\n\n self._endpoints = endpoints"
] | [
"0.59474593",
"0.5940539",
"0.59343994",
"0.59330446",
"0.5931891",
"0.59268993",
"0.5921793",
"0.59189427",
"0.58822054",
"0.56461763",
"0.5312166",
"0.5241216",
"0.51119596",
"0.51063263",
"0.50800484",
"0.50588953",
"0.50553143",
"0.5045727",
"0.5015067",
"0.501194",
"0.49936566",
"0.4982028",
"0.49771336",
"0.49707723",
"0.4926592",
"0.49047846",
"0.489371",
"0.489371",
"0.4886755",
"0.4883833"
] | 0.7658504 | 0 |
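A graph-mode sketch of how the summary op above might be wired up, assuming TensorFlow 1.x-style APIs (tf.placeholder, tf.layers.dense, tf.summary.FileWriter); under TensorFlow 2 the same calls live under tf.compat.v1. The endpoint names and layer sizes are made up.

# Graph-mode sketch (TF 1.x style; endpoint names and sizes are illustrative).
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 64], name="features")
h1 = tf.layers.dense(x, 32, activation=tf.nn.relu, name="fc1")
h2 = tf.layers.dense(h1, 16, activation=tf.nn.relu, name="fc2")

endpoints = {"fc1": h1, "fc2": h2}            # endpoint name -> activation tensor
summary_op = activation_summaries(endpoints)

writer = tf.summary.FileWriter("./logdir")
# Inside a training loop:
#   summary = sess.run(summary_op, feed_dict={x: batch})
#   writer.add_summary(summary, global_step)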
Executes SSM document for given document name and input parameters. | def execute(self, document_name, input_params):
if self._document_exists(document_name):
self.logger.info("Executing SSM document [%s] with parameters: [%s]", document_name, input_params)
# Executing SSM document
execution_id = self.ssm_client.start_automation_execution(
DocumentName=document_name,
# DocumentVersion=version,
Parameters=input_params
)['AutomationExecutionId']
self.logger.info(f'SSM execution URL: {self.get_execution_url(execution_id)}')
return execution_id
else:
error_msg = "SSM document with name [{}] does not exist.".format(document_name)
self.logger.error(error_msg)
raise Exception(error_msg) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def runQuery(cred, structuredQuery):\n url = cred.base_url + \"documents:runQuery\"\n\n makeRequest(cred, url, 'POST', structuredQuery)",
"def _send_command_to_nodes(self, document_name, parameters, node_ids):\n logger.debug(\"Sending SSM command to {} node(s). Document name: {}. \"\n \"Parameters: {}.\".format(\n len(node_ids), document_name, parameters))\n response = self.ssm_client.send_command(\n InstanceIds=self.node_ids,\n DocumentName=document_name,\n Parameters=parameters,\n MaxConcurrency=str(min(len(self.node_ids), 100)),\n MaxErrors=\"0\")\n return response",
"def main():\n sdoc_application = SDocApplication()\n sdoc_application.run()",
"def run_script(self, params, config_no):\n raise NotImplementedError()",
"def doc(caesar, input):\n name = input.group(1)\n name = name.lower()\n\n if caesar.doc.has_key(name): \n caesar.reply(caesar.doc[name][0])\n if caesar.doc[name][1]: \n caesar.say('e.g. ' + caesar.doc[name][1])",
"def edit_document():",
"def run_docs(self, *docs):\n self.docs = docs\n self.run()",
"def _handle(self) -> int:\n sdoc = SDoc()\n sdoc.io = SDocIO(self.io.input, self.io.output, self.io.error_output)\n sdoc.config_path = self.argument('config.cfg')\n sdoc.init()\n\n return sdoc.run_sdoc2(self.argument('main.sdoc2'))",
"def ssm_run_command():\n try:\n table_name = CONTENT_TABLE_NAME\n ssm_client = boto3.client('ssm', config=MSAM_BOTO3_CONFIG)\n db_resource = boto3.resource('dynamodb', config=MSAM_BOTO3_CONFIG)\n db_table = db_resource.Table(table_name)\n instance_ids = {}\n items = []\n # get all the managed instances from the DB with tag MSAM-NodeType\n response = db_table.query(\n IndexName=\"ServiceRegionIndex\",\n KeyConditionExpression=Key(\"service\").eq(\"ssm-managed-instance\"),\n FilterExpression=\"contains(#data, :tagname)\",\n ExpressionAttributeNames={\"#data\": \"data\"},\n ExpressionAttributeValues={\":tagname\": \"MSAM-NodeType\"}\n )\n if \"Items\" in response:\n items = response[\"Items\"]\n while \"LastEvaluatedKey\" in response:\n response = db_table.query(\n IndexName=\"ServiceRegionIndex\",\n KeyConditionExpression=Key(\"service\").eq(\"ssm-managed-instance\"),\n FilterExpression=\"contains(#data, :tagname)\",\n ExpressionAttributeNames={\"#data\": \"data\"},\n ExpressionAttributeValues={\":tagname\": \"MSAM-NodeType\"},\n ExclusiveStartKey=response['LastEvaluatedKey']\n )\n if \"Items\" in response:\n items.append(response[\"Items\"])\n\n for item in items:\n data = json.loads(item['data'])\n if \"MSAM-NodeType\" in data[\"Tags\"]:\n instance_ids[data['Id']] = data['Tags']['MSAM-NodeType']\n\n # get all the SSM documents applicable to MSAM, filtering by MSAM-NodeType tag\n # When we support more than just ElementalLive, add to the list of values for MSAM-NodeType during filtering\n document_list = ssm_client.list_documents(\n Filters=[\n {\n 'Key': 'tag:MSAM-NodeType',\n 'Values': [\n 'ElementalLive',\n ]\n },\n {\n 'Key': 'Owner',\n 'Values': [\n 'Self'\n ]\n }\n ]\n )\n document_ids = document_list['DocumentIdentifiers']\n while \"NextToken\" in document_list:\n document_list = ssm_client.list_documents(\n Filters=[\n {\n 'Key': 'tag:MSAM-NodeType',\n 'Values': [\n 'ElementalLive',\n ]\n },\n {\n 'Key': 'Owner',\n 'Values': [\n 'Self'\n ]\n }\n ],\n NextToken=document_list[\"NextToken\"]\n )\n document_ids.append(document_list['DocumentIdentifiers'])\n\n document_names = {}\n for document in document_ids:\n if \"Tags\" in document:\n for tag in document[\"Tags\"]:\n if tag['Key'] == \"MSAM-NodeType\":\n document_names[document[\"Name\"]] = tag['Value']\n\n # loop over all instances and run applicable commands based on node type\n for id, id_type in instance_ids.items():\n for name, doc_type in document_names.items():\n if id_type in doc_type:\n # maybe eventually doc type could be comma-delimited string if doc applies to more than one type?\n print(\"running command: %s on %s \" % (name, id))\n try:\n response = ssm_client.send_command(\n InstanceIds=[\n id,\n ],\n DocumentName=name,\n TimeoutSeconds=600,\n Parameters={\n },\n MaxConcurrency='50',\n MaxErrors='0',\n CloudWatchOutputConfig={\n 'CloudWatchLogGroupName': SSM_LOG_GROUP_NAME,\n 'CloudWatchOutputEnabled': True\n }\n )\n print(response)\n except ClientError as error:\n print(error)\n if error.response['Error']['Code'] == \"InvalidInstanceId\":\n continue\n except ClientError as error:\n print(error)",
"def test_single_document_processing(self):\n print('submitting document...')\n\n for doc in self.DOCS:\n result = self.client.submit_document(doc)\n\n from pprint import pprint\n print(result)\n self.assertTrue(result != \"\")",
"def oparl_documentsss():\n start_time = time.time()\n jsonp_callback = request.args.get('callback', None)\n ref = request.args.get('reference', '')\n references = ref.split(',')\n if references == ['']:\n references = None\n output = request.args.get('output', '').split(',')\n rs = util.get_rs()\n q = request.args.get('q', '*:*')\n fq = request.args.get('fq', '')\n sort = request.args.get('sort', 'score desc')\n start = int(request.args.get('start', '0'))\n numdocs = int(request.args.get('docs', '10'))\n date_param = request.args.get('date', '')\n get_attachments = 'attachments' in output\n get_thumbnails = 'thumbnails' in output and get_attachments\n get_consultations = 'consultations' in output\n get_facets = 'facets' in output\n #get_relations = 'relations' in output\n request_info = {} # Info über die Anfrage\n query = False\n docs = False\n submission_ids = []\n # TODO: entscheiden, was mit get_relations passiert\n \"\"\"\n Anhand der übergebenen Parameter wird entschieden, ob eine ES-Suche\n durchgeführt wird, oder ob die Abfrage direkt anhand von Kennungen\n (references) erfolgen kann.\n \"\"\"\n \n if references is None:\n # Suche wird durchgeführt\n # (References-Liste via Suchmaschine füllen)\n query = db.query_submissions(rs=rs, q=q, fq=fq, sort=sort, start=start,\n docs=numdocs, date=date_param, facets=get_facets)\n if query['numhits'] > 0:\n submission_ids = [x['_id'] for x in query['result']]\n else:\n docs = []\n else:\n # Direkte Abfrage\n request_info = {\n 'references': references\n }\n request_info['output'] = output\n\n # Abrufen der benötigten Dokumente aus der Datenbank\n if references is not None:\n docs = db.get_submissions(rs=rs, references=references,\n get_attachments=get_attachments,\n get_consultations=get_consultations,\n get_thumbnails=get_thumbnails)\n elif len(submission_ids) > 0:\n docs = db.get_submissions(rs=rs, submission_ids=submission_ids,\n get_attachments=get_attachments,\n get_consultations=get_consultations,\n get_thumbnails=get_thumbnails)\n\n ret = {\n 'status': 0,\n 'duration': int((time.time() - start_time) * 1000),\n 'request': request_info,\n 'response': {}\n }\n if docs:\n ret['response']['documents'] = docs\n ret['response']['numdocs'] = len(docs)\n if query and 'maxscore' in query:\n ret['response']['maxscore'] = query['maxscore']\n for n in range(len(docs)):\n docs[n]['reference'] = docs[n]['identifier']\n del docs[n]['identifier']\n\n if query:\n ret['response']['numhits'] = query['numhits']\n if get_facets and 'facets' in query:\n ret['response']['facets'] = query['facets']\n \n ret['response']['start'] = start\n ret['request']['sort'] = sort\n ret['request']['fq'] = fq\n\n json_output = json.dumps(ret, cls=util.MyEncoder, sort_keys=True)\n if jsonp_callback is not None:\n json_output = jsonp_callback + '(' + json_output + ')'\n response = make_response(json_output, 200)\n response.mimetype = 'application/json'\n response.headers['Expires'] = util.expires_date(hours=24)\n response.headers['Cache-Control'] = util.cache_max_age(hours=24)\n return response",
"def main():\n \"\"\"Calculates BM25 and VSM score\"\"\"\n\n queries, docs, term_freq_corpus = io() \n DocQ = DocumentQuery(docs, queries[0].split(\" \"), [], {}, {}, set(), term_freq_corpus)\n #print(queries[0].split(\" \"))\n DocQ.init_doc_query(queries[0].split(\" \"), docs)\n DocQ.init_term_freq()\n for i, d in enumerate(docs):\n bm25 = DocQ.bm25_score(queries[0].split(\" \"), d)\n print(docs[i], bm25)\n\n vsm.document_filenames = {i:d for i, d in enumerate(docs)}\n vsm.N = len(docs)\n vsm.query = queries[0]\n vsm.initialize_terms_and_postings()\n vsm.initialize_document_frequencies()\n vsm.initialize_lengths()\n vsm.do_search()",
"def run_design(design_func, message_box_on_error=True, print_runtime=True, document_name=None,\n design_args=None, design_kwargs=None):\n # noinspection PyBroadException\n try:\n start = time.time()\n if not document_name:\n frame = inspect.stack()[1]\n module = inspect.getmodule(frame[0])\n filename = module.__file__\n document_name = pathlib.Path(filename).stem\n setup_document(document_name)\n design_func(*(design_args or ()), **(design_kwargs or {}))\n end = time.time()\n if print_runtime:\n print(\"Run time: %f\" % (end-start))\n except Exception:\n print(traceback.format_exc())\n if message_box_on_error:\n ui().messageBox('Failed:\\n{}'.format(traceback.format_exc()))",
"def savedoc():\r\n document.save('QSDoc_{0}_{1}_{2}_{3}.docx'.format(args.server, year, month, day))",
"def _ssm_command_waiter(self, document_name, parameters,\n retry_failed=True):\n\n # This waiter differs from the built-in SSM.Waiter by\n # optimistically waiting for the command invocation to\n # exist instead of failing immediately, and by resubmitting\n # any failed command until all retry attempts are exhausted\n # by default.\n response = self._send_command_to_all_nodes(\n document_name,\n parameters,\n )\n command_id = response[\"Command\"][\"CommandId\"]\n\n cloudwatch_config = self.provider_config[\"cloudwatch\"]\n agent_retryer_config = cloudwatch_config \\\n .get(CloudwatchConfigType.AGENT.value) \\\n .get(\"retryer\", {})\n max_attempts = agent_retryer_config.get(\"max_attempts\", 120)\n delay_seconds = agent_retryer_config.get(\"delay_seconds\", 30)\n num_attempts = 0\n for node_id in self.node_ids:\n while True:\n num_attempts += 1\n logger.debug(\"Listing SSM command ID {} invocations on node {}\"\n .format(command_id, node_id))\n response = self.ssm_client.list_command_invocations(\n CommandId=command_id,\n InstanceId=node_id,\n )\n cmd_invocations = response[\"CommandInvocations\"]\n if not cmd_invocations:\n logger.debug(\n \"SSM Command ID {} invocation does not exist. If \"\n \"the command was just started, it may take a \"\n \"few seconds to register.\".format(command_id))\n else:\n if len(cmd_invocations) > 1:\n logger.warning(\n \"Expected to find 1 SSM command invocation with \"\n \"ID {} on node {} but found {}: {}\".format(\n command_id,\n node_id,\n len(cmd_invocations),\n cmd_invocations,\n ))\n cmd_invocation = cmd_invocations[0]\n if cmd_invocation[\"Status\"] == \"Success\":\n logger.debug(\n \"SSM Command ID {} completed successfully.\"\n .format(command_id))\n break\n if num_attempts >= max_attempts:\n logger.error(\n \"Max attempts for command {} exceeded on node {}\"\n .format(command_id, node_id))\n raise botocore.exceptions.WaiterError(\n name=\"ssm_waiter\",\n reason=\"Max attempts exceeded\",\n last_response=cmd_invocation,\n )\n if cmd_invocation[\"Status\"] == \"Failed\":\n logger.debug(f\"SSM Command ID {command_id} failed.\")\n if retry_failed:\n logger.debug(\n f\"Retrying in {delay_seconds} seconds.\")\n response = self._send_command_to_nodes(\n document_name, parameters, node_id)\n command_id = response[\"Command\"][\"CommandId\"]\n logger.debug(\"Sent SSM command ID {} to node {}\"\n .format(command_id, node_id))\n else:\n logger.debug(\n f\"Ignoring Command ID {command_id} failure.\")\n break\n time.sleep(delay_seconds)",
"def execute(self, name=None, clientRequestToken=None):\n params = {'name': name if name else self.resource_id}\n if clientRequestToken:\n params.update({\"clientRequestToken\": clientRequestToken})\n self.logger.debug('Executing {resource_type} with parameters:'\n ' {params}'.format(resource_type=self.type_name,\n params=params))\n\n return self.client.start_pipeline_execution(**params)",
"def get_document_by_name(update, name_or_id):\n sc_api = SmartCAT(SMARTCAT_API_USERNAME, SMARTCAT_API_PASSWORD)\n try:\n document = sc_api.project.get_document_by_name(SMARTCAT_PROJECT_ID, name_or_id)\n except SmartcatException as e:\n logging.error('Error getting document: {0} {1}'.format(e.code, e.message))\n update.message.reply_text(SHIT_HAPPENS)\n return None\n\n if not document:\n logging.warning('Document not found')\n update.message.reply_text(NOTHING_FOUND)\n return None\n\n return document",
"def exposed_execute(self, text):\n execute(text, PublicService.exposed_namespace)",
"def invoke(self):\n print(\"\\nEnter Book Name: \", end=\"\")\n # get option from user, and strip whitespace\n str_option = input().strip()\n if not str_option:\n print(\"Invalid Input!\")\n return\n self.sbh.display_books(\n self.db.query_book_by_title(str_option)\n )",
"def set_document_name_for_search(self, document_name):\n self.set_value_into_input_field(self.document_name_locator, document_name)",
"def executeQuery(es_client, index_name, query):\n try:\n result = es_client.search(index=index_name, body=query)\n except:\n etype, evalue, etb = sys.exc_info()\n logger.error('The query %s failed. Exception: %s, Error: %s.' % (query, etype, evalue))\n sys.exit(255)\n return result",
"def invoke(self):\n print(\"\\nEnter Author Name: \", end=\"\")\n # get option from user, and strip whitespace\n str_option = input().strip()\n if not str_option:\n print(\"Invalid Input!\")\n return\n self.sbh.display_books(\n self.db.query_book_by_author(str_option)\n )",
"def send_tag_run_command(session, document_name, commands, target_key, tag_value, comment):\n try:\n ssm = session.client('ssm')\n except ClientError as err:\n logger.error(\"Run Command Failed!\\n%s\", str(err))\n return False\n \n try:\n resp = ssm.send_command(\n Targets=[\n {\n 'Key': target_key,\n 'Values': [\n tag_value,\n ]\n },\n ],\n DocumentName=document_name,\n Parameters={\n 'commands': commands,\n 'executionTimeout': ['600'] # Seconds all commands have to complete in\n },\n Comment=comment\n )\n logger.info('============RunCommand using Tag Name sent successfully, CommandID: ' + resp['Command']['CommandId'])\n return resp['Command']['CommandId']\n except ClientError as err:\n if 'ThrottlingException' in str(err):\n logger.info(\"RunCommand throttled, automatically retrying...\")\n send_tag_run_command(session, document_name, commands, target_key, tag_value, comment)\n else:\n logger.error(\"Run Tag Command Failed!\\n%s\", str(err))\n return False",
"def main():\n\n global final_dictionary\n global final_doc_set\n\n input_query = input(\"Please enter query for search: \")\n\n # Retrieving positional inverted index for query terms\n final_dictionary = fetch_dictionary(input_query.lower()) # Query is converted to lowercase as pre-process step\n\n #The final set of document IDs is retrieved below\n fetch_posting_list(input_query)\n sc = tf_idf_score()\n output = fetch_document_contents(input_query, sc)\n print(output)\n output_file = open(RESULT_FILE, 'a')\n output_file.write(output)\n output_file.write('\\n##############################################################\\n')\n output_file.close()\n\n print(\"Query results also appended to file: {0}\".format(RESULT_FILE))",
"def GetDocument(self, *args, **kwargs):\n pass",
"def test_search_with_scoring_and_params(context):\n # When create a query block\n t = QuerySet(\"localhost\", index=\"foo\")\n\n # And there are records\n add_document(\"foo\", {\"bar\": 1})\n add_document(\"foo\", {\"bar\": 2})\n add_document(\"foo\", {\"bar\": 3})\n\n # And I add scoring with params\n score = ScriptScore(\"s = custom_param + doc['bar'].value\", params={\"custom_param\": 1})\n t.score(score)\n results = t[0:10]\n\n # Then my results are scored correctly\n len(results).should.equal(3)\n results[0][\"_source\"][\"bar\"].should.equal(3)\n results[1][\"_source\"][\"bar\"].should.equal(2)\n results[2][\"_source\"][\"bar\"].should.equal(1)",
"def execute(\n name: str,\n *args: Any,\n **kwargs: Any\n ) -> None:\n cherrypy.engine.publish(name, *args, **kwargs) # type: ignore",
"def main():\n creds = None\n # The file token.pickle stores the user's access and refresh tokens, and is\n # created automatically when the authorization flow completes for the first time.\n if osp.exists(TOKEN):\n with open(TOKEN, 'rb') as token:\n creds = pickle.load(token)\n\n # If there are no (valid) credentials available, let the user log in.\n if not creds or not creds.valid:\n if creds and creds.expired and creds.refresh_token:\n creds.refresh(Request())\n else:\n flow = InstalledAppFlow.from_client_secrets_file(CREDENTIALS, CURRENT_SCOPE)\n creds = flow.run_local_server()\n # Save the credentials for the next run\n with open(TOKEN, 'wb') as token:\n pickle.dump(creds, token, pickle.HIGHEST_PROTOCOL)\n\n service = build('docs', 'v1', credentials=creds)\n\n # Do a document \"get\" request and print the results as formatted JSON\n document = service.documents().get(documentId = DOCUMENT_ID).execute()\n # print(json.dumps(document, indent=4))\n\n doc_title = document.get('title')\n print(\"The title of the document is: {}\".format(doc_title))\n # print('The body of the document is: {}'.format(document.get('body')))\n\n # print document as json file -- add a timestamp to get a unique file name\n out_file = doc_title + '.' + now + \".json\"\n print(\"out_file is '{}'\".format(out_file))\n fp = open(out_file, 'w')\n json.dump(document, fp, indent=4)\n\n print('PROGRAM ENDED.')",
"def doc(update: Update, context: CallbackContext):\n language_code = update.effective_user.language_code\n args = context.args\n if_admin = database.get_user_attr('admin', user_id=update.effective_user.id)\n if len(args) > 2:\n text = get_text('quantity_error_doc_text', language_code).text()\n else:\n if len(args) == 0:\n text = get_text('doc_text', language_code).text({'command': consts.ALL, 'admin': if_admin})\n else:\n if args[0] not in consts.DOC_COMMANDS:\n text = get_text('wrong_command_error_doc_text', language_code).text()\n else:\n text = get_text('doc_text', language_code).text({'command': args[0], 'admin': if_admin})\n if not if_admin and args[0] == 'admin':\n text += get_text('doc_unavailable_text', language_code).text()\n cf.send_message(\n context=context,\n chat_id=update.effective_chat.id,\n text=text,\n )",
"def update(self, doc):\n if app.config.get(\"READ_ONLY_MODE\", False) and app.config.get(\"SCRIPTS_READ_ONLY_MODE\", False):\n app.logger.warn(\"System is in READ-ONLY mode, update command cannot run\")\n return\n\n return requests.post(self.target() + self.id + \"/_update\", data=json.dumps({\"doc\": doc}))"
] | [
"0.5480394",
"0.5434591",
"0.5376649",
"0.52243423",
"0.5214718",
"0.52023363",
"0.5189893",
"0.5051979",
"0.5050216",
"0.49367806",
"0.49262178",
"0.49162",
"0.4915455",
"0.48682842",
"0.48337287",
"0.4806876",
"0.48034984",
"0.47985923",
"0.47829112",
"0.47478285",
"0.47474623",
"0.47403404",
"0.473556",
"0.47337866",
"0.47113633",
"0.46896487",
"0.46817455",
"0.4677019",
"0.46671656",
"0.46634454"
] | 0.8131812 | 0 |
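A standalone sketch of the boto3 call that the execute method above wraps; the document name, parameter names, and instance id are hypothetical placeholders, not values from the record.

# Direct boto3 sketch of the underlying call (document name and parameters are placeholders).
import boto3

ssm = boto3.client("ssm", region_name="us-east-1")
execution_id = ssm.start_automation_execution(
    DocumentName="My-Automation-Document",               # hypothetical document name
    Parameters={"InstanceId": ["i-0123456789abcdef0"]},  # hypothetical parameters
)["AutomationExecutionId"]
print("Started automation execution:", execution_id)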
Returns the final SSM document execution status; if the execution is still in progress (InProgress, Pending, Cancelling, or Waiting), it waits until the SSM document execution has completed. | def wait_for_execution_completion(self, execution_id, document_name=None):
# Fetch ssm execution status
status = self._get_execution_status(execution_id, document_name)
# Wait for execution to be completed
while status == 'InProgress' or status == 'Pending' or status == 'Cancelling' or status == 'Waiting':
time.sleep(constants.sleep_time_secs)
status = self._get_execution_status(execution_id, document_name)
return status | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_execution_status(self, execution_id, document_name=None):\n execution = self.ssm_client.get_automation_execution(\n AutomationExecutionId=execution_id\n )\n # TODO(semiond): we can remove document name as parameter, can take it by execution id.\n document_name = document_name if document_name else execution['AutomationExecution']['DocumentName']\n step_executions = execution['AutomationExecution']['StepExecutions']\n step = self._get_step_by_status(step_executions, 'InProgress')\n if step:\n step_name = step['StepName']\n self.logger.info(f'Waiting SSM document step [{document_name}>{step_name}] to be completed: '\n f'{self.get_execution_step_url(execution_id, step_name, step_executions)}')\n return execution['AutomationExecution']['AutomationExecutionStatus']",
"def status(self):\n with self.__lock:\n assert(self.__complete)\n return self.__status",
"def status(self):\n assert(self.__complete)\n return self.__status",
"def get_status(self):\n if self._is_running():\n return \"RUNNING\"\n elif self._has_error():\n # The run started but failed\n return \"FAILED\"\n elif self._is_finished():\n # The run was finished\n return \"FINISHED\"\n elif self.current_step() >= 0:\n # The run started at some point but was not completed\n return \"INCOMPLETE\"\n else:\n # The run did not start\n return \"NOT STARTED\"",
"def get_completionStatus(self):\n val = self.collection.get_cdmi_sys_meta().get(\"cdmi_completionStatus\",\n \"Complete\")\n return val",
"def get_completionStatus(self):\n val = self.resource.get_cdmi_sys_meta().get(\"cdmi_completionStatus\",\n \"Complete\")\n return val",
"def status(self) -> pulumi.Output['outputs.JobStatus']:\n return pulumi.get(self, \"status\")",
"def container_status(self):\n if self.status == 'complete':\n return 'complete'\n try:\n task_status = self._ecs.describe_tasks(tasks=[self.name])['tasks'][0]['lastStatus']\n return task_status\n except (IndexError, ClientError):\n return 'STOPPED'",
"def complete(self):\n logger.info(\"complete()\")\n if self.complete_status is None:\n raise RuntimeError(\"No collection in progress\")\n\n return self.complete_status",
"def get_status(self):\n\t\treturn call_sdk_function('PrlJob_GetStatus', self.handle)",
"def status(self):\n return self.job_proto.status",
"def status(self):\n\t\treturn self._status",
"def status():\n return 'OK'",
"def status(self) -> str:\n return self._check_job_status()",
"def evaluation_status(self):\n return self._evaluation_status",
"def custom_wait_for_completion(task_description, output):\n state = 'UNSUBMITTED'\n while not (state == 'COMPLETED' or state =='FAILED'):\n output.add_live_msg(ms.STATUS.format(state))\n time.sleep(5)\n \n #search for the task in task_list\n for task in task_description:\n current_task = gs.isTask(task)\n if current_task:\n state = current_task.state\n if state == 'RUNNING' or state == 'FAILED': \n break\n \n return state",
"def status(self):\n if self.num_steps >= self.timeout:\n return Status.TIMEOUT\n\n return Status.IN_PROGRESS",
"def status(self):\r\n return self._status",
"def status(self):\r\n return self._status",
"def status(self):\n return self.__status",
"def status(self):\n return self.__status",
"def status(self) -> pulumi.Output['outputs.AssessmentStatusResponse']:\n return pulumi.get(self, \"status\")",
"def get_raw_status(self):\n self.__param_lock.acquire()\n status = self.__status\n self.__param_lock.release()\n return status",
"def status(self):\n if hasattr(self, \"_status\"):\n return self._status\n else:\n return None",
"def status(self):\n return self._status",
"def status(self):\n return self._status",
"def status(self):\n return self._status",
"def status(self):\n return self._status",
"def status(self):\n return self._status",
"def status(self):\n return self._status"
] | [
"0.68110305",
"0.6518064",
"0.65054107",
"0.6139724",
"0.6080517",
"0.6051617",
"0.59565747",
"0.59161776",
"0.58984363",
"0.586592",
"0.5834591",
"0.58224225",
"0.57734585",
"0.57674146",
"0.5753716",
"0.5740925",
"0.5731959",
"0.57306814",
"0.57306814",
"0.5700119",
"0.5700119",
"0.56947446",
"0.5692712",
"0.5687688",
"0.56622225",
"0.56622225",
"0.56622225",
"0.56622225",
"0.56622225",
"0.56622225"
] | 0.6551534 | 1 |
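A self-contained polling sketch equivalent to the wait loop above, using boto3 directly; the 10-second interval is an assumed stand-in for constants.sleep_time_secs, which is defined elsewhere in the project.

# Polling sketch; the sleep interval stands in for constants.sleep_time_secs.
import time
import boto3

ssm = boto3.client("ssm")
IN_PROGRESS = {"InProgress", "Pending", "Cancelling", "Waiting"}

def wait_for_automation(execution_id, sleep_secs=10):
    while True:
        status = ssm.get_automation_execution(
            AutomationExecutionId=execution_id
        )["AutomationExecution"]["AutomationExecutionStatus"]
        if status not in IN_PROGRESS:
            return status
        time.sleep(sleep_secs)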
Returns SSM document step output for given execution id, step name and output key. | def get_step_output(self, execution_id, step_name, output_key):
execution = self.ssm_client.get_automation_execution(
AutomationExecutionId=execution_id
)
step_executions = execution['AutomationExecution']['StepExecutions']
step = self._get_step_by_name(step_executions, step_name)
if step and step.get('Outputs') and step.get('Outputs').get(output_key):
return step['Outputs'][output_key][0] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_step_output_uri(self, step):\n # parse in reverse order, in case there are multiple -output args\n args = step.args()\n for i, arg in reversed(list(enumerate(args[:-1]))):\n if arg == '-output':\n return args[i + 1]\n else:\n return None",
"def get_execution_step_url(self, execution_id: str, step_name: str, steps: [] = None) -> str:\n if not steps or len(steps) < 1:\n execution = self.ssm_client.get_automation_execution(AutomationExecutionId=execution_id)\n steps = execution['AutomationExecution']['StepExecutions']\n\n step = self._get_step_by_name(steps, step_name)\n if not step:\n raise Exception(f'SSM document step [{step_name}] does not exist in execution: '\n f'{self.get_execution_url(execution_id)}')\n step_execution_id = step['StepExecutionId']\n step_index = self._get_step_execution_index(steps, step_name)\n return f'https://{self.region}.console.aws.amazon.com/systems-manager/automation/execution/{execution_id}' \\\n f'/step/{step_index}/{step_execution_id}'",
"def output(self) -> pulumi.Output[Optional['outputs.JobStepOutputResponse']]:\n return pulumi.get(self, \"output\")",
"def get_document_output(document_id: Optional[pulumi.Input[str]] = None,\n location: Optional[pulumi.Input[str]] = None,\n project: Optional[pulumi.Input[Optional[str]]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetDocumentResult]:\n ...",
"def output(self) -> Optional[pulumi.Input['JobStepOutputArgs']]:\n return pulumi.get(self, \"output\")",
"def get_outputs(step):\n params = step.get('parameters', {})\n outputs = params.get('outputs', [])\n for single_output in ['output', 'src_output', 'tgt_output']:\n if single_output in params:\n outputs.append(params[single_output])\n return outputs",
"def get_call_output(self, s3_output_key):\n return self.get_object(s3_output_key)",
"def task_stdout(self, task_id):\n result, _ = self.task_collect(task_id, wait=False)\n return result['shards'][0]['output']",
"def get_output(self, name_dict):\n return self.expand_vars(self.options.output_pattern, name_dict)",
"def result(self, step):\n indent_extra = 0\n if self.current_rule:\n indent_extra = self.indent_size\n\n step = self.steps.pop(0)\n indent = make_indentation(2 * self.indent_size + indent_extra)\n if self.show_aligned_keywords:\n # -- RIGHT-ALIGN KEYWORDS (max. keyword width: 6):\n text = u\"%s%6s %s ... \" % (indent, step.keyword, step.name)\n else:\n text = u\"%s%s %s ... \" % (indent, step.keyword, step.name)\n self.stream.write(text)\n\n status_text = step.status.name\n if self.show_timings:\n status_text += \" in %0.3fs\" % step.duration\n\n unicode_errors = 0\n if step.error_message:\n try:\n self.stream.write(u\"%s\\n%s\\n\" % (status_text, step.error_message))\n except UnicodeError as e:\n unicode_errors += 1\n self.stream.write(u\"%s\\n\" % status_text)\n self.stream.write(u\"%s while writing error message: %s\\n\" % \\\n (e.__class__.__name__, e))\n if self.RAISE_OUTPUT_ERRORS:\n raise\n else:\n self.stream.write(u\"%s\\n\" % status_text)\n\n if self.show_multiline:\n if step.text:\n try:\n self.doc_string(step.text)\n except UnicodeError as e:\n unicode_errors += 1\n self.stream.write(u\"%s while writing docstring: %s\\n\" % \\\n (e.__class__.__name__, e))\n if self.RAISE_OUTPUT_ERRORS:\n raise\n if step.table:\n self.table(step.table)",
"def get_output(self, **kwargs):\n return self.out",
"def get_output_value(description, key):\n\n outputs = [o for o in description['Outputs'] if o['OutputKey'] == key]\n return None if len(outputs) != 1 else outputs[0]['OutputValue']",
"def get_task_output(self, task, output_id):\n output_record = self._read_transaction(tx.get_task_output, task=task, output_id=output_id)\n return _reconstruct_task_output(output_record[\"o\"])",
"def outputRetrieved(self, blTaskName, rng):\n return self._genericCommand('outputRetrieved', blTaskName, rng)",
"def get_task_output(self, task, output_id):\n return self._gdb_interface.get_task_output(task, output_id)",
"def get_assessment_output(assessment_id: Optional[pulumi.Input[str]] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> pulumi.Output[GetAssessmentResult]:\n ...",
"def step_id(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"step_id\")",
"def _get_output_snippet(path, beam, snippet_name):\n snippet = getattr(madx_snippets, snippet_name)\n ids = IDS.copy()\n\n return {out: snippet(path, \"b{:d}.{:s}\".format(beam, ids[out]))\n for out in ids.keys()}",
"def getStep():\n # TODO: can there be non-Step logs?",
"def output(section):\n target = outputs.get(section, str(config_output))\n if not target or target == \"stdout\":\n return StdOutput()\n elif callable(target):\n return CallOutput(target)\n else:\n return FileOutput(target, \"w\")",
"def output(self, name):\r\n m = WorkUnit._valid_name_re.match(name)\r\n if not m or m.group(0) != name:\r\n raise Exception('Invalid output name: %s' % name)\r\n if name not in self._outputs:\r\n path = os.path.join(self.run_tracker.info_dir, 'tool_outputs', '%s.%s' % (self.id, name))\r\n safe_mkdir_for(path)\r\n self._outputs[name] = FileBackedRWBuf(path)\r\n return self._outputs[name]",
"def transform(self, data):\n if data:\n assert isinstance(data, dict), 'Step {}, \"data\" argument in the \"transform()\" method must be dict, ' \\\n 'got {} instead.'.format(self.name, type(data))\n logger.info('Step {}, working in \"{}\" mode'.format(self.name, self._mode))\n\n if self.output_is_cached:\n logger.info('Step {} using cached output'.format(self.name))\n step_output_data = self.output\n elif self.output_is_persisted and self.load_persisted_output:\n logger.info('Step {} loading persisted output from {}'.format(self.name,\n self.experiment_directory_output_step))\n step_output_data = self._load_output(self.experiment_directory_output_step)\n else:\n step_inputs = {}\n if self.input_data is not None:\n for input_data_part in self.input_data:\n step_inputs[input_data_part] = data[input_data_part]\n\n for input_step in self.input_steps:\n input_step._mode = self._mode\n step_inputs[input_step.name] = input_step.transform(data)\n\n if self.adapter:\n step_inputs = self._adapt(step_inputs)\n else:\n step_inputs = self._unpack(step_inputs)\n step_output_data = self._transform_operation(step_inputs)\n logger.info('Step {}, transform completed'.format(self.name))\n return step_output_data",
"def step_key(self) -> str:\n return self._step_execution_context.step.key",
"def _get_output(self):\n return self.__output",
"def _get_output(self):\n return self.__output",
"def _get_output(self):\n return self.__output",
"def _get_output(self):\n return self.__output",
"def _get_output(self):\n return self.__output",
"def _get_output(self):\n return self.__output",
"def get_output(self):\n return self.output"
] | [
"0.59275943",
"0.5889858",
"0.58484614",
"0.57326436",
"0.56344503",
"0.55983704",
"0.55777663",
"0.5569992",
"0.5539353",
"0.5533122",
"0.54646003",
"0.54497415",
"0.5416543",
"0.5371535",
"0.53153986",
"0.5273433",
"0.52668524",
"0.522895",
"0.5222754",
"0.5222012",
"0.5202612",
"0.5199582",
"0.5198398",
"0.51868814",
"0.51868814",
"0.51868814",
"0.51868814",
"0.51868814",
"0.51868814",
"0.5155224"
] | 0.79215145 | 0 |
Cancels SSM document execution and waits till the SSM execution triggered by the 'TriggerRollback' step is completed. | def cancel_execution_with_rollback(self, execution_id: str):
execution_url = self.get_execution_url(execution_id)
try:
self.logger.info("Canceling SSM execution: {}".format(execution_url))
self.ssm_client.stop_automation_execution(AutomationExecutionId=execution_id, Type='Cancel')
self.wait_for_execution_completion(execution_id)
rollback_execution_id = self.get_step_output(execution_id, constants.rollback_step_name,
constants.rollback_execution_id_output_name)
if rollback_execution_id:
rollback_execution_url = self.get_execution_url(rollback_execution_id)
self.logger.info(f"Waiting [RollbackExecution] completed SSM execution: {rollback_execution_url}")
self.wait_for_execution_completion(rollback_execution_id)
except ClientError as e:
self.logger.error("Failed to cancel SSM execution [%s] due to: %s", execution_url, e.response)
raise e | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cancel(self):\n self.session.rollback()",
"def rollback(self) -> None:\n with self.lock:\n self.wait(self._rollback_gen())",
"def test_cancel(self) -> None:\n context: Dict[str,ArtifactDescriptor] = dict()\n cmd = pycell.python_cell(\n source='import time\\ntime.sleep(5)',\n validate=True\n )\n controller = FakeWorkflowController()\n self.backend.execute_async(\n task=TaskHandle(\n task_id='000',\n project_id=self.PROJECT_ID,\n controller=controller\n ),\n command=cmd,\n artifacts=context\n )\n time.sleep(1)\n self.backend.cancel_task('000')\n time.sleep(5)\n self.assertIsNone(controller.task_id)\n self.assertIsNone(controller.state)",
"def abort(self):\n if self.transaction:\n token = self.transaction\n self.transaction = None\n self.client.abort(self.creds, token, self.environment)",
"def rollback_action(args, kwargs, was_interrupted, result=None):\n raise NotImplementedError()",
"def cancel(self):\n self.succeeded = False\n self.reject()",
"def cancel(self):\n self.succeeded = False\n self.reject()",
"def cancel(self):\n self.succeeded = False\n self.reject()",
"def cancel(self):\n self.succeeded = False\n self.reject()",
"def cancel(self):\n\n query = f\"scancel {self.jobid}\"\n if self.cluster:\n query = f\"scancel {self.jobid} --clusters={self.cluster}\"\n\n cmd = BuildTestCommand(query)\n cmd.execute()\n logger.debug(f\"Cancelling Job: {self.jobid} by running: {query}\")\n\n self.poll()\n self._state = \"CANCELLED\"",
"async def cancel(id: UUID):\n async with get_client() as client:\n cancelling_state = State(type=StateType.CANCELLED, name=\"Cancelling\")\n try:\n result = await client.set_flow_run_state(\n flow_run_id=id, state=cancelling_state\n )\n except ObjectNotFound as exc:\n exit_with_error(f\"Flow run '{id}' not found!\")\n\n if result.status == SetStateStatus.ABORT:\n exit_with_error(\n f\"Flow run '{id}' was unable to be cancelled. Reason: '{result.details.reason}'\"\n )\n\n exit_with_success(f\"Flow run '{id}' was succcessfully scheduled for cancellation.\")",
"def _cancel(self):\n client = SBusClient(self.storlet_pipe_path)\n try:\n resp = client.cancel(self.task_id)\n if not resp.status:\n raise StorletRuntimeException('Failed to cancel task')\n except SBusClientException:\n raise StorletRuntimeException('Failed to cancel task')",
"def do_cancel(self):\r\n self.write({'cancelled': True})",
"def svn_fs_abort_txn(*args):\r\n return _fs.svn_fs_abort_txn(*args)",
"def rollback(self, stage, enodes, exception):",
"async def cancel_shielded_checkpoint(cls) -> None:\n with cls.create_cancel_scope(shield=True):\n await cls.sleep(0)",
"def cancel_work(self, message=\"This test was cancelled by lifeguard script because the test was interrupting progress (taking too long).\"):\n slick = SlickAsPy(self.environment.slickurl + \"/api\")\n status = slick.get_host_status(self.name)\n if status['currentWork'] is not None:\n slick.cancel_result(status['currentWork'], message)",
"async def test_cancel(self):\n await self.populate_test_data() # Populate data in foglamp.scheduled_processes\n\n scheduler = Scheduler(_address, _m_port)\n await scheduler.start()\n\n interval_schedule = IntervalSchedule()\n interval_schedule.name = 'cancel_test'\n interval_schedule.process_name = 'sleep30'\n interval_schedule.repeat = datetime.timedelta(seconds=0)\n\n await scheduler.save_schedule(interval_schedule)\n\n await asyncio.sleep(5)\n tasks = await scheduler.get_running_tasks()\n\n await scheduler.cancel_task(tasks[0].task_id) # Cancel a running task\n\n await self.stop_scheduler(scheduler)",
"def _doAbort(self):\n self._cmdAbort()",
"def do_cancel(order):\r\n self.gox.cancel(order.oid)",
"async def wait_for_cancel(self):\n await self._cancel",
"def rollback(self):\n self.success = False\n self.close()",
"def stop(self):\n self.auto_commit_interval = None",
"def cancel(self):\n self.waiter.set_result_if_pending(True)\n \n timer = self.timer\n if (timer is not None):\n self.timer = None\n timer.cancel()",
"def cancel():",
"def _do_rollback(self):\n self.backend.rollback()",
"def Abort(self):\n handler = self.get_command_object(\"Abort\")\n handler()",
"def cancel(self):\n self.waiter.set_result_if_pending([])\n \n timer = self.timer\n if (timer is not None):\n self.timer = None\n timer.cancel()",
"def cancel(self):\n self.__validate_engine()\n return pythonengine.cancelFEval(self._future)",
"def cancel(self):\n with self.handle_alert(confirm=False):\n self.q(css='button#confirm').first.click()"
] | [
"0.60639095",
"0.5988455",
"0.581982",
"0.5772934",
"0.5643513",
"0.56363535",
"0.56363535",
"0.56363535",
"0.56363535",
"0.5620277",
"0.56076694",
"0.5583796",
"0.55767006",
"0.5570505",
"0.55680966",
"0.5564965",
"0.5541141",
"0.5533509",
"0.55272144",
"0.54791105",
"0.5477595",
"0.5444307",
"0.54219234",
"0.5409412",
"0.53993344",
"0.5391486",
"0.53854376",
"0.53818005",
"0.5377055",
"0.53735197"
] | 0.62550855 | 0 |
Returns SSM document execution status for given execution id. | def _get_execution_status(self, execution_id, document_name=None):
execution = self.ssm_client.get_automation_execution(
AutomationExecutionId=execution_id
)
# TODO(semiond): we can remove document name as parameter, can take it by execution id.
document_name = document_name if document_name else execution['AutomationExecution']['DocumentName']
step_executions = execution['AutomationExecution']['StepExecutions']
step = self._get_step_by_status(step_executions, 'InProgress')
if step:
step_name = step['StepName']
self.logger.info(f'Waiting SSM document step [{document_name}>{step_name}] to be completed: '
f'{self.get_execution_step_url(execution_id, step_name, step_executions)}')
return execution['AutomationExecution']['AutomationExecutionStatus'] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def wait_for_execution_completion(self, execution_id, document_name=None):\n # Fetch ssm execution status\n status = self._get_execution_status(execution_id, document_name)\n\n # Wait for execution to be completed\n while status == 'InProgress' or status == 'Pending' or status == 'Cancelling' or status == 'Waiting':\n time.sleep(constants.sleep_time_secs)\n status = self._get_execution_status(execution_id, document_name)\n return status",
"def get_run_status(self, run_id):\n postresult = requests.get(\n f\"{self.proto}://{self.host}/ga4gh/wes/v1/runs/{run_id}/status\",\n headers=self.auth,\n )\n return wes_reponse(postresult)",
"def get_status(self, scenario_id):\n table = self.get_execute_table()\n try:\n return table.loc[int(scenario_id), \"status\"]\n except KeyError:\n raise Exception(f\"Scenario not found in execute list, id = {scenario_id}\")",
"def document_status(document_id: uuid.UUID, db: Session = Depends(get_db)):\n document_status = get_document_status(db, document_id)\n return document_status",
"def get_workflow_execution_state(self, workbook_name, execution_id):\n cntx = auth_context.ctx()\n kwargs = {'workbook_name': workbook_name,\n 'execution_id': execution_id}\n return self._client.call(\n cntx, 'get_workflow_execution_state', **kwargs)",
"def get_status(id):\n task = run_ctx_request.AsyncResult(id)\n if task.state == states.PENDING:\n abort(404)\n if task.state == states.RECEIVED or task.state == states.STARTED:\n return '', 202, {'Location': url_for('api.get_status', id=id)}\n return task.info",
"def _get_execution_step_status(self, execution_id, step_name):\n execution = self.ssm_client.get_automation_execution(\n AutomationExecutionId=execution_id\n )\n step_executions = execution['AutomationExecution']['StepExecutions']\n step = self._get_step_by_name(step_executions, step_name)\n if step:\n return step['StepStatus']\n return 'Pending'",
"def get_status_for_experiment(self, id):\n # open = 'open'\n running = 'running'\n finished = 'finished'\n waiting = 'waiting'\n\n experiment = Experiment.get(id)\n date_time_now = datetime.datetime.now()\n start_datetime = experiment.startDatetime\n end_datetime = experiment.endDatetime\n if start_datetime >= end_datetime:\n # validate this earlier\n return None\n if start_datetime <= date_time_now and date_time_now <= end_datetime:\n return running\n elif date_time_now > end_datetime:\n return finished\n elif date_time_now < start_datetime:\n return waiting\n return None",
"def get_status(self, run_id):\n return self.client._perform_json(\n \"GET\", \"/projects/%s/runnables/%s/state/%s\" % (self.project_key, self.runnable_type, run_id))",
"def get_status_by_id(cls, request, id):\n return request.dbsession.query(cls).get(id).status",
"def get_workflow_execution_state(self, cntx, **kwargs):\n workbook_name = kwargs.get('workbook_name')\n execution_id = kwargs.get('execution_id')\n\n execution = db_api.execution_get(execution_id)\n\n if not execution:\n raise exc.EngineException(\"Workflow execution not found \"\n \"[workbook_name=%s, execution_id=%s]\"\n % (workbook_name, execution_id))\n\n return execution[\"state\"]",
"def export_status(self, file_id):\n response = self._client.get('workbenches/export/%(file_id)s/status',\n path_params={'file_id': file_id})\n return loads(response.text).get('status')",
"def get(self, task_id, session=None):\n try:\n task = session.query(db.StatusTask).filter(db.StatusTask.id == task_id).one()\n except NoResultFound:\n raise NotFoundError('task status with id %d not found' % task_id)\n\n args = executions_parser.parse_args()\n\n # Pagination and sorting params\n page = args['page']\n per_page = args['per_page']\n sort_by = args['sort_by']\n sort_order = args['order']\n\n # Filter params\n succeeded = args.get('succeeded')\n produced = args.get('produced')\n start_date = args.get('start_date')\n end_date = args.get('end_date')\n\n if per_page > 100:\n per_page = 100\n\n start = per_page * (page - 1)\n stop = start + per_page\n descending = sort_order == 'desc'\n\n kwargs = {\n 'start': start,\n 'stop': stop,\n 'task_id': task_id,\n 'order_by': sort_by,\n 'descending': descending,\n 'succeeded': succeeded,\n 'produced': produced,\n 'start_date': start_date,\n 'end_date': end_date,\n 'session': session,\n }\n\n total_items = task.executions.count()\n\n if not total_items:\n return jsonify([])\n\n executions = [e.to_dict() for e in db.get_executions_by_task_id(**kwargs)]\n\n total_pages = int(ceil(total_items / float(per_page)))\n\n if page > total_pages:\n raise NotFoundError('page %s does not exist' % page)\n\n # Actual results in page\n actual_size = min(len(executions), per_page)\n\n # Get pagination headers\n pagination = pagination_headers(total_pages, total_items, actual_size, request)\n\n # Create response\n rsp = jsonify(executions)\n\n # Add link header to response\n rsp.headers.extend(pagination)\n return rsp",
"def get_operation_status(self, lifecycle_operation_occurrence_id):\n LOG.debug('\"Lifecycle Operation Occurrence Id\" is not implemented in OpenStack Tacker client!')\n LOG.debug('Will return the state of the resource with given Id')\n\n return constants.OPERATION_SUCCESS",
"async def get_task_status(task_id: TaskId):",
"def get_oozie_status(self, job_id):\n self.echo('Checking status...')\n status = self.call_return(\"oozie job -oozie \" + self.pylot_cfg.hdfs_oozie_interface + \" -info \" + job_id + \" | grep 'Status' | grep ':' | awk '{print $NF}'\")\n status = status.strip('\\n')\n return status",
"def check_query_status(self, query_execution_id):\n @backoff.on_predicate(backoff.fibo,\n lambda status: status in ('QUEUED', 'RUNNING'),\n max_value=10,\n jitter=backoff.full_jitter,\n on_backoff=_backoff_handler,\n on_success=_success_handler)\n def _get_query_execution(query_execution_id):\n return self.athena_client.get_query_execution(\n QueryExecutionId=query_execution_id\n )['QueryExecution']['Status']['State']\n\n return _get_query_execution(query_execution_id)",
"def get_saved_export_task_status(export_instance_id):\n download_data = _get_saved_export_download_data(export_instance_id)\n return get_task_status(download_data.task)",
"def status(self, command_id):\n path = \"commands/status?commandId=%s&contextId=%s&clusterId=%s\" % (\n command_id,\n self.context.id,\n self.cluster_id,\n )\n return self.get(self.url, \"1.2\", path, token=self.token)",
"def query_execution(self, execution_id: str):\n return self._call_txtrader_api('query_execution', {'id': execution_id})",
"def mesos_status(self, submissionId):\n get_tasks = self.driver.getTasks()['get_tasks']\n task_state = None\n\n tasks = get_tasks['tasks'] + get_tasks.get('completed_tasks')\n tasks_list = list(filter(lambda x: x['task_id']['value'] == submissionId, tasks))\n if len(tasks_list) > 0:\n task = tasks_list[0]\n task_state = task['state']\n self._log.debug(\"Task state = \" + task_state)\n else:\n self._log.debug(\"Task not found\")\n\n return task_state",
"def get_operation_status(self, lifecycle_operation_occurrence_id):\n return self.em_adapter.get_operation_status(lifecycle_operation_occurrence_id)",
"def get_task_status(task_id):\r\n mock_request = Mock()\r\n mock_request.REQUEST = {'task_id': task_id}\r\n response = instructor_task_status(mock_request)\r\n status = json.loads(response.content)\r\n return status",
"def get_receipt_id_status(self):\n return self.get_document_status_choice(self.receipt_id_status)",
"def get(self, task_id, session=None):\n try:\n task = session.query(db.StatusTask).filter(db.StatusTask.id == task_id).one()\n except NoResultFound:\n raise NotFoundError('task status with id %d not found' % task_id)\n\n args = tasks_parser.parse_args()\n include_execution = args.get('include_execution')\n\n st_task = task.to_dict()\n if include_execution:\n execution = task.executions.order_by(db.TaskExecution.start.desc()).first()\n st_task['last_execution'] = execution.to_dict() if execution else {}\n return jsonify(st_task)",
"def status(self) -> pulumi.Output['outputs.AssessmentStatusResponse']:\n return pulumi.get(self, \"status\")",
"def get_project_job_status(id):\n user = current_user\n\n if user.get_id() is not None:\n _tasks = user.get_project_tasks_in_progress(id)\n running_task_dicts = get_running_task_dicts(_tasks)\n\n _tasks = user.get_finished_project_tasks(id)\n finished_task_dicts = get_finished_task_dicts(_tasks)\n\n response_object = {\n 'running_tasks': running_task_dicts,\n 'finished_tasks': finished_task_dicts\n }\n else:\n response_object = {'status': 'error'}\n # print(jsonify(response_object))\n return jsonify(response_object)",
"def status(self) -> pulumi.Output['outputs.AssessmentStatusResponseResponse']:\n return pulumi.get(self, \"status\")",
"async def get_status(self, sms_id: int) -> SmsStatus:\n raise NotImplementedError",
"def get_query_execution(QueryExecutionId=None):\n pass"
] | [
"0.6311759",
"0.62737817",
"0.624106",
"0.6151789",
"0.61108553",
"0.6006403",
"0.59510785",
"0.5879625",
"0.585945",
"0.56999177",
"0.56882",
"0.5642697",
"0.5611689",
"0.5606932",
"0.55757666",
"0.5573651",
"0.5539164",
"0.5529198",
"0.5528047",
"0.5503078",
"0.5492322",
"0.548522",
"0.54529345",
"0.545168",
"0.5448983",
"0.5420939",
"0.54116094",
"0.53790987",
"0.5369325",
"0.5362738"
] | 0.7906296 | 0 |
Returns execution step status for given execution id and step name. | def _get_execution_step_status(self, execution_id, step_name):
execution = self.ssm_client.get_automation_execution(
AutomationExecutionId=execution_id
)
step_executions = execution['AutomationExecution']['StepExecutions']
step = self._get_step_by_name(step_executions, step_name)
if step:
return step['StepStatus']
return 'Pending' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_successfully_executed_steps_by_order(self, execution_id):\n execution = self.ssm_client.get_automation_execution(\n AutomationExecutionId=execution_id\n )\n step_executions = execution['AutomationExecution']['StepExecutions']\n step_names: List = []\n if step_executions:\n for s in step_executions:\n if s['StepStatus'] == 'Success':\n step_names.append(s['StepName'])\n return step_names",
"def _get_execution_status(self, execution_id, document_name=None):\n execution = self.ssm_client.get_automation_execution(\n AutomationExecutionId=execution_id\n )\n # TODO(semiond): we can remove document name as parameter, can take it by execution id.\n document_name = document_name if document_name else execution['AutomationExecution']['DocumentName']\n step_executions = execution['AutomationExecution']['StepExecutions']\n step = self._get_step_by_status(step_executions, 'InProgress')\n if step:\n step_name = step['StepName']\n self.logger.info(f'Waiting SSM document step [{document_name}>{step_name}] to be completed: '\n f'{self.get_execution_step_url(execution_id, step_name, step_executions)}')\n return execution['AutomationExecution']['AutomationExecutionStatus']",
"def wait_for_execution_step_status_is_in_progress(self, execution_id, document_name, step_name, time_to_wait):\n start_time = time.time()\n step_status = self._get_execution_step_status(execution_id, step_name)\n elapsed_time = time.time() - start_time\n\n # Wait for execution step to resolve in waiting or one of terminating statuses\n while step_status == 'Pending':\n if elapsed_time > time_to_wait:\n self.logger.exception(f'Execution step {step_name} for document {document_name} timed out')\n return 'WaitTimedOut'\n time.sleep(constants.sleep_time_secs)\n step_status = self._get_execution_step_status(execution_id, step_name)\n elapsed_time = time.time() - start_time\n return step_status",
"def _get_execution_status_from_past_execution(\n self,\n config_unit: ConfigUnit,\n step_name: str\n ) -> Optional[StepExecutionStatus]:\n execution_status = get_past_execution_status(config_unit, step_name)\n if execution_status is not None:\n return copy.deepcopy(execution_status)\n\n return self._get_cascaded_failure_execution_status(\n config_unit, step_name)",
"def _get_step_by_status(self, steps, status):\n if steps:\n for s in steps:\n if s['StepStatus'] == status:\n return s",
"def get_status(self, scenario_id):\n table = self.get_execute_table()\n try:\n return table.loc[int(scenario_id), \"status\"]\n except KeyError:\n raise Exception(f\"Scenario not found in execute list, id = {scenario_id}\")",
"def _get_step_by_name(self, steps, step_name):\n if steps:\n for s in steps:\n if s['StepName'] == step_name:\n return s",
"def _get_step_execution_index(self, step_executions: [], step_name):\n index = 1\n for step_execution in step_executions:\n if step_name == step_execution['StepName']:\n return index\n index += 1",
"def get_execution_step_url(self, execution_id: str, step_name: str, steps: [] = None) -> str:\n if not steps or len(steps) < 1:\n execution = self.ssm_client.get_automation_execution(AutomationExecutionId=execution_id)\n steps = execution['AutomationExecution']['StepExecutions']\n\n step = self._get_step_by_name(steps, step_name)\n if not step:\n raise Exception(f'SSM document step [{step_name}] does not exist in execution: '\n f'{self.get_execution_url(execution_id)}')\n step_execution_id = step['StepExecutionId']\n step_index = self._get_step_execution_index(steps, step_name)\n return f'https://{self.region}.console.aws.amazon.com/systems-manager/automation/execution/{execution_id}' \\\n f'/step/{step_index}/{step_execution_id}'",
"def wait_for_execution_step_status_is_terminal_or_waiting(self, execution_id, document_name,\n step_name, time_to_wait):\n start_time = time.time()\n step_status = self._get_execution_step_status(execution_id, step_name)\n elapsed_time = time.time() - start_time\n\n # Wait for execution step to resolve in waiting or one of terminating statuses\n while step_status == 'InProgress' or step_status == 'Pending' or step_status == 'Cancelling':\n if elapsed_time > time_to_wait:\n self.logger.exception(f'Execution step {step_name} for document {document_name} timed out')\n return 'WaitTimedOut'\n time.sleep(constants.sleep_time_secs)\n step_status = self._get_execution_step_status(execution_id, step_name)\n elapsed_time = time.time() - start_time\n return step_status",
"def get_step_by_name(self, name):\n self._validate_step_name(name)\n name = str(name)\n try:\n return self.all_upstream_steps[name]\n except KeyError as e:\n msg = 'No Step with name \"{}\" found. ' \\\n 'You have following Steps: {}'.format(name, list(self.all_upstream_steps.keys()))\n raise StepError(msg) from e",
"def get_workflow_execution_state(self, workbook_name, execution_id):\n cntx = auth_context.ctx()\n kwargs = {'workbook_name': workbook_name,\n 'execution_id': execution_id}\n return self._client.call(\n cntx, 'get_workflow_execution_state', **kwargs)",
"def _run(self, config_unit: ConfigUnit, step_name: str) \\\n -> StepExecutionStatus:\n step_type = get_step_type(config_unit, step_name)\n step = config_unit.get_steps(step_type)[step_name]\n\n # Run command\n command = '(cd \"' + config_unit.directory + '\"; \\n' + step.command +\\\n '\\n)'\n return_code, output, subprocess = sh(command, detached=step.background)\n successful = return_code == 0\n\n if subprocess.poll() is None:\n config_unit.pending_subprocesses.append(subprocess)\n\n step_execution_status = \\\n StepExecutionStatus(config_unit.name, step_name,\n successful=successful, output=output,\n step_type=step_type)\n\n return step_execution_status",
"def get_step_idx(self, step_id: str) -> int:\n return self.step_id2idx.get(step_id, None)",
"def getStep():\n # TODO: can there be non-Step logs?",
"def select_step_with_status(status, steps):\n for step in steps:\n assert isinstance(step, model.Step), \"TYPE-MISMATCH: \"+\\\n \"step.class={0}\".format(step.__class__.__name__)\n if step.status == status:\n return step\n # -- OTHERWISE: No step with the given status found.\n # KeyError(\"Step with status={0} not found\".format(status))\n return None",
"def get_job_step_dependent_status(self, job_id, step_id):\n try:\n result = self._session.query(\n StepDependencyEntity.parent_id,\n JobStepEntity.status\n ).\\\n filter(StepDependencyEntity.child_id == step_id).\\\n filter(StepDependencyEntity.parent_id == StepEntity.id).\\\n filter(StepEntity.id == JobStepEntity.step_id).\\\n filter(JobStepEntity.job_id == job_id).\\\n all()\n\n result_dict = [\n {\n 'parent_id': row[0],\n 'status': row[1]\n } for row in result\n ]\n except SQLAlchemyError as err:\n Log.an().error('sql exception [%s]', str(err))\n return False\n\n return result_dict",
"def progress_step(inst) -> Any:\n try:\n return TestStatus.stages().index(inst)\n except ValueError:\n return -1",
"def get_param(self, step_id, name):\n step_params = self._params.get(step_id)\n return step_params.get(name) if step_params else None",
"def get_step_by_name(self, step_name, case_sensitive=True):\n logger.debug(\"Steps are '%s', looking for step '%s'.\", [step.name for step in self.steps], step_name)\n for step in self.steps:\n if case_sensitive:\n if step.name == step_name:\n return step\n else:\n if step.name.lower() == step_name.lower():\n return step\n raise ValueError(\"A ticket step with the name '%s' could not be found.\", step_name)",
"def get_status(self):\n if self._is_running():\n return \"RUNNING\"\n elif self._has_error():\n # The run started but failed\n return \"FAILED\"\n elif self._is_finished():\n # The run was finished\n return \"FINISHED\"\n elif self.current_step() >= 0:\n # The run started at some point but was not completed\n return \"INCOMPLETE\"\n else:\n # The run did not start\n return \"NOT STARTED\"",
"def step_id(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"step_id\")",
"def get_workflow_execution_state(self, cntx, **kwargs):\n workbook_name = kwargs.get('workbook_name')\n execution_id = kwargs.get('execution_id')\n\n execution = db_api.execution_get(execution_id)\n\n if not execution:\n raise exc.EngineException(\"Workflow execution not found \"\n \"[workbook_name=%s, execution_id=%s]\"\n % (workbook_name, execution_id))\n\n return execution[\"state\"]",
"def get_status(id):\n task = run_ctx_request.AsyncResult(id)\n if task.state == states.PENDING:\n abort(404)\n if task.state == states.RECEIVED or task.state == states.STARTED:\n return '', 202, {'Location': url_for('api.get_status', id=id)}\n return task.info",
"def get_step_argument(self, name):\n sa = StepArgument.get(self._symbol)\n if sa:\n return sa.get(name)",
"def get_status_for_experiment(self, id):\n # open = 'open'\n running = 'running'\n finished = 'finished'\n waiting = 'waiting'\n\n experiment = Experiment.get(id)\n date_time_now = datetime.datetime.now()\n start_datetime = experiment.startDatetime\n end_datetime = experiment.endDatetime\n if start_datetime >= end_datetime:\n # validate this earlier\n return None\n if start_datetime <= date_time_now and date_time_now <= end_datetime:\n return running\n elif date_time_now > end_datetime:\n return finished\n elif date_time_now < start_datetime:\n return waiting\n return None",
"def _check_step_completed(self, i):\n\n module, _ = self._get_command_config(i)\n status = self._get_status_obj()\n submitted = self._check_jobs_submitted(status, module)\n if not submitted:\n return_code = 1\n else:\n return_code = self._get_module_return_code(status, module)\n\n return return_code",
"def get_step_output(self, execution_id, step_name, output_key):\n execution = self.ssm_client.get_automation_execution(\n AutomationExecutionId=execution_id\n )\n step_executions = execution['AutomationExecution']['StepExecutions']\n step = self._get_step_by_name(step_executions, step_name)\n if step and step.get('Outputs') and step.get('Outputs').get(output_key):\n return step['Outputs'][output_key][0]",
"def step_id(self):\n return self._step_id",
"def step_name(self):\n return self._step_name"
] | [
"0.6936771",
"0.6909824",
"0.6842648",
"0.6689437",
"0.65554833",
"0.6498717",
"0.6436358",
"0.63689363",
"0.62805057",
"0.6166205",
"0.61314565",
"0.5980744",
"0.5944853",
"0.58768517",
"0.5850708",
"0.58459425",
"0.5805325",
"0.57995373",
"0.5733456",
"0.5710865",
"0.56941634",
"0.56815916",
"0.56705415",
"0.56243557",
"0.5551072",
"0.55491215",
"0.55463076",
"0.5543362",
"0.55416673",
"0.55413246"
] | 0.85641456 | 0 |
Returns successfully executed steps in the order of their execution. | def get_successfully_executed_steps_by_order(self, execution_id):
execution = self.ssm_client.get_automation_execution(
AutomationExecutionId=execution_id
)
step_executions = execution['AutomationExecution']['StepExecutions']
step_names: List = []
if step_executions:
for s in step_executions:
if s['StepStatus'] == 'Success':
step_names.append(s['StepName'])
return step_names | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getSteps():",
"def test_by_order(self):\n # addon_executor = AddonExecutor(execute_order, stop_order)\n # self.assertEqual(expected, addon_executor.execute_with_order(addon, execute_order, stop_order))\n self.run_mgr.by_order(self.cli_inst, ['execute', 'start'], ['stop'])\n output = self._get_lines_as_list(sys.stdout)\n\n self.assertTrue(output[0].startswith('Execute'))\n self.assertTrue(output[1].startswith('Start'))\n self.assertTrue(output[2].startswith('Stop'))",
"def get_succeeded_actions(self):\n return self.success",
"def excecute(self):\r\n self.initialize()\r\n self.addteststeps()\r\n for teststep in self.test_steps_list:\r\n if teststep.run() == TestStatus.PASS:\r\n logging.info(\"test {} passed the test\".format(teststep.stepname))\r\n self.result = TestStatus.PASS\r\n else:\r\n logging.warn(\"test {} failed the test\".format(teststep.stepname))\r\n self.result = TestStatus.FAIL\r\n self.cleanup()\r\n return self.result",
"def _check_step_completed(self, i):\n\n module, _ = self._get_command_config(i)\n status = self._get_status_obj()\n submitted = self._check_jobs_submitted(status, module)\n if not submitted:\n return_code = 1\n else:\n return_code = self._get_module_return_code(status, module)\n\n return return_code",
"def _GetStepsAndTests(failed_steps):\n\n failed_steps_and_tests = []\n\n if not failed_steps:\n return failed_steps_and_tests\n\n for step_name, step in failed_steps.iteritems():\n for test_name in step.get('tests', [None]):\n failed_steps_and_tests.append([step_name, test_name])\n\n return sorted(failed_steps_and_tests)",
"def test_sequence_done(self):\n self.t(\"1,2 done\")\n code, out, err = self.t(\"_get 1.status 2.status\")\n self.assertEqual(\"completed completed\\n\", out)",
"def steps_done(self):\n with _MonitorEnv._lock:\n return self._steps_done",
"def stepFinished(build, step, results):",
"def execution_order(self) -> typing.Iterator:\n return self.execution_order_strategy_class(self._steps)",
"def step_async(self, actions):",
"def _run_one_phase(self, min_steps, statistics, run_mode_str,\n agent_type='active'):\n step_count = 0\n num_episodes = 0\n sum_returns = 0.\n\n while step_count < min_steps:\n episode_length, episode_return = self._run_one_episode(agent_type)\n statistics.append({\n '{}_{}_episode_lengths'.format(run_mode_str,\n agent_type): episode_length,\n '{}_{}_episode_returns'.format(run_mode_str,\n agent_type): episode_return\n })\n step_count += episode_length\n sum_returns += episode_return\n num_episodes += 1\n # We use sys.stdout.write instead of logging so as to flush frequently\n # without generating a line break.\n sys.stdout.write('Steps executed: {} '.format(step_count) +\n 'Episode length: {} '.format(episode_length) +\n 'Return: {}\\r'.format(episode_return))\n sys.stdout.flush()\n return step_count, sum_returns, num_episodes",
"def step(self, actions):\n self.step_async(actions)\n return self.step_wait()",
"def step(self, actions):\n self.step_async(actions)\n return self.step_wait()",
"def get_next_steps(self, steps):\n step_list = []\n\n steps_remaining = set(steps.keys())\n counter = 0\n max_counter = 10000\n next_steps = set()\n\n for step in steps_remaining:\n dependencies = steps[step]\n if len(dependencies) == 0:\n next_steps.add(step)\n\n # this is the list of things that can be take for work now\n return sorted(next_steps)",
"def test_arrange_test_result_one_module(self):\n pass_1 = self._create_test_result(status=test_runner_base.PASSED_STATUS)\n pass_2 = self._create_test_result(status=test_runner_base.PASSED_STATUS)\n pass_3 = self._create_test_result(status=test_runner_base.PASSED_STATUS)\n fail_1 = self._create_test_result(status=test_runner_base.FAILED_STATUS)\n fail_2 = self._create_test_result(status=test_runner_base.FAILED_STATUS)\n ignore_1 = self._create_test_result(status=test_runner_base.IGNORED_STATUS)\n reporter_1 = result_reporter.ResultReporter()\n reporter_1.all_test_results.extend([pass_1, pass_2, pass_3])\n reporter_2 = result_reporter.ResultReporter()\n reporter_2.all_test_results.extend([fail_1, fail_2, ignore_1])\n info_dict = {}\n aei.AtestExecutionInfo._arrange_test_result(info_dict, [reporter_1, reporter_2])\n expect_summary = {aei._STATUS_IGNORED_KEY : 1,\n aei._STATUS_FAILED_KEY : 2,\n aei._STATUS_PASSED_KEY : 3}\n self.assertEqual(expect_summary, info_dict[aei._TOTAL_SUMMARY_KEY])",
"def test_result_order(env):\n timeouts = list(reversed([env.timeout(delay) for delay in range(3)]))\n\n def p(env, timeouts):\n results = yield env.all_of(timeouts)\n assert list(results.keys()) == timeouts\n\n env.process(p(env, timeouts))\n env.run()",
"def print_results(self):\n for test_cases in self._tests:\n for test_case in test_cases:\n print('{} ...ok'.format(test_case.get_name()))\n return 0",
"def passed(self):\n return self.is_executed and self.is_executed_ok and self.is_equal_result",
"def checkStep(rc, steps, run_status, prog_args):\n\n if (rc == FAILURE) or (rc == EXCEPTION):\n buildException(run_status, 'previous command failed')\n else:\n defer.maybeDeferred(lambda x: startNextStep(x,\n run_status, prog_args), steps)",
"def stages() -> List[Tuple[str, str]]:\n return [TestStatus.preparation, TestStatus.testing, TestStatus.completed]",
"def try_execute(self):\n executed_tasks_count = 0\n for k, v in iter(self.tasks.items()):\n if v.running and not v.finished:\n executed_tasks_count += 1\n\n if executed_tasks_count == self.task_count:\n self.start_time = time.time()\n self.migration_count += 1\n self.started = True\n self.running = True\n return True, executed_tasks_count\n return False, executed_tasks_count",
"def execution_order(phases):\r\n\r\n # Its key that we process phase dependencies depth first to maintain initial phase ordering as\r\n # passed in when phase graphs are dependency disjoint. A breadth first sort could mix next\r\n # order executions and violate the implied intent of the passed in phase ordering.\r\n\r\n processed = set()\r\n\r\n def order(_phases):\r\n for phase in _phases:\r\n if phase not in processed:\r\n processed.add(phase)\r\n for goal in phase.goals():\r\n for dep in order(goal.dependencies):\r\n yield dep\r\n yield phase\r\n\r\n for ordered in order(phases):\r\n yield ordered",
"def execute(self):\n while self.index < len(self.tasklist):\n res = self.step()\n self.logger.debug('SeqSet task %i has completed with result %s' %\n (self.index, res))\n\n # Returns result of last task to quit\n return res",
"def test_single_passing_test_step_for_asc(self, single_passing_test_step_for_asc):\n\n # Setup\n single_passing_test_step_for_asc.assert_invoke_zigzag()\n test_runs = single_passing_test_step_for_asc.tests[0].qtest_test_runs\n\n # Expectations\n test_run_status_exp = 'Passed'\n\n # Test\n assert len(test_runs) == 1\n assert len(single_passing_test_step_for_asc.tests) == 1\n pytest.helpers.assert_qtest_property(test_runs[0], 'Status', test_run_status_exp)",
"def test_mixed_status_test_steps_for_asc(self, mixed_status_test_steps_for_asc):\n\n # Setup\n mixed_status_test_steps_for_asc.assert_invoke_zigzag()\n test_runs = mixed_status_test_steps_for_asc.tests[0].qtest_test_runs\n\n # Expectations\n test_run_status_exp = 'Failed'\n test_failure_msg_regex_exp = r'Test execution state: failure'\n\n # Test\n assert len(test_runs) == 1\n assert len(mixed_status_test_steps_for_asc.tests) == 3\n pytest.helpers.assert_qtest_property(test_runs[0], 'Status', test_run_status_exp)\n pytest.helpers.assert_qtest_property_search(test_runs[0], 'Failure Output', test_failure_msg_regex_exp)",
"def step(self, actions: np.ndarray) -> Tuple[np.ndarray, np.ndarray, bool, str]:\n\n int_actions = [np.where(r == 1)[0][0] for r in np.vstack(actions)]\n for agent in range(self.agents):\n action = int_actions[agent]\n valid_move = self.game.submit_move_for_agent(\n agent, constants.ACTIONS[action]\n )\n\n self.turns_count += 1\n done = np.array([False] * self.agents).reshape(1, -1)\n\n if self.game.check_all_arrived() or self.turns_count >= self.max_turns:\n if self.game.check_all_arrived():\n print(\"reached goals after %i \" % self.turns_count)\n done = np.array([True] * self.agents).reshape(1, -1)\n\n rewards = self.compute_reward(done)\n\n if self.use_alternative_states:\n states = self.make_alternative_states()\n else:\n states = self.make_states()\n\n return states, rewards, done, {\"Not Implemented\": \"\"}",
"def active_result(self):\n return self.step_client.previous_step_result()",
"def __get_final_successor_and_start(actions):\n branch_start_actions = []\n final_successor_action = []\n for steps in actions:\n steps_action = get_action_type(action=steps)\n if \"StartAction\" in steps_action:\n branch_start_actions.append(steps)\n elif \"StopAction\" in steps_action:\n final_successor_action.append(steps)\n return branch_start_actions, final_successor_action",
"async def _check_steps(self):\n # Bools of collections that were started/finished\n started = [x.ec2_collection.started for x in self._set_links]\n finished = [x.ec2_collection.finished for x in self._set_links]\n\n # If all steps were started and finished, the run is complete.\n if all(started) and all(finished):\n return True\n\n # Locate all steps that have completed\n dones = await gen.multi([self._is_done(x) for x in self._set_links])\n dones = zip(dones, self._set_links)\n\n # Send shutdown to steps that have completed, we can shut them all\n # down in any order so we run in parallel\n async def shutdown(setlink):\n try:\n await self._stop_step(setlink)\n except:\n logger.error(\"Exception in shutdown.\", exc_info=True)\n\n setlink.step_record.completed_at = datetime.utcnow()\n self._db_session.commit()\n await gen.multi([shutdown(s) for done, s in dones if done])\n\n # Start steps that should be started, ordered by delay\n starts = list(filter(self._should_start, self._set_links))\n starts.sort(key=lambda x: x.step.run_delay)\n\n # Start steps in order of lowest delay first, to ensure that steps\n # started afterwards can use DNS names/etc from prior steps\n for setlink in starts:\n # We tag the collection here since this may not actually run\n # until another time through this loop due to async nature\n setlink.ec2_collection.local_dns = bool(self._dns_map)\n\n try:\n await self._start_step(setlink)\n except:\n logger.error(\"Exception starting.\", exc_info=True)\n setlink.step_record.failed = True\n\n setlink.step_record.started_at = datetime.utcnow()\n self._db_session.commit()\n\n # If this collection reg's a dns name, add this collections\n # ip's to the name\n if setlink.step.dns_name:\n ips = [x.instance.ip_address for x\n in setlink.ec2_collection.instances]\n self._dns_map[setlink.step.dns_name] = ips\n return False"
] | [
"0.6670046",
"0.6322163",
"0.6297291",
"0.6226086",
"0.619969",
"0.6090397",
"0.6030043",
"0.6016758",
"0.6015137",
"0.59943765",
"0.5981895",
"0.5952482",
"0.5910737",
"0.5910737",
"0.5741957",
"0.57394445",
"0.57195204",
"0.5695635",
"0.5670944",
"0.5652498",
"0.56230104",
"0.5622694",
"0.5618131",
"0.56166667",
"0.5599631",
"0.55990577",
"0.5588391",
"0.55828625",
"0.557094",
"0.55559933"
] | 0.6954507 | 0 |
Returns SSM document step by given status. | def _get_step_by_status(self, steps, status):
if steps:
for s in steps:
if s['StepStatus'] == status:
return s | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def select_step_with_status(status, steps):\n for step in steps:\n assert isinstance(step, model.Step), \"TYPE-MISMATCH: \"+\\\n \"step.class={0}\".format(step.__class__.__name__)\n if step.status == status:\n return step\n # -- OTHERWISE: No step with the given status found.\n # KeyError(\"Step with status={0} not found\".format(status))\n return None",
"def select_steps_with_status(status, steps):\n return [ step for step in steps if step.status == status ]",
"def progress_step(inst) -> Any:\n try:\n return TestStatus.stages().index(inst)\n except ValueError:\n return -1",
"def getCurrentStep():",
"def _get_step_by_name(self, steps, step_name):\n if steps:\n for s in steps:\n if s['StepName'] == step_name:\n return s",
"def get_document_stage(document):\n if document['name'] in PUBLISHED_DOCS:\n return constants.CHAPTER_STATE_PUBLISHED\n if document['status'] == constants.CHAPTER_STATUS_COMPLETED:\n return constants.CHAPTER_STATE_FINAL_EDITING\n translation_stage = document['workflowStages'][0]\n if translation_stage['progress'] > 99:\n return constants.CHAPTER_STATE_EDITING\n return constants.CHAPTER_STATE_TRANSLATION",
"def getStep():\n # TODO: can there be non-Step logs?",
"def _get_module_status(status, i):\n\n # iterate through modules and find the one that was run previously\n for module_status in status.data.values():\n i_current = module_status.get('pipeline_index', -99)\n if str(i) == str(i_current):\n out = module_status\n break\n\n return out",
"def _get_execution_step_status(self, execution_id, step_name):\n execution = self.ssm_client.get_automation_execution(\n AutomationExecutionId=execution_id\n )\n step_executions = execution['AutomationExecution']['StepExecutions']\n step = self._get_step_by_name(step_executions, step_name)\n if step:\n return step['StepStatus']\n return 'Pending'",
"def _get_job_status(module_status, option='all'):\n\n # find the preceding job (1st is used, should be one job in most cases)\n if option == 'first':\n for job, job_status in module_status.items():\n if job != 'pipeline_index':\n out = job_status\n break\n elif option == 'all':\n out = []\n for job, job_status in module_status.items():\n if job != 'pipeline_index':\n out.append(job_status)\n else:\n raise KeyError('Did not recognize pipeline job status request '\n 'for \"{}\"'.format(option))\n return out",
"def get_step(self):\n return self.step",
"def get_step(self):\n return self.step",
"def get_by_status(status):\n return list(tasks.find({'status': status}))",
"def step(self, uuid):\n return self.__get_object(self.get(\"steps/{}\".format(uuid)))",
"def _status_to_state(status):\n if status == 'failed':\n return Finding.State.ACTIVE\n elif status == 'passed' or status == 'skipped':\n return Finding.State.INACTIVE\n else:\n return Finding.State.STATE_UNSPECIFIED",
"def getNextStep(self, source, target, current_step):\n wf_name = self.getWorkflowName(source, target)\n steps = self.getWorkflowSteps(wf_name)\n current_idx = self._getCurrentIdx(steps, current_step) \n next_idx = min(current_idx+1, len(steps)+1)\n return steps[next_idx]",
"def _get_execution_status(self, execution_id, document_name=None):\n execution = self.ssm_client.get_automation_execution(\n AutomationExecutionId=execution_id\n )\n # TODO(semiond): we can remove document name as parameter, can take it by execution id.\n document_name = document_name if document_name else execution['AutomationExecution']['DocumentName']\n step_executions = execution['AutomationExecution']['StepExecutions']\n step = self._get_step_by_status(step_executions, 'InProgress')\n if step:\n step_name = step['StepName']\n self.logger.info(f'Waiting SSM document step [{document_name}>{step_name}] to be completed: '\n f'{self.get_execution_step_url(execution_id, step_name, step_executions)}')\n return execution['AutomationExecution']['AutomationExecutionStatus']",
"def find_id_medio_fisico(status):\n return 'do some magic!'",
"def step(self, sess, step):\n\t\tif self.is_training:\n\t\t\tloss, optim, summaries = sess.run(\n\t\t\t\t\t[self.loss, self.optim, self.summary_op])\n\t\t\tself.writer.add_summary(summaries, global_step=step)\n\t\telse:\n\t\t\tse = sess.run([self.se])[0]\n\n\t\t\treturn se",
"def set_step_status(self, step_summary: str, status: str = Status.PASSED,\n message: str = None):\n temp = {Result.__STEP: step_summary, Result.__STATUS: status,\n Result.__MESSAGE: message}\n self.__run.append(temp)",
"def getStatusWord(status):\n statusWord = 'owned'\n if status == 0:\n return 'wished'\n elif status == 1:\n return 'ordered'\n return statusWord",
"def startNextStep(steps, run_status, prog_args):\n\n def getNextStep():\n if not steps:\n return None\n return steps.pop(0)\n try:\n s = getNextStep()\n if hasattr(prog_args, 'step_regex'):\n while s and not prog_args.step_regex.search(s.name):\n print >>sys.stderr, 'skipping step: ' + s.name\n s = getNextStep()\n if hasattr(prog_args, 'stepreject_regex'):\n while s and prog_args.stepreject_regex.search(s.name):\n print >>sys.stderr, 'skipping step: ' + s.name\n s = getNextStep()\n except StopIteration:\n s = None\n if not s:\n return finished()\n\n print >>sys.stderr, 'performing step: ' + s.name,\n s.step_status.stepStarted()\n d = defer.maybeDeferred(s.startStep, s.buildslave)\n d.addCallback(lambda x: checkStep(x, steps,\n run_status, prog_args))\n d.addErrback(lambda x: buildException(run_status, x))\n return d",
"def getStepRecord(self, phase):\n ent = self._records.get(phase, None)\n if hasattr(ent, \"append\"): # Yurk!\n seq = ent\n for ent in seq:\n if ent.hasFailed:\n return ent\n return seq.entries[0]\n if hasattr(ent, \"entries\"): # Double yurk!\n seq = ent.entries\n for ent in seq:\n if ent.hasFailed:\n return ent\n if seq:\n return seq[0]\n return\n return ent",
"def dump_step(self,status):\n\n L = self.level\n stats.add_to_stats(step=status.step, time=status.time, type='timing_step', value=time.time()-self.t0)\n stats.add_to_stats(step=status.step, time=status.time, type='niter', value=status.iter)\n stats.add_to_stats(step=status.step, time=status.time, type='residual', value=L.status.residual)\n\n pass",
"def active_result(self):\n return self.step_client.previous_step_result()",
"def step(self):\n return self._step",
"def next_status(self):\n if self.status == self.DRAFT:\n self._advance_to_registration()\n elif self.status == self.REGISTRATION:\n self._advance_to_pending()\n elif self.status == self.PENDING:\n self._advance_to_running()",
"def get_first_step(self):\n return self.get_step_by_index(0)",
"def select_by_status(status):\n sql = 'checkStatus'\n val = [status]\n rows = DBconnector.call_procedure(sql, val)\n for r in rows:\n return _wrap_in_parcel_list(r.fetchall())",
"def get_status(cls, request, status):\n event_status = request.dbsession.query(cls)\\\n .filter_by(status=status).one()\n return event_status"
] | [
"0.6839485",
"0.6071293",
"0.55562955",
"0.5460878",
"0.53241044",
"0.5310991",
"0.53065586",
"0.52873224",
"0.5144956",
"0.5040669",
"0.503265",
"0.503265",
"0.5010804",
"0.49962842",
"0.49321508",
"0.48371494",
"0.4831955",
"0.4813281",
"0.48044667",
"0.47959918",
"0.4792894",
"0.47602352",
"0.47537318",
"0.47333282",
"0.4728244",
"0.47255507",
"0.47227198",
"0.4722065",
"0.47217023",
"0.46959275"
] | 0.7587236 | 0 |
Returns SSM document step by a given name. | def _get_step_by_name(self, steps, step_name):
if steps:
for s in steps:
if s['StepName'] == step_name:
return s | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_step_by_name(self, name):\n self._validate_step_name(name)\n name = str(name)\n try:\n return self.all_upstream_steps[name]\n except KeyError as e:\n msg = 'No Step with name \"{}\" found. ' \\\n 'You have following Steps: {}'.format(name, list(self.all_upstream_steps.keys()))\n raise StepError(msg) from e",
"def get_step_by_name(self, step_name, case_sensitive=True):\n logger.debug(\"Steps are '%s', looking for step '%s'.\", [step.name for step in self.steps], step_name)\n for step in self.steps:\n if case_sensitive:\n if step.name == step_name:\n return step\n else:\n if step.name.lower() == step_name.lower():\n return step\n raise ValueError(\"A ticket step with the name '%s' could not be found.\", step_name)",
"def step_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"step_name\")",
"def get_task_by_name(self, task_name):\n for task in self.tasks:\n if task.name == task_name:\n logger.debug(\"Returning task with name '%s': '%s'\", task_name, task.to_xml_string())\n return task\n raise ValueError(\"A step task with the name {} can not be found.\".format(task_name))",
"def get_step_argument(self, name):\n sa = StepArgument.get(self._symbol)\n if sa:\n return sa.get(name)",
"def step_name(self):\n return self._step_name",
"def get_document(name):\n document = [d for d in documents if d.name == name]\n if len(document) > 0:\n return document[0]",
"def get_by_name(name):\n for scenario in discover.itersubclasses(Scenario):\n if name == scenario.__name__:\n return scenario\n raise exception.NoSuchScenario(name=name)",
"def get_named_document(self, entity, name):\n view = self.db.view(\"%s/name\" % entity, include_docs=True)\n result = view[name]\n if len(result) != 1:\n raise ValueError(\"no such %s document '%s'\" % (entity, name))\n return result.rows[0].doc",
"def step(self, uuid):\n return self.__get_object(self.get(\"steps/{}\".format(uuid)))",
"def getStep():\n # TODO: can there be non-Step logs?",
"def step( self, name ):\n duration = self.summarize_step( start=self.step_start, step_name=name, level=self.level )\n now = time.time()\n self.step_start = now\n return duration",
"def getCurrentStep():",
"def getName(self):\n return self.stepDictionary[self.getLocation()]",
"def get_execution_step_url(self, execution_id: str, step_name: str, steps: [] = None) -> str:\n if not steps or len(steps) < 1:\n execution = self.ssm_client.get_automation_execution(AutomationExecutionId=execution_id)\n steps = execution['AutomationExecution']['StepExecutions']\n\n step = self._get_step_by_name(steps, step_name)\n if not step:\n raise Exception(f'SSM document step [{step_name}] does not exist in execution: '\n f'{self.get_execution_url(execution_id)}')\n step_execution_id = step['StepExecutionId']\n step_index = self._get_step_execution_index(steps, step_name)\n return f'https://{self.region}.console.aws.amazon.com/systems-manager/automation/execution/{execution_id}' \\\n f'/step/{step_index}/{step_execution_id}'",
"def get_param(self, step_id, name):\n step_params = self._params.get(step_id)\n return step_params.get(name) if step_params else None",
"def get_story_by_name(self, story_name):\n return Story.get_by_name(story_name)",
"def get_step(self):\n return self.step",
"def get_step(self):\n return self.step",
"def step_name(self):\n return \"main\"",
"def get_segment_by_name(self, name):\n for seg in self.segments:\n if seg.segname == name:\n return seg\n\n return None",
"def step_name(self, index):\n step_label = self.get_step_label_at_index(index)\n if step_label is not None:\n return step_label\n return self.get_step_class_at_index(index).get_lookup_class().__name__",
"def Do(name, *nets_or_steps):\n nets_or_steps = _MakeList(nets_or_steps)\n if (len(nets_or_steps) == 1 and isinstance(\n nets_or_steps[0], core.ExecutionStep)):\n return nets_or_steps[0]\n else:\n return core.scoped_execution_step(\n _get_next_step_name('Do', name), nets_or_steps)",
"def getbyname(self, name, doctype='experiment'):\n\n if doctype not in self.documents:\n self.documents[doctype] = esd.search(self.source, doctype)\n return self.documents[doctype].load_document(name)",
"def get_seq_by_name(self, name: str) -> Optional['Sequencer']:\n for seq in self.Sequencers:\n if seq.Name.lower() == name.lower():\n return seq\n return None",
"def extract_step(path):\n file_name = os.path.basename(path)\n return int(file_name.split('-')[-1])",
"def get_scn(name: str) -> typing.TextIO:\n scenario = os.path.sep.join((os.path.abspath(__file__).split(os.path.sep)[:-1] + ['scenarios', name + \".scn\"]))\n if not os.path.exists(scenario):\n raise IOError(\"Scenario '{}' is not accessible.\".format(scenario))\n\n return open(scenario, 'r')",
"def get_pars(self, step_name):\n step_list = ['alignment', 'astrodrizzle', 'catalog generation', 'quality control']\n if step_name in step_list:\n return self.pars[step_name].outpars\n else:\n log.critical(\"'{}' is not a recognized step name.\".format(step_name))\n log.critical(\"Recognized step names: \\n{}\".format(str(step_list)[2:-2].replace(\"', '\", \"\\n\")))\n sys.exit(1)",
"def step(step_name, extra_types=None):\n\n def decorator(func):\n # Register the step, other way return the function unchanged\n step_function = StepFunction(func, step_name, extra_types)\n # Check for similar steps, in both directions\n step_function.search_and_report_similar()\n # Register it\n data.add_step(step_function)\n return func\n\n return decorator",
"def get_stimulus_index(data, stim_name):\n for i_stim, stim_data in enumerate(data['stimuli']):\n if stim_name in stim_data['stim_path']:\n return i_stim\n\n raise KeyError('Stimulus with stim_name={} not found!'.format(stim_name))"
] | [
"0.69454134",
"0.6490663",
"0.61872965",
"0.6077294",
"0.6018053",
"0.58520585",
"0.57159656",
"0.5648309",
"0.5618685",
"0.5614397",
"0.5590065",
"0.54544705",
"0.5426001",
"0.54098606",
"0.5397997",
"0.5348861",
"0.53418255",
"0.5339116",
"0.5339116",
"0.53089184",
"0.5294009",
"0.52896565",
"0.5230244",
"0.52247214",
"0.52044743",
"0.51281244",
"0.50635076",
"0.5059036",
"0.50521743",
"0.5037523"
] | 0.7589914 | 0 |
Returns True if SSM document for given name exists, False otherwise. | def _document_exists(self, document_name):
return len(self.ssm_client.list_document_versions(Name=document_name)['DocumentVersions']) >= 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def object_exists(self, name: str):\n file_path = self.__get_file_path(name)\n return os.path.exists(file_path)",
"def exists(self) -> bool:\n doc_ref = self.doc_ref\n if isinstance(doc_ref, DocumentReference):\n return doc_ref.get().exists\n return False",
"def document_exists(self, document_id):\n document_id = document_id.strip()\n if not document_id:\n return False\n\n connection = self.__get_database_connection()\n response = self.__make_request(connection,\n '/%s/%s' % (self.database_name, document_id),\n method='HEAD')\n return bool(response)",
"def exists(self, name):\n return self.backend.exists(name)",
"def exists(self, name):\n try:\n self.container.get_object(name)\n return True\n except NoSuchObject:\n return False",
"def exists(self, name):\n raise NotImplementedError()",
"def exists(self, name):\n # django 判断文件名是否可用\n return False # 代表就是可用的新文件",
"def object_exists(self, fname):\n return False",
"def definition_exists(name: str) -> bool:\n try:\n return bool(lookup_definition(name))\n except:\n return False",
"def object_exists(self, fname):\n return True",
"def exist(name: str) -> bool:\n return bool(os.path.exists(name))",
"def exists(self, name):\n self.connect()\n self._write('EXISTS %s\\r\\n' % name)\n return self._get_numeric_response()",
"def exists(self, name):\n return self.endpoint.exists(name)",
"def document_exists(self, docid):\n raise NotImplementedError",
"def object_exists(self, fname):\n return self.object_exists",
"def doc_exist(self, docid):\n doc = Document(self.cloudant_database, docid)\n return doc.exists()",
"def is_section_exist(self, section_name: str) -> bool:\n pass",
"def exists(self, name):\n assert name, \"Must input a valid dataset name.\"\n return name in self.manager.data[\"dataset\"]",
"async def exists(self, tag_name):\n try:\n if await self.get_id(tag_name):\n return True\n except RtbDoesntExists:\n return False",
"def exists(self, key_name: str) -> bool:\n pass",
"def exists(self):\n return True",
"def exists(self):\n return True",
"def exists(request, pagename, filename):\n fpath = getFilename(request, pagename, filename)\n return os.path.exists(fpath)",
"def __contains__(self, name):\n return (self.model_dir / (str(name) + '.pkl')).exists()",
"def exists(request_handler, name) -> bool:\n data = request_handler.make_request(\n 'GET',\n '/reports'\n )\n for item in data:\n if item['reportName'] == name:\n return True\n return False",
"def exists(self, name):\n return name in self.cache",
"def has_file(self, name):\n return name in self.files",
"def notebook_exists(self, name, path=''):\n\n\t\tos_path = self._get_os_path(name, path=path)\n\t\treturn key_exists(self.bucket, os_path)",
"def file_exists(self):\n if os.path.isfile(self.file_name):\n return True\n else:\n return False",
"def entry_exists(title):\n try:\n f = default_storage.open(f\"entries/{title}.md\")\n return True\n\n except FileNotFoundError:\n return False"
] | [
"0.6926537",
"0.67250514",
"0.6712991",
"0.67112947",
"0.6679739",
"0.6626868",
"0.6602372",
"0.65388644",
"0.6513464",
"0.65095633",
"0.650585",
"0.649972",
"0.64888805",
"0.6445123",
"0.64327896",
"0.6388695",
"0.6188683",
"0.61496204",
"0.61428565",
"0.6134926",
"0.60890263",
"0.60890263",
"0.60878944",
"0.6087399",
"0.60873246",
"0.6077549",
"0.6059625",
"0.6048553",
"0.60375875",
"0.6036162"
] | 0.7727576 | 0 |
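Usage sketch (not part of the record above): a minimal, self-contained boto3 example of the same SSM document existence check. The document name and region are placeholders, and the InvalidDocument handling is an assumption, since the record's method lets that exception propagate.

import boto3

def document_exists(ssm_client, document_name: str) -> bool:
    # ListDocumentVersions returns at least one entry for an existing document;
    # it raises InvalidDocument when no document with that name exists.
    try:
        versions = ssm_client.list_document_versions(Name=document_name)
        return len(versions['DocumentVersions']) >= 1
    except ssm_client.exceptions.InvalidDocument:
        return False

if __name__ == '__main__':
    client = boto3.client('ssm', region_name='us-east-1')        # assumed region
    print(document_exists(client, 'my-automation-document'))     # hypothetical document name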
Return SSM document execution step URL. | def get_execution_step_url(self, execution_id: str, step_name: str, steps: [] = None) -> str:
if not steps or len(steps) < 1:
execution = self.ssm_client.get_automation_execution(AutomationExecutionId=execution_id)
steps = execution['AutomationExecution']['StepExecutions']
step = self._get_step_by_name(steps, step_name)
if not step:
raise Exception(f'SSM document step [{step_name}] does not exist in execution: '
f'{self.get_execution_url(execution_id)}')
step_execution_id = step['StepExecutionId']
step_index = self._get_step_execution_index(steps, step_name)
return f'https://{self.region}.console.aws.amazon.com/systems-manager/automation/execution/{execution_id}' \
f'/step/{step_index}/{step_execution_id}' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def invoke_url(self) -> pulumi.Output[str]:\n return self.stage.invoke_url # type: ignore[no-any-return]",
"def get_redirect_url(self, *args, **kwargs):\n return self.document.file.url",
"def runbook_url(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"runbook_url\")",
"def prog_url(self):\n # type: () -> string_types\n return self._prog_url",
"def get_execution_url(self, execution_id: str) -> str:\n return f'https://{self.region}.console.aws.amazon.com/systems-manager/automation/execution/{execution_id}'",
"def runbook_url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"runbook_url\")",
"def runbook_url(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"runbook_url\")",
"def targetUrl(self):\n domain = self.site.get('DOMAIN', None)\n if not domain:\n return u''\n return SiteProcessUtils.getUrlFromPath(self.site, domain, self.targetPath)",
"def _get_step_output_uri(self, step):\n # parse in reverse order, in case there are multiple -output args\n args = step.args()\n for i, arg in reversed(list(enumerate(args[:-1]))):\n if arg == '-output':\n return args[i + 1]\n else:\n return None",
"def get_target_url(self):\n return self.TARGET_URL",
"def getDefinitionURLString(self):\n return _libsbml.ASTNode_getDefinitionURLString(self)",
"def _get_document_url_for (self, component):\n return self.base_url + self.urls[component]",
"def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")",
"def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")",
"def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")",
"def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")",
"def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")",
"def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")",
"def url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"url\")",
"def url(self):\n url = os.environ.get('PATH_INFO')\\\n or os.environ.get('REQUEST_URI')\n return url if url else ''",
"def url(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"url\")",
"def url(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"url\")",
"def url(self):\r\n return self.urlparts.geturl()",
"def execution_url_template(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"execution_url_template\")",
"def getDefinitionURL(self):\n return _libsbml.ASTNode_getDefinitionURL(self)",
"def pr_url(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"pr_url\")",
"def step_file_path(self, string):\n if not self.has_step_field(string):\n return None\n # TODO handle url\n root_dir = self.root_dir()\n if root_dir:\n path = os.path.join(root_dir, self.step_field(string))\n return os.path.realpath(path)\n return os.path.realpath(self.step_field(string))",
"def url(self) -> str:\n return self._request.url.path",
"def etlWorkflowUrl(self):\n return self.sdaUrl + \"/workflows/_etl\"",
"def uri(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"uri\")"
] | [
"0.6653019",
"0.6429969",
"0.6305271",
"0.6265999",
"0.6141591",
"0.60876673",
"0.60876673",
"0.60759395",
"0.6074171",
"0.597044",
"0.5946428",
"0.5923328",
"0.5830999",
"0.5830999",
"0.5830999",
"0.5830999",
"0.5830999",
"0.5830999",
"0.5830999",
"0.5813061",
"0.5805103",
"0.5805103",
"0.57721376",
"0.5756133",
"0.57305056",
"0.5720783",
"0.56945086",
"0.56764776",
"0.56612754",
"0.5647479"
] | 0.6712275 | 0 |
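Usage sketch (not part of the record above): a self-contained variant of the step-URL construction, assuming boto3 and placeholder region/IDs. It inlines the name lookup and the 1-based index that the record delegates to its _get_step_by_name and _get_step_execution_index helpers.

import boto3

def step_console_url(region: str, execution_id: str, step_name: str) -> str:
    # Fetch the execution's steps and build the same console deep link:
    # .../automation/execution/<execution-id>/step/<1-based index>/<step-execution-id>
    ssm = boto3.client('ssm', region_name=region)
    steps = ssm.get_automation_execution(
        AutomationExecutionId=execution_id)['AutomationExecution']['StepExecutions']
    for index, step in enumerate(steps, start=1):
        if step['StepName'] == step_name:
            return (f'https://{region}.console.aws.amazon.com/systems-manager'
                    f'/automation/execution/{execution_id}'
                    f'/step/{index}/{step["StepExecutionId"]}')
    raise ValueError(f'Step [{step_name}] not found in execution {execution_id}')

# Example call with placeholder identifiers:
# print(step_console_url('us-east-1', 'a1b2c3d4-example-id', 'assertNoRollback'))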
Returns SSM document step execution sequence index | def _get_step_execution_index(self, step_executions: [], step_name):
index = 1
for step_execution in step_executions:
if step_name == step_execution['StepName']:
return index
index += 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_sequence_index(self):\n\t\treturn call_sdk_function('PrlBootDev_GetSequenceIndex', self.handle)",
"def step_index(self, step):\n return self.steps.index(step)",
"def get_step_idx(self, step_id: str) -> int:\n return self.step_id2idx.get(step_id, None)",
"def step_id(self) -> pulumi.Output[Optional[int]]:\n return pulumi.get(self, \"step_id\")",
"def get_sequence(self):\n self.__sequence = self.__sequence + 1\n return self.__sequence - 1",
"def next_run_idx(self):\n return self.num_runs",
"def get_step_index(self, step=None):\n if step is None:\n step = self.steps.current\n return self.get_form_list().keys().index(step)",
"def progress_step(inst) -> Any:\n try:\n return TestStatus.stages().index(inst)\n except ValueError:\n return -1",
"def idx(self):\n return int(self.__ph.get('idx', 0))",
"def index(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"index\")",
"def sequence_number(self):\n # type: () -> int\n return self._sequence_number",
"def sequence_number(self):\n return self._annotations.get(EventData.PROP_SEQ_NUMBER, None)",
"def step_id(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"step_id\")",
"def _get_next_sequence_number(self):\n cur = self._next_sequence_number\n self._next_sequence_number += 1\n return cur",
"def timestep_idx(self, timestep):\n timestep = pd.to_datetime(timestep)\n idx = np.where(self.time_index == timestep)[0][0]\n\n return idx",
"def getCurrentStep():",
"def current_step(self):\n try:\n last_line = tail(path.join(self.run_dir, \"out.txt\"), 8)\n except FileNotFoundError:\n return -1\n if not last_line: # Empty file\n return -1\n if re.search(\"now at t\", last_line[-1]):\n # Unless the line was incomplete, there should be a match with:\n a = re.match(r\".* n = *(.*?)$\", last_line[-1])\n if a:\n return int(a.group(1))\n # Otherwise, try the previous one\n a = re.match(r\".* n = *(.*?)$\", last_line[-2])\n if a:\n return int(a.group(1))\n else:\n return -1 # Some error exists in the file\n\n elif \" Osiris run completed normally\\n\" in last_line:\n return self.total_steps\n else:\n return -1",
"def attempt_sequence_number(self):\n return self._attempt_sequence_number",
"def sequence_number(self):\n return self._sequence_number",
"def getSectionIndex(self) -> int:\n ...",
"def step_id(self):\n return self._step_id",
"def find_starting_step_index(self, data_inputs) -> int:\n for index, (step_name, step) in enumerate(reversed(self.steps_as_tuple)):\n if isinstance(step, ResumableStepMixin) and step.should_resume(data_inputs):\n return len(self.steps_as_tuple) - index - 1\n return 0",
"def find_step(self):\n for p in enumerate(self.get_decoder_paths()):\n full_path = p[1] + \".data-00000-of-00001\"\n file = Path(full_path)\n if not file.exists():\n return p[0]\n\n return -1",
"def get_steps_num():\n return 0",
"def idx(self):\n return self._idx",
"def index(self) -> int:\r\n return self._index",
"def _step(self) -> int:\n return self._config[CONF_STEP]",
"def run_id(self) -> str:\n return self._step_execution_context.run_id",
"def index(self) -> int:",
"def run_number(self):\n return self._runNumber"
] | [
"0.68235666",
"0.6603684",
"0.6303389",
"0.63007843",
"0.627349",
"0.61597484",
"0.6039806",
"0.60359675",
"0.60088104",
"0.59596723",
"0.595375",
"0.5940087",
"0.5922751",
"0.59192324",
"0.5902262",
"0.5867469",
"0.5863946",
"0.58638436",
"0.58619946",
"0.58510906",
"0.58207697",
"0.5741322",
"0.571984",
"0.5718108",
"0.57083946",
"0.5708042",
"0.5701436",
"0.5699693",
"0.56931156",
"0.5681307"
] | 0.7016882 | 0 |
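Minimal sketch (not part of the record above) of the same 1-based lookup with an explicit sentinel for the not-found case, which the record's version leaves implicit (it falls through and returns None). The -1 sentinel and the sample step names are assumptions for illustration.

def step_execution_index(step_executions: list, step_name: str) -> int:
    # Return the 1-based position of the named step, or -1 if it is absent.
    for index, step_execution in enumerate(step_executions, start=1):
        if step_execution['StepName'] == step_name:
            return index
    return -1

steps = [{'StepName': 'createBackup'}, {'StepName': 'assertNoRollback'}]
print(step_execution_index(steps, 'assertNoRollback'))  # 2
print(step_execution_index(steps, 'missingStep'))       # -1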
Eviction filings broken down on a week-by-week basis | def weekly(evictiondata):
evictions_per_week = {}
for index, row in evictiondata.iterrows():
if row['week_date'] not in evictions_per_week.keys():
evictions_per_week[row['week_date']] = row['filings_2020']
else:
evictions_per_week[row['week_date']] += row['filings_2020']
return evictions_per_week | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def weekly():",
"def weekly():\n\n response = {}\n\n # 0..6 => Sunday..Saturday\n for i in range(7):\n hours = []\n interactions = 0\n\n for j in range(25):\n try:\n wfile = open(common.stats_path + '/weekly-average/' + str(i) + '/' + str(j))\n data = wfile.read()\n\n if j == 24:\n interactions = int(data)\n else:\n hours.append(int(data))\n\n wfile.close()\n except IOError:\n if i < 24:\n hours.append(0)\n\n response[DAYS[i]] = {'hours': hours, 'interactions': interactions}\n\n return response",
"def graphify(evictions_per_week):\r\n weeks = []\r\n for week in evictions_per_week.keys():\r\n if '2020' in week:\r\n weeks.append(week)\r\n evictions_filed = []\r\n for week in weeks:\r\n evictions_filed.append(evictions_per_week[week])\r\n plt.figure(figsize=(50, 10))\r\n plt.plot(weeks, evictions_filed)\r\n plt.xlabel('Date')\r\n plt.ylabel('Evictions filed')\r\n plt.title('Evictions filed by the week')\r\n plt.show()\r\n return weeks, evictions_filed",
"def GetListOfWeeks(self):\n delta_days = (self.GetFridayOfLastFullWeek() - self.START_DATE).days\n delta_weeks = int(math.floor(delta_days / 7))\n weeks = [self.START_DATE + dt.timedelta(days=7 * x) \n for x in range(0, delta_weeks + 1)]\n weeks = [week.strftime('%Y-%m-%d') for week in weeks]\n self.cursor.execute(\n 'SELECT DISTINCT week FROM %s' % self.BOX_OFFICE_TABLE)\n weeks_in_table = [x[0] for x in self.cursor.fetchall()]\n weeks = list(set(weeks) - set(weeks_in_table))\n weeks.sort() \n return weeks",
"def week_report_handle(fans_type):\n\t#import pdb;pdb.set_trace()\n\tlast_day = datetime.date.today()-timedelta(days=datetime.datetime.today().weekday() + 1)\n\ttoday = datetime.date.today()\n\n\tfans_pages = FansPage.objects.filter(fans_type=fans_type, date__gte=last_day, date__lte=today).order_by(\"date\")\n\n\tstart = fans_pages[0]\n\tlast = fans_pages[len(fans_pages) - 1]\n\n\t#talk_about_is = (last.talk_about_is - start.talk_about_is) / (start.talk_about_is + 0.0) * 100\n\ttalk_about_is = (last.talk_about_is - start.talk_about_is)\n\t#total_like_count = (last.total_like_count - start.total_like_count) / (start.total_like_count + 0.0) * 100\n\ttotal_like_count = (last.total_like_count - start.total_like_count)\n\t#total_fans = (last.total_fans - start.total_fans) / (start.total_fans + 0.0) * 100\n\ttotal_fans = (last.total_fans - start.total_fans)\n\treturn {\"talk_about_is\":talk_about_is, \"total_like_count\":total_like_count, \"total_fans\":total_fans, \"start\":start.date, \"last\":last.date}",
"def get_rollover_weeks(shop):\n d = {}\n ods, r = get_rollovers(shop)\n\n for od in ods:\n week = int(od.eta.strftime('%W'))+1\n if d.has_key(week):\n d[week] += int(od.plan)\n else:\n d[week] = int(od.plan)\n\n # remove the pulled from this week\n this_week = int(datetime.datetime.today().strftime('%W'))+1 \n if d.has_key(this_week):\n d[this_week] = d[this_week] - get_pulled(shop)[1] \n\n # build the return list of (week, '00:00') tuples\n l = []\n d = sorted(d.items()) # sort dictionary by week\n for key, minutes in d:\n formatted_time = _get_display_hours(minutes)\n l.append((key,formatted_time))\n\n return l",
"def testWeeklyOvertimes(self):\n dates = self.dates\n for day_num in xrange(28, 31):\n dates.append(utils.add_timezone(\n datetime.datetime(2011, 4, day_num)\n ))\n for day_num in xrange(5, 9):\n dates.append(utils.add_timezone(\n datetime.datetime(2011, 5, day_num)\n ))\n for day in dates:\n self.make_logs(day)\n\n def check_overtime(week0=Decimal('55.00'), week1=Decimal('55.00'),\n overtime=Decimal('30.00')):\n self.login_user(self.superuser)\n response = self.client.get(self.url, self.args)\n weekly_totals = response.context['weekly_totals'][0][0][0][2]\n self.assertEqual(weekly_totals[0], week0)\n self.assertEqual(weekly_totals[1], week1)\n self.assertEqual(weekly_totals[5], overtime)\n check_overtime()\n #Entry on following Monday doesn't add to week1 or overtime\n self.make_logs(utils.add_timezone(datetime.datetime(2011, 5, 9)))\n check_overtime()\n #Entries in previous month before last_billable do not change overtime\n self.make_logs(utils.add_timezone(datetime.datetime(2011, 4, 24)))\n check_overtime()\n #Entry in previous month after last_billable change week0 and overtime\n self.make_logs(utils.add_timezone(\n datetime.datetime(2011, 4, 25, 1, 0)\n ))\n check_overtime(Decimal('66.00'), Decimal('55.00'), Decimal('41.00'))",
"def read_weekly_breakdown_statuses(self):\n from itertools import repeat\n\n self.ID7_GNIP_BREAKDOWN = kpi_from_db_config.ID7_GNIP_BREAKDOWN\n self.ID7_STREAM_BREAKDOWN = kpi_from_db_config.ID7_STREAM_BREAKDOWN\n self.ID7_SEED_BREAKDOWN = kpi_from_db_config.ID7_SEED_BREAKDOWN\n self.ID7_MENTION_BREAKDOWN = kpi_from_db_config.ID7_MENTION_BREAKDOWN\n \n list_id = [self.ID7_GNIP_BREAKDOWN, \n self.ID7_STREAM_BREAKDOWN, \n self.ID7_SEED_BREAKDOWN, \n self.ID7_MENTION_BREAKDOWN]\n\n list_result = [[] for i in repeat(None,len(list_id))]\n for i in range(len(list_id)):\n self.cursor.execute('''\n SELECT value\n FROM public.kpi_report\n WHERE id = %s\n ORDER BY created_at DESC\n LIMIT 6\n ''', [list_id[i]])\n rows_count = self.cursor.rowcount\n \n if (rows_count == 6): # 6 is LIMIT from the query\n for doc in self.cursor:\n list_result[i].append(int(doc[0]))\n elif (rows_count >= 1 and rows_count < 6): # Change rows_count > 0 and rows_count < Number of limit\n for doc in self.cursor:\n list_result[i].append(int(doc[0]))\n list_result[i] = list_result[i] + [0] * (6 - rows_count) \n else:\n list_result[i] = [0] * 6\n\n return list_result",
"def _get_current_week_entries(today, user):\n some_day = today + timedelta(days=1)\n monday_of_week = some_day - timedelta(days=(some_day.isocalendar()[2] - 1))\n sunday_of_week = monday_of_week + timedelta(days=6)\n weekevents = TimeEntry.objects.filter(\n booking_date__gte=monday_of_week, booking_date__lt=sunday_of_week, user=user\n )\n return weekevents",
"def return_weekly_figure():\n today = datetime.datetime.now()\n\n while 1:\n try:\n today_str = str(today.day) + \"/\" + \"{:02d}\".format(today.month) + \"/\" + str(today.year)\n match = covid_table.find(date=today_str)\n match.next()\n running_total = 0\n for i in range(7):\n running_total += return_daily_figure(today)\n today = today - datetime.timedelta(days=1)\n average_dose_per_day = round(running_total/7)\n return running_total, average_dose_per_day \n except:\n today = today - datetime.timedelta(days=1)",
"def weekly_viewed(df):\n df = convert_to_datetime(df)\n today = datetime.date.today()\n this_week_start = today - timedelta(days=7)\n last_week_start = today - timedelta(days=14)\n week_per_min = []\n lastweek_per_min = []\n thisweek_viewed = []\n lastweek_viewed = []\n for index, row in df.iterrows():\n if row['session_start'].date() >= this_week_start:\n per_min = get_cards_per_min(row)\n week_per_min.append(per_min)\n thisweek_viewed.append(row['total_looked_at'])\n if last_week_start <= row['session_start'].date() < this_week_start:\n per_min = get_cards_per_min(row)\n lastweek_per_min.append(per_min)\n lastweek_viewed.append(row['total_looked_at'])\n week_viewed_result = total_viewed(thisweek_viewed, lastweek_viewed)\n week_viewed_result['total_viewed_weekly'] = week_viewed_result.pop('total_viewed')\n\n return week_viewed_result",
"def get_week_prediction(classifier, shift_ratios, needs, remove_ratios, remove_needs,\n max_scale_value):\n days_of_week = ['sunday', 'monday', 'tuesday', 'wednesday', 'thursday', 'friday', 'saturday']\n predictions = {}\n for day in days_of_week:\n prediction = get_day_of_week_shift_prediction(\n classifier, day, shift_ratios, needs, remove_ratios, remove_needs, max_scale_value\n )\n predictions[day] = prediction\n\n return predictions",
"def weekly_resample(data):\n data = data.resample('W-MON').sum()\n return data",
"def news_for_week(self):\n\n raise NotImplementedError",
"def __init__(self, y, w):\n for d in xrange(-10, 370):\n date = datetime.date(y, 1, 1) + datetime.timedelta(d)\n if date.isocalendar() == (y, w, 1):\n date_a = date\n break\n else:\n raise ValueError(\"Invalid week\")\n date_b = date_a + datetime.timedelta(7)\n super(Week, self).__init__(date_a, date_b)",
"def graph_baseline(evictiondata, weeks):\r\n base_evictions_per_week = {}\r\n for index, row in evictiondata.iterrows():\r\n if row['week_date'] not in base_evictions_per_week.keys():\r\n base_evictions_per_week[row['week_date']] = row['filings_avg']\r\n elif row['GEOID'] != 'sealed':\r\n base_evictions_per_week[row['week_date']] += row['filings_avg']\r\n base_evictions_filed = []\r\n for week in weeks:\r\n base_evictions_filed.append(base_evictions_per_week[week])\r\n\r\n plt.figure(figsize=(50, 10))\r\n plt.plot(weeks, base_evictions_filed, color='orange')\r\n plt.title('Base Evictions filed by the week')\r\n plt.xlabel('Date')\r\n plt.ylabel('Evictions filed')\r\n plt.show()\r\n return base_evictions_filed",
"def test_weekly_resolution_hindcast(daily_initialized, daily_obs):\n weekly_hindcast = daily_initialized.resample(init=\"W\").mean()\n weekly_obs = daily_obs.resample(time=\"W\").mean()\n weekly_hindcast.lead.attrs[\"units\"] = \"weeks\"\n assert compute_hindcast(weekly_hindcast, weekly_obs).all()",
"def forecast_weekly():\n forecast = get_forecast()\n daily = forecast.daily()\n return daily.summary",
"def getWeeks(data: Sequence[HistoryElement]) -> Sequence[int]:\r\n _checkData(data)\r\n return [x.timeStamp.toDateTime().weekday() for x in data]",
"def hr_report():\n\n # Load the peak data.\n db = Persistence()\n if not (activities := db.load_all()):\n print(\"No data to report on\")\n return\n\n # Find the maximum for each value.\n max = _load_max_values(activities)\n\n # Totals for the current week\n week_distance_total = 0\n week_elevation_total = 0\n week_duration_total = timedelta()\n week_work_days = 0\n week_5sec_average = []\n week_30sec_average = []\n week_60sec_average = []\n week_5min_average = []\n week_10min_average = []\n week_20min_average = []\n week_30min_average = []\n week_60min_average = []\n week_90min_average = []\n week_120min_average = []\n\n # Print the peak data for each week.\n current_weekday = None\n for activity in activities:\n\n # Time to break to a new week?\n if current_weekday is None or current_weekday > activity.start_time.weekday():\n if current_weekday:\n _print_footer(\n week_distance_total=week_distance_total,\n week_elevation_total=week_elevation_total,\n week_duration_total=week_duration_total,\n week_work_days=week_work_days,\n week_5sec_average=week_5sec_average,\n week_30sec_average=week_30sec_average,\n week_60sec_average=week_60sec_average,\n week_5min_average=week_5min_average,\n week_10min_average=week_10min_average,\n week_20min_average=week_20min_average,\n week_30min_average=week_30min_average,\n week_60min_average=week_60min_average,\n week_90min_average=week_90min_average,\n week_120min_average=week_120min_average,\n )\n week_distance_total = 0\n week_elevation_total = 0\n week_duration_total = timedelta(0)\n week_work_days = 0\n week_5sec_average = []\n week_30sec_average = []\n week_60sec_average = []\n week_5min_average = []\n week_10min_average = []\n week_20min_average = []\n week_30min_average = []\n week_60min_average = []\n week_90min_average = []\n week_120min_average = []\n\n _print_header()\n\n # Capture the weekday.\n if current_weekday is None or current_weekday != activity.start_time.weekday():\n week_work_days = week_work_days + 1\n\n current_weekday = activity.start_time.weekday()\n\n # Print the detail.\n _print_detail(activity, max)\n\n # Find the duration.\n duration = activity.end_time - activity.start_time\n\n # Accumulate for this week\n week_distance_total = week_distance_total + activity.distance\n if activity.elevation:\n week_elevation_total = week_elevation_total + activity.elevation\n week_duration_total = week_duration_total + duration\n week_5sec_average.append(activity.peak_5sec_hr)\n week_30sec_average.append(activity.peak_30sec_hr)\n week_60sec_average.append(activity.peak_60sec_hr)\n if activity.peak_5min_hr:\n week_5min_average.append(activity.peak_5min_hr)\n if activity.peak_10min_hr:\n week_10min_average.append(activity.peak_10min_hr)\n if activity.peak_20min_hr:\n week_20min_average.append(activity.peak_20min_hr)\n if activity.peak_30min_hr:\n week_30min_average.append(activity.peak_30min_hr)\n if activity.peak_60min_hr:\n week_60min_average.append(activity.peak_60min_hr)\n if activity.peak_90min_hr:\n week_90min_average.append(activity.peak_90min_hr)\n if activity.peak_120min_hr:\n week_120min_average.append(activity.peak_120min_hr)\n\n # Final footer.\n _print_footer(\n week_distance_total=week_distance_total,\n week_elevation_total=week_elevation_total,\n week_duration_total=week_duration_total,\n week_work_days=week_work_days,\n week_5sec_average=week_5sec_average,\n week_30sec_average=week_30sec_average,\n week_60sec_average=week_60sec_average,\n week_5min_average=week_5min_average,\n week_10min_average=week_10min_average,\n 
week_20min_average=week_20min_average,\n week_30min_average=week_30min_average,\n week_60min_average=week_60min_average,\n week_90min_average=week_90min_average,\n week_120min_average=week_120min_average,\n )\n\n # Print the summary.\n _print_summary(max)",
"def run_week_observations(period_begin, period_end):\n observs = []\n # get all dates in the period range and find all unique weeknumbers\n all_dates = list(pd.date_range(period_begin, period_end))\n weeknumbers = list(set([x.isocalendar()[:2] for x in all_dates]))\n\n # get all the begin and end dates of the observable week (so the date of the monday and friday)\n # https://stackoverflow.com/questions/17087314/get-date-from-week-number\n all_periods = []\n for numb in weeknumbers:\n mon_date = datetime.strptime(f\"{numb[0]}-W{numb[1]}\" + '-1', '%G-W%V-%u')\n fri_date = mon_date + timedelta(4)\n all_periods.append((mon_date, fri_date))\n\n # run a new observation if the week hasn't been observerd\n if len(all_periods) > 0:\n for period in all_periods:\n # retrieve all data over the stocks in this period\n data = Stocks.objects.filter(date__range=period)\n if len(data) > 0:\n # convert the data to a dataframe\n q = data.values('component', 'indexx', 'date', 's_close')\n df_data = pd.DataFrame.from_records(q)\n\n # prepare the data for the analysis\n df_data.rename(columns={\"s_close\": \"close\"}, inplace=True)\n df_data['close'] = df_data['close'].astype('float')\n\n # load in the sector data and add it to the dataframe\n with open(r\"./articles_app/data/sectorcompany.json\") as f:\n sector_info = json.load(f)\n df_data[\"sector\"] = df_data[\"component\"].apply(lambda x: sector_info.get(x))\n df_data.dropna(inplace=True)\n\n # run the analyser to find observations\n analyse = Analyse(df_data, *period)\n analyse.find_weekly_observations()\n observs.extend(analyse.observations)\n return observs",
"def getWeeks(year):\n url = \"http://www.boxofficemojo.com/weekend/?yr=%d\" % year\n src = urllib.request.urlopen(url).read()\n soup = BeautifulSoup(src, 'html.parser')\n chart = soup.find(border=\"0\", cellspacing=\"1\", cellpadding=\"5\")\n data = parseTable(chart)\n weeks = [int(row[-1]) for row in data[1:]]\n return weeks",
"def normalize_weekly(data):\n if \"tblMenu\" not in data[\"result_data\"][\"Document\"]:\n data[\"result_data\"][\"Document\"][\"tblMenu\"] = []\n if isinstance(data[\"result_data\"][\"Document\"][\"tblMenu\"], dict):\n data[\"result_data\"][\"Document\"][\"tblMenu\"] = [data[\"result_data\"][\"Document\"][\"tblMenu\"]]\n for day in data[\"result_data\"][\"Document\"][\"tblMenu\"]:\n if \"tblDayPart\" not in day:\n continue\n if isinstance(day[\"tblDayPart\"], dict):\n day[\"tblDayPart\"] = [day[\"tblDayPart\"]]\n for meal in day[\"tblDayPart\"]:\n if isinstance(meal[\"tblStation\"], dict):\n meal[\"tblStation\"] = [meal[\"tblStation\"]]\n for station in meal[\"tblStation\"]:\n if isinstance(station[\"tblItem\"], dict):\n station[\"tblItem\"] = [station[\"tblItem\"]]\n return data",
"def record_weeks(self, user, start, end, num=10):\n query = self.user_weeks_between(user, start, end).order_by('-plays')[:num]\n for week in query:\n date = ldates.date_of_index(week.week_idx)\n yield week, date",
"def main():\n print(day_of_week(datetime.now()))\n print(day_of_week(datetime(2019, 7, 4)))\n print(day_of_week(datetime(2013, 12, 25)))\n print(day_of_week(datetime(2000, 1, 1)))",
"def distributeWeekday(self, jan1):\n self.firstday = jan1\n for day in self.daylist:\n if jan1%7 == 6 or jan1%7 == 0:\n day.weekday = 'weekend'\n jan1 = jan1 + 1",
"def week_schedule(year, stype, week):\n url = schedule_url(year, stype, week)\n try:\n dom = xml.parse(urllib.request.urlopen(url))\n except urllib.error.HTTPError:\n print >> sys.stderr, 'Could not load %s' % url\n return []\n\n games = []\n for g in dom.getElementsByTagName(\"g\"):\n gsis_id = g.getAttribute('eid')\n games.append({\n 'eid': gsis_id,\n 'wday': g.getAttribute('d'),\n 'year': year,\n 'month': int(gsis_id[4:6]),\n 'day': int(gsis_id[6:8]),\n 'time': g.getAttribute('t'),\n 'meridiem': None,\n 'season_type': stype,\n 'week': week,\n 'home': g.getAttribute('h'),\n 'away': g.getAttribute('v'),\n 'gamekey': g.getAttribute('gsis'),\n })\n\n for game in games:\n h = int(game['time'].split(':')[0])\n m = int(game['time'].split(':')[1])\n if 0 < h <= 5: # All games before \"6:00\" are PM until proven otherwise\n game['meridiem'] = 'PM'\n\n if game['meridiem'] is None:\n\n days_games = [g for g in games if g['wday'] == game['wday']]\n preceeding = [g for g in days_games if g['eid'] < game['eid']]\n proceeding = [g for g in days_games if g['eid'] > game['eid']]\n\n # If any games *after* this one are AM then so is this\n if any(g['meridiem'] == 'AM' for g in proceeding):\n game['meridiem'] = 'AM'\n # If any games *before* this one are PM then so is this one\n elif any(g['meridiem'] == 'PM' for g in preceeding):\n game['meridiem'] = 'PM'\n # If any games *after* this one have an \"earlier\" start it's AM\n elif any(h > t for t in [int(g['time'].split(':')[0]) for g in proceeding]):\n game['meridiem'] = 'AM'\n # If any games *before* this one have a \"later\" start time it's PM\n elif any(h < t for t in [int(g['time'].split(':')[0]) for g in preceeding]):\n game['meridiem'] = 'PM'\n\n if game['meridiem'] is None:\n if game['wday'] not in ['Sat', 'Sun']:\n game['meridiem'] = 'PM'\n if game['season_type'] == 'POST':\n game['meridiem'] = 'PM'\n\n return games",
"def test_weekly_resolution_perfect_model(daily_initialized, daily_obs):\n weekly_pm = daily_initialized.resample(init=\"W\").mean()\n weekly_obs = daily_obs.resample(time=\"W\").mean()\n weekly_pm.lead.attrs[\"units\"] = \"weeks\"\n assert compute_hindcast(weekly_pm, weekly_obs).all()",
"def process(raw):\n #global weekNum\n field = None\n entry = {}\n cooked = []\n number = -1\n\n for line in raw:\n log.debug(\"Line: {}\".format(line))\n line = line.strip()\n if len(line) == 0 or line[0] == \"#\":#if # is the first character, skip\n log.debug(\"Skipping\")\n continue\n parts = line.split(':')#split lines to before and after \":\"\n if len(parts) == 1 and field:#adds additional content to whatever the previously used field is\n entry[field] = entry[field] + line + \" \" \n continue\n if len(parts) == 2:#if there are 2 parts, the field is the first part and the content is the second part\n field = parts[0]\n content = parts[1]\n else:#if none of the above are correct there is an issue\n raise ValueError(\"Trouble with line: '{}'\\n\".format(line) +\n \"Split into |{}|\".format(\"|\".join(parts)))\n\n if field == \"begin\":#checking if this is the line with the start date\n try:#begin only triggers once (at least it should only trigger once)\n base = arrow.get(content, \"MM/DD/YYYY\")#get the date as an object named \"base\", will need to use this to determine start date and current week, arrow must have a \"current date\"?\n # base is the \"week 1\" date, DD = 1, DD + 7 = 2, DD + 14 = 3, DD + 21 = 4, etc\n #now i will make variables for the start date of each week, or find a way to take the difference between 2 dates\n #end = base#arrow.get(base, \"MM/DD/YYYY\")\n #end = end.shift(weeks=+10)\n #today = arrow.now()\n #today.format(\"MM/DD/YYYY\")\n #if today == base:\n # weekNum = 1\n #number = -1\n \"\"\"weeks = [base, base.shift(days=+7), base.shift(days=+14), base.shift(days=+21), base.shift(days=+28), base.shift(days=+35), base.shift(days=+42), base.shift(days=+49), base.shift(days=+56), base.shift(days=+63), base.shift(days=+70)]\n today = arrow.now()\n for i in range(0,9):\n if weeks[i] <= today <= weeks[i+1]:\n number = i+1\n if today > weeks[10]:\n number = 10\n elif today < weeks[0]:\n number = 0\n #base = arrow.format(\"MM/DD/YYYY\")\n else:\n raise ValueError(\"Big error calculating week\")\n #for index in range(1,70):\n # base = base.shift(days=+1)\n # if today == base:\n # weekNum = weekNum + (index % 7)\n # break \n base = base.format(\"MM/DD/YYYY\")\"\"\"\n except:\n raise ValueError(\"Unable to parse date {}\".format(content))#date is incorrectly formatted, should be MM/DD/YYYY\n #now I need to check if either of these weeks is the current week\n# for r in arrow.Arrow.span_range('day',\n elif field == \"week\":#this is the week number\n if entry:\n cooked.append(entry)\n entry = {}#make entry empty again\n #if content == currentWeekNum:\n #print(\"Content: \" + content)\n #print(\"Week Number: \" + currentWeekNum + \"\\n\")\n #print(\"Is Current Week?\" + currentWeekBool + \"\\n\")\n # currentWeekBool = True\n entry['topic'] = \"\"#these are all \"classes\" in the HTML document\n entry['project'] = \"\"\n entry['week'] = content#put the week number into the \"week\" field in the html document\n #entry['isCurrentWeek'] = currentWeekBool\n #currentWeekBool = False\n #if content == weekNum:\n # entry['bool'] = True\n #else:\n # entry['bool'] = True\n \"\"\"if \n if content == currentWeekNum:\n entry['isCurrentWeek'] = True\n else:\n entry['isCurrentWeek'] = False\"\"\"\n\n elif field == 'topic' or field == 'project':#from if len == 2, set the entry for the field to the content in the html doc\n entry[field] = content\n\n else:\n raise ValueError(\"Syntax error in line: {}\".format(line))\n #entryn = entry + \"\\n\"\n\t#cookedn = cooked + \"\\n\"\n\t#fieldn = 
field + \"\\n\"\n\t#print(\"Entry: \" + entryn)\n #print(\"Cooked: \" + cookedn)\n #print(\"Field: \" + fiieldn)\n if entry:#appends whatever added stuff to the whole docuemnt\n cooked.append(entry)\n\t#returns formatted document after it has been looped throughi\n #number = getWeekNum(raw)\n weeks = [base, base.shift(days=+7), base.shift(days=+14), base.shift(days=+21), base.shift(days=+28), base.shift(days=+35), base.shift(days=+42), base.shift(days=+49), base.shift(days=+56), base.shift(days=+63), base.shift(days=+70)]\n today = arrow.now()\n for i in range(0,9):\n if weeks[i] <= today <= weeks[i+1]:\n number = i+1\n return [cooked, i+1]\n if today < weeks[0]:\n number = 0\n else:\n number = 10\n return [cooked, number]",
"def group_by_weekday(items):\n result = [[], [], [], [], [], [], []] # one list for every day in week\n for date in items:\n start = items[date]['start']\n end = items[date]['end']\n result[date.weekday()].append(interval(start, end))\n return result"
] | [
"0.7323955",
"0.62316436",
"0.6126304",
"0.6112387",
"0.6050204",
"0.6010439",
"0.5954555",
"0.59421486",
"0.5924618",
"0.59082824",
"0.5864308",
"0.5844851",
"0.5800671",
"0.57960045",
"0.57312167",
"0.5713384",
"0.5708791",
"0.568154",
"0.5680631",
"0.56350195",
"0.56308794",
"0.56270885",
"0.5578891",
"0.55713",
"0.5563999",
"0.5535902",
"0.55171204",
"0.54707116",
"0.54652864",
"0.54488355"
] | 0.7517364 | 0 |
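Equivalent sketch (not part of the record above): the same per-week sum done with a pandas groupby on a small made-up frame. The column names follow the record; the dates and filing counts are invented.

import pandas as pd

evictiondata = pd.DataFrame({
    'week_date': ['3/1/2020', '3/1/2020', '3/8/2020'],
    'filings_2020': [10, 5, 7],
})
# groupby + sum replaces the manual dictionary accumulation in weekly().
evictions_per_week = evictiondata.groupby('week_date')['filings_2020'].sum().to_dict()
print(evictions_per_week)  # {'3/1/2020': 15, '3/8/2020': 7}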
Visualizes the week-by-week eviction data as a graph | def graphify(evictions_per_week):
weeks = []
for week in evictions_per_week.keys():
if '2020' in week:
weeks.append(week)
evictions_filed = []
for week in weeks:
evictions_filed.append(evictions_per_week[week])
plt.figure(figsize=(50, 10))
plt.plot(weeks, evictions_filed)
plt.xlabel('Date')
plt.ylabel('Evictions filed')
plt.title('Evictions filed by the week')
plt.show()
return weeks, evictions_filed | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def graph_baseline(evictiondata, weeks):\r\n base_evictions_per_week = {}\r\n for index, row in evictiondata.iterrows():\r\n if row['week_date'] not in base_evictions_per_week.keys():\r\n base_evictions_per_week[row['week_date']] = row['filings_avg']\r\n elif row['GEOID'] != 'sealed':\r\n base_evictions_per_week[row['week_date']] += row['filings_avg']\r\n base_evictions_filed = []\r\n for week in weeks:\r\n base_evictions_filed.append(base_evictions_per_week[week])\r\n\r\n plt.figure(figsize=(50, 10))\r\n plt.plot(weeks, base_evictions_filed, color='orange')\r\n plt.title('Base Evictions filed by the week')\r\n plt.xlabel('Date')\r\n plt.ylabel('Evictions filed')\r\n plt.show()\r\n return base_evictions_filed",
"def visualize_days():\n\t\n\t#grab our parsed data that we parsed earlier\n\tdata_file = parse(MY_FILE, \",\")\n\t\n\t#make a new variable, counter, from iterating through each line of\n\t#data in the parsed data, and count how many incidents happen on each\n\t#day of the week\n\tcounter = Counter(item[\"DayOfWeek\"] for item in data_file)\n\t\n\t#separate the x-axis data (days of the week) from the counter variable\n\t#from the y-axis (number of incidents each day)\n\tdata_list = [\n\t\t\t\tcounter[\"Monday\"],\n\t\t\t\tcounter[\"Tuesday\"],\n\t\t\t\tcounter[\"Wednesday\"],\n\t\t\t\tcounter[\"Thursday\"],\n\t\t\t\tcounter[\"Friday\"],\n\t\t\t\tcounter[\"Saturday\"],\n\t\t\t\tcounter[\"Sunday\"]\n\t\t\t\t]\n\tday_tuple = tuple([\"Mon\", \"Tues\", \"Wed\", \"Thurs\", \"Fri\", \"Sat\", \"Sun\"])\n\t\n\t#with y-axis data, assign it to a matplotlib plot instance\n\tplt.plot(data_list)\n\t\n\t#create amount of ticks need for x and y axes and assign labels\n\tplt.xticks(range(len(day_tuple)), day_tuple)\n\t\n\t#save the plot\n\tplt.savefig(\"Days.png\")\n\t\n\t#close plot file\n\tplt.clf()",
"def visualize_days():\n\n # grab our parsed data that we parsed earlier\n data_file = parse(MY_FILE, \",\")\n\n counter = Counter(item['DayOfWeek'] for item in data_file)\n\n data_list = [\n counter['Monday'],\n counter['Tuesday'],\n counter['Wednesday'],\n counter['Thursday'],\n counter['Friday'],\n counter['Saturday'],\n counter['Sunday']\n ]\n\n day_tuple = tuple(['Mon','Tues','Wed','Thurs','Fri','Sat','Sun'])\n\n plt.plot(data_list)\n\n # num of ticks needed for our x-axis & assign labels\n plt.xticks(range(len(day_tuple)),day_tuple)\n \n plt.savefig(\"Days.png\")\n plt.clf()",
"def cross_analyze(evictions_filed, base_evictions_filed, weeks):\r\n plt.figure(figsize=(50, 10))\r\n plt.plot(weeks, evictions_filed, label = '2020')\r\n plt.plot(weeks, base_evictions_filed, label = '2015-2016')\r\n plt.xlabel('Date', fontsize = 25)\r\n plt.ylabel('Evictions filed', fontsize = 25)\r\n plt.title('Evictions filed by the week', fontsize = 40)\r\n plt.legend()\r\n plt.annotate('Texas Supreme Court puts a temporary \\n stay on eviction proceedings.', xy = ('3/8/2020', 1551), fontsize = 15)\r\n plt.show()",
"def visualize_days(parsed_data, output_dir):\n\n # Returning no. of incidents by each day of the week\n counter = fetch_incident_by_days(parsed_data)\n\n # data_list = fetch_incident_by_days.keys()\n\n # Separating the counter to have an ordered list\n y_values = [\n counter[\"Monday\"],\n counter[\"Tuesday\"],\n counter[\"Wednesday\"],\n counter[\"Thursday\"],\n counter[\"Friday\"],\n counter[\"Saturday\"],\n counter[\"Sunday\"]\n ]\n\n # Creating labels for x-axis\n x_labels = tuple([\"Mon\", \"Tues\", \"Wed\", \"Thurs\", \"Fri\", \"Sat\", \"Sun\"])\n\n # Assigning the data to plot\n plt.plot(y_values)\n\n # Assigning xticks on x-axis\n plt.xticks(range(len(x_labels)), x_labels)\n\n # Save the graph and show the figure\n file_name = os.path.join(output_dir, DAYS_PLOT_FILENAME)\n plt.savefig(file_name)\n plt.show()",
"def plot_typical_day(bdf):\n\n bdf[\"time\"] = bdf.index.time\n bdf[\"weekday\"] = bdf.index.weekday < 5\n week_profile = bdf[bdf.weekday].pivot_table(\n index=\"time\", values=\"load\", aggfunc=[\"mean\", \"max\", \"min\"]\n )\n week_profile.columns = [\"week_mean\", \"week_max\", \"week_min\"]\n weekend_profile = bdf[~bdf.weekday].pivot_table(\n index=\"time\", values=\"load\", aggfunc=[\"mean\", \"max\", \"min\"]\n )\n weekend_profile.columns = [\"weekend_mean\", \"weekend_max\", \"weekend_min\"]\n profile = week_profile.merge(weekend_profile, left_index=True, right_index=True)\n fig = profile.iplot(\n asFigure=True,\n layout=dict(\n title=\"Typisch week/weekend profiel\",\n xaxis=dict(title=\"Tijd vd dag [-]\"),\n yaxis=dict(title=\"Belasting [MW]\"),\n ),\n )\n\n # Update traces based on name\n for trace in fig.data:\n if \"week\" in trace[\"name\"]:\n trace.update(line=dict(color=\"blue\"))\n if \"weekend\" in trace[\"name\"]:\n trace.update(line=dict(color=\"green\"))\n if \"min\" in trace[\"name\"] or \"max\" in trace[\"name\"]:\n trace.update(line=dict(dash=\"dot\"))\n\n return fig",
"def display(self):\n \n # initialize SQL kit to access database\n s = SQL_Kit(self.userID, self.password, self.database)\n \n \n \"\"\" Total Activity by hour \"\"\"\n \n # get activity data\n all_date_times = self.activity().index\n\n all_days = []\n all_hours = []\n for item in all_date_times:\n all_days.append((item.timetuple().tm_yday))\n all_hours.append(item.hour)\n\n x = all_days\n y = all_hours\n x_labels = pd.Series(all_days).unique()\n\n fig1, ax1 = plt.subplots()\n ax1.set_title('Hourly Activity')\n ax1.scatter(x,y,color='mediumspringgreen',linewidths=1)\n ax1.set_xlabel('day of year')\n ax1.set_ylabel('hour')\n ax1.xaxis.grid(True)\n\n if len(x_labels) > 5:\n ax1.xaxis.set_ticks([min(all_days), max(all_days)])\n else:\n ax1.xaxis.set_ticks(x_labels)\n\n ax1.yaxis.grid(False) \n plt.show()\n \n \n \"\"\" MOVING AVERAGE \"\"\"\n \n df = self.activity().reset_index()\n\n def day_of_year(datetime_entry):\n return datetime_entry.timetuple().tm_yday\n\n df['day_of_year'] = list(df.apply(lambda x: day_of_year(x['EventDateTime']),axis=1))\n daily_count = df['day_of_year'].value_counts().sort_index()\n\n averages = []\n i=1\n for value_count in daily_count:\n values = daily_count[:i]\n average = round(sum(values)/len(values),2)\n averages.append(average)\n i+=1\n\n day_list = list(df['day_of_year'].unique())\n\n avg_move_df = pd.DataFrame([day_list,averages]).T\n avg_move_df.rename(columns={0: 'day_id', 1: 'moving_avg'},inplace=True)\n avg_move_df.set_index('day_id',inplace=True)\n \n fig1, ax1 = plt.subplots()\n ax1.plot(avg_move_df.index.astype(int),avg_move_df['moving_avg'], color='mediumspringgreen')\n ax1.set_title('Moving AVG')\n ax1.set_xlabel('day_of_year')\n ax1.xaxis.set_ticks([min(all_days), max(all_days)])\n ax1.set_ylabel('Daily Activity')\n plt.show()\n \n \n \n \"\"\" Top 5 Samples \"\"\"\n \n data = s.select_table('sample')['SoundCategory'].value_counts()\n \n objects = list(data)[:5]\n y_pos = list(data.index)[:5]\n\n # get class info from class_absence_stats dataframe\n #fig2 = plt.figure(2) \n plt.bar(y_pos, objects, align='center', alpha=0.8, color='mediumspringgreen')\n plt.ylabel('Usage')\n plt.xlabel('Sound Category')\n plt.title('Top 5 Samples')\n plt.show()\n \n \n \"\"\" Top 3 Chords \"\"\"\n \n data = s.select_table('chord')['ChordLabel'].value_counts()\n\n objects = list(data)[:3]\n y_pos = list(data.index)[:3]\n\n # get class info from class_absence_stats dataframe\n #fig2 = plt.figure(2) \n plt.bar(y_pos, objects, align='center', alpha=0.8, color='mediumspringgreen')\n plt.ylabel('Usage')\n plt.xlabel('Chord Label')\n plt.title('Top 3 Chords')\n plt.show()\n \n \n \"\"\" Top 3 Wave Types \"\"\"\n \n # get SQL table data\n set_1 = s.select_table('createwave')\n set_2 = s.select_table('sequence')\n set_3 = s.select_table('arpeggio')\n set_4 = s.select_table('chord')\n\n # concat tables into single pandas series\n all_wave_types = pd.concat([set_1['WaveType'], set_2['WaveType'], set_3['WaveType'], set_4['WaveType']])\n\n # sort values, show top 3\n top_3 = all_wave_types.value_counts().head(3)\n\n\n # Pie chart, where the slices will be ordered and plotted counter-clockwise:\n labels = list(top_3.index)\n sizes = list(top_3.values)\n explode = (0, 0, 0) # only \"explode\" the 2nd slice (i.e. 
'Hogs')\n\n fig1, ax1 = plt.subplots()\n ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%',\n shadow=True, colors=['g','b','r'], startangle=90)\n ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\n ax1.set_title('Top Wave Types')\n\n plt.show()",
"def graph_data(self, timeframe):\n logging.info(\"Graphing Data\")\n pprog = self.prog_logs\n cursor = pprog.find({})\n data = {\n \"emotional\": [],\n \"physical\": [],\n \"cognitive\": []\n }\n comp = self.get_timeframe(timeframe)\n for doc in cursor:\n date = list(doc.keys())[1]\n try:\n datecomp = datetime.datetime.strptime(date, \"%Y-%m-%d %H:%M\")\n except:\n datecomp = datetime.datetime.today()\n if datecomp > datetime.datetime.combine(comp, datetime.time.min):\n for key in data.keys():\n rating = int(doc[date][\"data\"][key][\"rating\"])\n data[key].append(rating)\n plt.ylabel('Level')\n plt.xlabel('Number of Logs - Ordered By Date')\n for key in data.keys():\n plt.plot(data[key])\n plt.legend(['Emotional', 'Physical', 'Cognitive'], loc='upper left')\n plt.show()",
"def week_chart(station_id):\r\n\r\n engine = get_db()\r\n # According to the parameter:station_id\r\n # select the occupancy of the corresponding station from the database.\r\n sql = \"SELECT available_bikes, available_bike_stands, last_update FROM STATION where number={};\".format(station_id)\r\n rows = engine.execute(sql).fetchall()\r\n\r\n week_average_bikes = []\r\n week_average_stands = []\r\n\r\n # The values 0 - 6 in the list day represent the days from Sunday to Saturday\r\n days = [0, 1, 2, 3, 4, 5, 6]\r\n for day in days:\r\n # Invoking the function:day_avg, calculate the average occupancy on a single day, and then add it to the list\r\n week_average_bikes.append(day_avg(rows, day)[0])\r\n week_average_stands.append(day_avg(rows, day)[1])\r\n daily = jsonify(week_average_bikes=week_average_bikes, week_average_stands=week_average_stands)\r\n return daily",
"def week_top_five(db: str) -> None:\n # Creating x and y variables for each month using the helper function\n # max_delays to get the five stations with the longest overall delays\n month_1 = [max_delays(db, 0)]\n month_2 = [max_delays(db, 1)]\n month_3 = [max_delays(db, 2)]\n month_4 = [max_delays(db, 3)]\n month_5 = [max_delays(db, 4)]\n month_6 = [max_delays(db, 5)]\n \n # using the variables to plot bar graphs of each month.\n plt.figure(figsize=(35, 20)) \n plt.xticks(fontsize=20) \n \n plt.subplot(2, 3, 1)\n plt.xlabel('Station')\n plt.ylabel('Total Time of Delays (minutes)') \n plt.title('Sept 2017') \n plt.bar(month_1[0][0], month_1[0][1])\n \n plt.subplot(2, 3, 2)\n plt.xlabel('Station')\n plt.ylabel('Total Time of Delays (minutes)') \n plt.title('Oct 2017') \n plt.bar(month_2[0][0], month_2[0][1]) \n \n plt.subplot(2, 3, 3)\n plt.xlabel('Station')\n plt.ylabel('Total Time of Delays (minutes)') \n plt.title('Nov 2017') \n plt.bar(month_3[0][0], month_3[0][1]) \n \n plt.subplot(2, 3, 4)\n plt.xlabel('Station')\n plt.ylabel('Total Time of Delays (minutes)') \n plt.title('Dec 2017') \n plt.bar(month_4[0][0], month_4[0][1]) \n \n plt.subplot(2, 3, 5)\n plt.xlabel('Station')\n plt.ylabel('Total Time of Delays (minutes)') \n plt.title('Jan 2018') \n plt.bar(month_5[0][0], month_5[0][1])\n \n plt.subplot(2, 3, 6)\n plt.xlabel('Station')\n plt.ylabel('Total Time of Delays (minutes)') \n plt.title('Feb 2018') \n plt.bar(month_6[0][0], month_6[0][1]) \n \n plt.tight_layout()\n plt.savefig('week_top_five.png')\n plt.close()",
"def data_visualization(df):\r\n\r\n # Visualizing the target variable\r\n plt.figure(figsize=(14, 10))\r\n plt.title(\"Count of bike sharing according to dates\")\r\n plt.plot(df['dteday'], df['cnt'])\r\n #plt.show()\r\n plt.savefig(\"Raw data visualization.png\")\r\n\r\n # box plot for visualizing outliers\r\n fig=px.box(df, y=\"cnt\", notched=True,title='Box plot of the count variable')\r\n #fig.show()\r\n plt.savefig(\"Box Plot.png\")\r\n\r\n # point plot for hourly utilization\r\n for column in ['season', 'yr', 'mnth', 'holiday', 'weekday', 'workingday', 'weathersit']:\r\n hist = px.histogram(df, x=column, y='cnt')\r\n hist.show()\r\n plt.savefig(\"Histogram plots for each column.png\")\r\n sns.pointplot(x=df['hr'], y='cnt', data=df);\r\n plt.title(\"Hourly Utilization\")\r\n plt.ylabel(\"Bike Shares\", fontsize=12)\r\n plt.xlabel(\"Hour\", fontsize=12)\r\n plt.savefig(\"Hourly Utilization point plot.png\", dpi=300, bbox_inches='tight')\r\n\r\n # line plot for hourly utilization\r\n for c in ['holiday','season','workingday']:\r\n sns.lineplot(data=df,x='hr',y='cnt',hue=c)\r\n plt.title('Hourly plot vs count')\r\n plt.savefig(\"Hour vs count plot_main features.png\",dpi=300, bbox_inches='tight')\r\n\r\n # point plots for humidity vs count\r\n sns.pointplot(x='hum', y='cnt', data=df)\r\n plt.title(\"Amount of bike shares vs humidity\", fontsize=25)\r\n plt.xlabel(\"Humidity (%)\", fontsize=20)\r\n plt.ylabel('count of bike shares', fontsize=20)\r\n plt.locator_params(axis='x', nbins=10)\r\n plt.savefig(\"Pointplot of humidity vs count.png\",dpi=300, bbox_inches='tight')\r\n\r\n # box plots of whole df\r\n bx=px.box(df, y=\"cnt\")\r\n bx.show()\r\n\r\n # feature correlation plot\r\n corrs = abs(df.corr())\r\n sns.heatmap(corrs, annot=True)\r\n plt.title(\"Feature Correlation\")\r\n plt.savefig(\"Feature_correlation.png\", dpi=300, bbox_inches='tight')\r\n return plt",
"def visualize(self):\n NUM_AFFINITY = 4\n NUM_WILL = 7\n\n # Colors for the tasks and categories\n COLORS = d3['Category20c'][20] + d3['Category20b'][20]\n COLORS_CAT = d3['Category20'][20]\n COLORS_AFFINITY = brewer['Greens'][NUM_AFFINITY]\n COLORS_WILL = brewer['RdBu'][NUM_WILL]\n\n # Date range for the figure title\n start_str = c.START.strftime(\"%A %m/%d/%y\")\n end_str = c.END.strftime(\"%A %m/%d/%y\")\n\n # Day of week range for the x axis\n start_weekday_str = c.START.strftime(\"%a\")\n end_weekday_str = c.END.strftime(\"%a\")\n\n times, tasks = self.array.nonzero()\n day_start = tutil.DAY_START\n hours = (times % tutil.SLOTS_PER_DAY) / tutil.SLOTS_PER_HOUR\n bottom = day_start + hours\n top = bottom + (0.95 / tutil.SLOTS_PER_HOUR)\n left = np.floor(times / tutil.SLOTS_PER_DAY)\n right = left + 0.75\n chunk_min = [self.task_chunk_min[j] for j in tasks]\n chunk_max = [self.task_chunk_max[j] for j in tasks]\n affinity_cog_task = [self.task_cognitive_load[j] for j in tasks]\n affinity_cog_slot = [c.AFFINITY_COGNITIVE[i] for i in times]\n affinity_cognitive = (np.array(affinity_cog_task) * np.array(\n affinity_cog_slot)).tolist()\n willpower_task = [self.task_willpower_load[j] for j in tasks]\n willpower_cumulative = np.cumsum(willpower_task)\n duration = [self.task_duration[j] for j in tasks]\n duration_realized = [self.task_duration_realized[j] for j in tasks]\n task_names = [self.task_names[j] for j in tasks]\n category_ids = [[l for l, j in enumerate(array) if j != 0] for array in\n [self.task_category[j, :] for j in tasks]]\n category = [\", \".join(\n [self.cat_names[l] for l, j in enumerate(array) if j != 0]) for\n array in [self.task_category[j, :] for j in tasks]]\n data_tooltips = dict(\n chunk_min=chunk_min,\n chunk_max=chunk_max,\n affinity_cognitive=affinity_cognitive,\n affinity_cog_slot=affinity_cog_slot,\n affinity_cog_task=affinity_cog_task,\n willpower_task=willpower_task,\n willpower_cumulative=willpower_cumulative,\n duration=duration,\n duration_realized=duration_realized,\n task_id=tasks,\n task=task_names,\n category=category,\n )\n\n offset = self.num_tasks - self.num_categories\n # Use #deebf7 as placeholder/default event color\n colors = [COLORS[i % len(COLORS)] if i < offset else '#ffffcc' for i in\n tasks]\n data1 = data_tooltips.copy()\n data1.update(dict(\n top=top,\n bottom=bottom,\n left=left,\n right=right,\n colors=colors,\n ))\n source1 = ColumnDataSource(data=data1)\n\n TOOLTIPS = [(\"task\", \"@task\"),\n (\"category\", \"@category\"),\n (\"duration\", \"@duration_realized / @duration\"),\n (\"willpower\", \"@willpower_task\"),\n (\"willpower (cum)\", \"@willpower_cumulative\"),\n (\"chunk_range\", \"(@chunk_min, @chunk_max)\"),\n (\"affinity [slot x task]\", \"@affinity_cognitive = \"\n \"@affinity_cog_slot x \"\n \"@affinity_cog_task\"),\n (\"task_id\", \"@task_id\"),\n (\"index\", \"$index\"),\n (\"(t,l)\", \"(@bottom, @left)\"),\n ]\n\n # [Bokeh] inverted axis range example:\n # https://groups.google.com/a/continuum.io/forum/#!topic/bokeh/CJAvppgQmKo\n yr = Range1d(start=22, end=6)\n # yr = Range1d(start=24.5, end=-0.5)\n xr = Range1d(start=-0.3, end=7.3)\n p = figure(plot_width=1000, plot_height=600, y_range=yr, x_range=xr,\n tooltips=TOOLTIPS,\n title=\"Calendar: {} to {}\".format(start_str, end_str))\n self.p = p\n output_file(\"calendar.html\")\n\n p.xaxis[0].axis_label = 'Weekday ({}-{})'.format(start_weekday_str,\n end_weekday_str)\n p.yaxis[0].axis_label = 'Hour (7AM-9:30PM)'\n\n # Replace default yaxis so that each hour is displayed\n 
p.yaxis[0].ticker.desired_num_ticks = int(tutil.HOURS_PER_DAY)\n p.yaxis[0].ticker.num_minor_ticks = 4\n p.xaxis[0].ticker.num_minor_ticks = 0\n\n # Display task allocation as colored rectangles\n p.quad(top='top', bottom='bottom', left='left', right='right',\n color='colors', fill_alpha=0.7, line_alpha=0.5, source=source1)\n\n # Pre-process task names for display (no repeats, abbreviated names)\n # FIXME(cathywu) currently assumes that y is in time order, which may\n # not be the case when more task types are incorporated\n task_display = []\n curr_task = \"\"\n for name in task_names:\n if name == curr_task:\n task_display.append(\"\")\n else:\n curr_task = name\n task_display.append(name)\n data2 = data_tooltips.copy()\n data2.update(dict(\n x=left,\n y=top,\n # abbreviated version of task\n task=[k[:19] for k in task_display],\n ))\n source2 = ColumnDataSource(data=data2)\n\n # Annotate rectangles with task name\n # [Bokeh] Text properties:\n # https://bokeh.pydata.org/en/latest/docs/user_guide/styling.html#text-properties\n labels = LabelSet(x='x', y='y', text='task', level='glyph', x_offset=3,\n y_offset=-1, source=source2, text_font_size='7pt',\n render_mode='canvas')\n p.add_layout(labels)\n\n # Display cognitive affinity as rectangle to the right of the task\n colors_affinity = np.array(\n np.array(affinity_cognitive) * (NUM_AFFINITY - 1), dtype=int)\n colors_affinity = [COLORS_AFFINITY[NUM_AFFINITY - 1 - i] for i in\n colors_affinity.tolist()]\n data5 = data_tooltips.copy()\n data5.update(dict(\n top=(np.array(top) - 0.05).tolist(),\n bottom=(np.array(bottom) + 0.05).tolist(),\n left=(np.array(right) + 0.12).tolist(),\n right=(np.array(right) + 0.2).tolist(),\n colors=colors_affinity,\n ))\n source5 = ColumnDataSource(data=data5)\n p.quad(top='top', bottom='bottom', left='left', right='right',\n color='colors', source=source5)\n\n # Display willpower balance as rectangle to the right of the task\n colors_will = np.minimum(willpower_cumulative, 2)\n colors_will = np.maximum(colors_will, -2)\n colors_will += 2\n colors_will = np.array(colors_will / 4 * (NUM_WILL - 1), dtype=int)\n colors_will = [COLORS_WILL[i] for i in colors_will.tolist()]\n data6 = data_tooltips.copy()\n data6.update(dict(\n top=top,\n bottom=bottom,\n left=np.array(right) + 0.02,\n right=(np.array(right) + 0.1).tolist(),\n colors=colors_will,\n ))\n source6 = ColumnDataSource(data=data6)\n p.quad(top='top', bottom='bottom', left='left', right='right',\n color='colors', source=source6)\n\n # Display categories as a colored line on the left\n # TODO(cathywu) currently displays only the \"first\" category,\n # add support for more categories\n xs = []\n ys = []\n for y0, y1, x in zip(top, bottom, left):\n xs.append([x, x])\n ys.append([y0, y1])\n colors_cat = [COLORS_CAT[cat_ids[0] % len(COLORS_CAT)] for cat_ids in\n category_ids]\n data3 = data_tooltips.copy()\n data3.update(dict(\n xs=xs,\n ys=ys,\n colors=colors_cat,\n ))\n source3 = ColumnDataSource(data=data3)\n p.multi_line(xs='xs', ys='ys', color='colors', line_width=4,\n source=source3)\n\n # Annotate columns with day of the week\n data4 = data_tooltips.copy()\n data4.update(dict(\n x=[k + 0.1 for k in range(tutil.LOOKAHEAD)],\n y=[6.75 for _ in range(tutil.LOOKAHEAD)],\n weekday=[(c.START + timedelta(k)).strftime(\"%A\") for k in\n range(tutil.LOOKAHEAD)],\n ))\n source4 = ColumnDataSource(data=data4)\n labels2 = LabelSet(x='x', y='y', text='weekday', level='glyph',\n x_offset=3, y_offset=-1, source=source4,\n text_font_size='10pt', render_mode='canvas')\n 
p.add_layout(labels2)\n\n show(p)",
"def visualizations():\r\n raise NotImplementedError\r\n # df = pandas.read_csv('accidents_by_hour.csv', index_col=0, header=0)\r\n # plt.plot(0, 0, data=df)\r\n # plt.show()\r",
"def weekly():",
"def plot_dispatch(pv, demand, E, week=30):\n\n sliced_index = (pv.index.week==week)\n pv_sliced = pv[sliced_index]\n demand_sliced = demand[sliced_index]\n self_consumption = E['inv2load'][sliced_index]\n \n direct_self_consumption = np.minimum(pv_sliced,demand_sliced)# E['inv2load'][sliced_index]\n indirect_self_consumption = self_consumption-direct_self_consumption\n res_pv_sliced = E['res_pv'][sliced_index]\n grid2load_sliced = E['grid2load'][sliced_index]\n store2inv_sliced = E['store2inv'][sliced_index]\n LevelOfCharge = E['LevelOfCharge'][sliced_index]\n inv2grid = E['inv2grid'][sliced_index]\n grid2load = E['grid2load'][sliced_index]\n aux=np.maximum(0,self_consumption)\n\n fig, axes = plt.subplots(nrows=3, ncols=1, sharex=True, figsize=(17, 4*3), frameon=False,\n gridspec_kw={'height_ratios': [3, 1, 1], 'hspace': 0.04})\n\n #fig, ax = plt.subplots(figsize=(17, 4))\n axes[0].plot(demand_sliced.index, demand_sliced, color='black', lw=2,label='demand')\n axes[0].plot(pv_sliced.index, pv_sliced, color='black',ls='--', lw=2,label='PV')\n axes[0].fill_between(direct_self_consumption.index, 0, direct_self_consumption, color='orange', alpha=.8, label='DSC')\n axes[0].fill_between(pv_sliced.index, self_consumption, pv_sliced , where=pv_sliced<demand_sliced,color='blue', hatch='//',\n alpha=.3,label='ISC')\n axes[0].fill_between(pv_sliced.index, direct_self_consumption, pv_sliced ,where=pv_sliced>demand_sliced, color='gold', alpha=.3,label='Excess PV')\n\n axes[0].fill_between(grid2load_sliced.index,self_consumption,demand_sliced,color='red',alpha=.2, label='grid2load')\n \n\n #axes[0].plot(grid2load_sliced.index, grid2load_sliced, color='red', ls=\":\", lw=1)\n axes[0].set_ylim([0, axes[0].get_ylim()[1] ])\n axes[0].set_ylabel('Power (kW)')\n\n axes[1].fill_between(LevelOfCharge.index, 0, LevelOfCharge, color='grey', alpha=.2, label='SOC')\n axes[1].set_ylabel('State of Charge (kWh)')\n\n axes[2].fill_between(inv2grid.index, 0, inv2grid, color='green', alpha=.2,label='injected2grid')\n axes[2].fill_between(inv2grid.index, 0, -grid2load, color='red', alpha=.2,label='grid drawn')\n axes[2].set_ylabel('In/out from grid (kW)')\n axes[0].legend()\n axes[1].legend()\n axes[2].legend()\n return",
"def graph7():\r\n sheet = workbook.sheet_by_index(4)\r\n data = [[sheet.cell_value(r, c) for c in range(sheet.ncols)] for r in range(sheet.nrows)]\r\n\r\n for i in range(4, 5):\r\n list_data[0].append(round((data[i][1]/sum(data[i][j] for j in range(1, 6)))*100, 2))\r\n list_data[1].append(round((data[i][3]/sum(data[i][j] for j in range(1, 6)))*100, 2))\r\n list_data[2].append(round((data[i][4]/sum(data[i][j] for j in range(1, 6)))*100, 2))\r\n list_data[3].append(round((data[i][5]/sum(data[i][j] for j in range(1, 6)))*100, 2))\r\n\r\n gauge = pygal.SolidGauge(inner_radius=0.70, title=u'รอยละของประชากรอายุ 15 ปขึ้นไปที่ดื่มสุราหรือเครื่องดื่มมึนเมา จําแนกตามความถี่ในการดื่มสุราหรือเครื่องดื่มมึนเมา ปี 2550')\r\n percent_formatter = lambda x: '{:.10g}%'.format(x)\r\n gauge.value_formatter = percent_formatter\r\n for i in range(4):\r\n gauge.add(data_name[i], list_data[i])\r\n gauge.render_to_file('7Classified by frequency of drinking in 2550.svg')",
"def hr_report():\n\n # Load the peak data.\n db = Persistence()\n if not (activities := db.load_all()):\n print(\"No data to report on\")\n return\n\n # Find the maximum for each value.\n max = _load_max_values(activities)\n\n # Totals for the current week\n week_distance_total = 0\n week_elevation_total = 0\n week_duration_total = timedelta()\n week_work_days = 0\n week_5sec_average = []\n week_30sec_average = []\n week_60sec_average = []\n week_5min_average = []\n week_10min_average = []\n week_20min_average = []\n week_30min_average = []\n week_60min_average = []\n week_90min_average = []\n week_120min_average = []\n\n # Print the peak data for each week.\n current_weekday = None\n for activity in activities:\n\n # Time to break to a new week?\n if current_weekday is None or current_weekday > activity.start_time.weekday():\n if current_weekday:\n _print_footer(\n week_distance_total=week_distance_total,\n week_elevation_total=week_elevation_total,\n week_duration_total=week_duration_total,\n week_work_days=week_work_days,\n week_5sec_average=week_5sec_average,\n week_30sec_average=week_30sec_average,\n week_60sec_average=week_60sec_average,\n week_5min_average=week_5min_average,\n week_10min_average=week_10min_average,\n week_20min_average=week_20min_average,\n week_30min_average=week_30min_average,\n week_60min_average=week_60min_average,\n week_90min_average=week_90min_average,\n week_120min_average=week_120min_average,\n )\n week_distance_total = 0\n week_elevation_total = 0\n week_duration_total = timedelta(0)\n week_work_days = 0\n week_5sec_average = []\n week_30sec_average = []\n week_60sec_average = []\n week_5min_average = []\n week_10min_average = []\n week_20min_average = []\n week_30min_average = []\n week_60min_average = []\n week_90min_average = []\n week_120min_average = []\n\n _print_header()\n\n # Capture the weekday.\n if current_weekday is None or current_weekday != activity.start_time.weekday():\n week_work_days = week_work_days + 1\n\n current_weekday = activity.start_time.weekday()\n\n # Print the detail.\n _print_detail(activity, max)\n\n # Find the duration.\n duration = activity.end_time - activity.start_time\n\n # Accumulate for this week\n week_distance_total = week_distance_total + activity.distance\n if activity.elevation:\n week_elevation_total = week_elevation_total + activity.elevation\n week_duration_total = week_duration_total + duration\n week_5sec_average.append(activity.peak_5sec_hr)\n week_30sec_average.append(activity.peak_30sec_hr)\n week_60sec_average.append(activity.peak_60sec_hr)\n if activity.peak_5min_hr:\n week_5min_average.append(activity.peak_5min_hr)\n if activity.peak_10min_hr:\n week_10min_average.append(activity.peak_10min_hr)\n if activity.peak_20min_hr:\n week_20min_average.append(activity.peak_20min_hr)\n if activity.peak_30min_hr:\n week_30min_average.append(activity.peak_30min_hr)\n if activity.peak_60min_hr:\n week_60min_average.append(activity.peak_60min_hr)\n if activity.peak_90min_hr:\n week_90min_average.append(activity.peak_90min_hr)\n if activity.peak_120min_hr:\n week_120min_average.append(activity.peak_120min_hr)\n\n # Final footer.\n _print_footer(\n week_distance_total=week_distance_total,\n week_elevation_total=week_elevation_total,\n week_duration_total=week_duration_total,\n week_work_days=week_work_days,\n week_5sec_average=week_5sec_average,\n week_30sec_average=week_30sec_average,\n week_60sec_average=week_60sec_average,\n week_5min_average=week_5min_average,\n week_10min_average=week_10min_average,\n 
week_20min_average=week_20min_average,\n week_30min_average=week_30min_average,\n week_60min_average=week_60min_average,\n week_90min_average=week_90min_average,\n week_120min_average=week_120min_average,\n )\n\n # Print the summary.\n _print_summary(max)",
"def print_charts(dataset, title, weekday=False):\n chart = []\n keys = sorted(dataset.keys())\n mean = numpy.mean(list(dataset.values()))\n median = numpy.median(list(dataset.values()))\n if args.json is False:\n export_string(title)\n\n for key in keys:\n if (dataset[key] >= median * 1.33):\n displayed_key = \"%s (\\033[92m+\\033[0m)\" % (int_to_weekday(key) if weekday else key)\n elif (dataset[key] <= median * 0.66):\n displayed_key = \"%s (\\033[91m-\\033[0m)\" % (int_to_weekday(key) if weekday else key)\n else:\n displayed_key = (int_to_weekday(key) if weekday else key)\n if args.json is False:\n export_string(\"%s - %s\" % (dataset[key], (int_to_weekday(key) if weekday else key)))\n chart.append((displayed_key, dataset[key]))\n\n thresholds = {\n int(mean): Gre, int(mean * 2): Yel, int(mean * 3): Red,\n }\n\n data = hcolor(chart, thresholds)\n\n graph = Pyasciigraph(\n separator_length=4,\n multivalue=False,\n human_readable='si',\n )\n\n if args.json is False:\n for line in graph.graph(title, data):\n if not color_supported:\n ansi_escape = re.compile(r'\\x1B\\[[0-?]*[ -/]*[@-~]')\n line = ansi_escape.sub('', line)\n print(line)\n cprint(\"\")",
"def weekly(evictiondata):\r\n evictions_per_week = {}\r\n for index, row in evictiondata.iterrows():\r\n if row['week_date'] not in evictions_per_week.keys():\r\n evictions_per_week[row['week_date']] = row['filings_2020']\r\n else:\r\n evictions_per_week[row['week_date']] += row['filings_2020']\r\n return evictions_per_week",
"def graph11():\r\n sheet = workbook.sheet_by_index(4)\r\n data = [[sheet.cell_value(r, c) for c in range(sheet.ncols)] for r in range(sheet.nrows)]\r\n\r\n for i in range(2, sheet.nrows):\r\n for k in range(1, 6):\r\n list_data[i-2].append({'value': round((data[i][k]/sum(data[i][j] for j in range(1, 6)))*100, 2), 'label': '%.2f%s' %(round((data[i][k]/sum(data[i][j] for j in range(1, 6)))*100, 2), '%')})\r\n\r\n line_chart = pygal.HorizontalBar(print_labels=True, stack_from_top=False)\r\n line_chart.title = 'เปรียบเทียบรอยละของประชากรอายุ 15 ปขึ้นไปที่ดื่มสุราหรือเครื่องดื่มมึนเมา จําแนกตามความถี่ในการดื่มสุราหรือเครื่องดื่มมึนเมา ปี 2544 - 2557'\r\n line_chart.x_labels = [\"ดื่มทุกวัน\", \"5-6 วันต่อสัปดาห์\", \"3-4 วันต่อสัปดาห์\", \"1-2 วันต่อสัปดาห์\", \"ดื่มนานๆครั้ง\"]\r\n line_chart.y_labels = map(int, range(0, 71, 10))\r\n for i in range(6):\r\n line_chart.add(data_name[i], list_data[i])\r\n line_chart.render_to_file('11Compare graph of Classified by frequency of drinking in 2544 - 2557.svg')",
"def plot_messages_by_week(self, **kwargs):\n assert not (self.__messages_by_week is None), 'First call get_messages_by_week'\n self.__df.resample('1W')['message'].count().plot(title= 'Messages by week', **kwargs)",
"def graphy2():\n data = pd.read_csv(\"week2.csv\")\n plot_g = pygal.Bar(fill=True, interpolate='cubic', style=LightSolarizedStyle)\n plot_g.title = \"Top Fans in Week 2\"\n plot_g.x_labels = data.GENDER\n plot_g.y_labels = map(int, range(0, 80, 10))\n plot_g.add(\"Male\", data.COUNT)\n plot_g.add(\"Female\", data.COUNT2)\n plot_g.add(\"Total\", data.COUNT3)\n plot_g.render_to_file(\"plotweek2.svg\")",
"def get_basic_plot(df, log_pathway, log_type):\n if len(df) > 0:\n # Get the date column we will use for various counts\n column_for_grouping = '{}Date'.format(log_type)\n # Add a date index to df\n df.set_index(df[column_for_grouping].apply(pd.to_datetime), inplace=True, drop=False)\n # Add Month, week and weekday columns\n df['Month'] = df.index.month\n df['Week'] = df.index.week # Should we use week of year here?\n df['WeekDay'] = df.index.weekday_name\n # Create groups for plotting\n month = df.groupby('Month').size()\n # month.index = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']\n week = df.groupby('Week').size()\n weekday = df.groupby('WeekDay').size()\n\n # Month\n data_viz_pathway = os.path.dirname(log_pathway).replace('logs', 'data_visualization')\n month_plot = month.plot(kind='bar')\n month_fig = month_plot.get_figure()\n month_figure_pathway = os.path.join(data_viz_pathway, '{}output_month.png'.format(log_type))\n month_fig.savefig(month_figure_pathway)\n print('Basic {} log by month chart saved to {}'.format(log_type, month_figure_pathway))\n\n # Week\n week_plot = week.plot(kind='bar')\n week_fig = week_plot.get_figure()\n week_figure_pathway = os.path.join(data_viz_pathway, '{}output_week.png'.format(log_type))\n week_fig.savefig(week_figure_pathway)\n print('Basic {} log by month chart saved to {}'.format(log_type, week_figure_pathway))\n\n # Weekday\n weekday_plot = weekday.plot(kind='bar')\n weekday_fig = weekday_plot.get_figure()\n weekday_figure_pathway = os.path.join(data_viz_pathway, '{}output_weekday.png'.format(log_type))\n weekday_fig.savefig(weekday_figure_pathway)\n print('Basic {} log by month chart saved to {}'.format(log_type, weekday_figure_pathway))",
"def graph4():\r\n sheet = workbook.sheet_by_index(3)\r\n data = [[sheet.cell_value(r, c) for c in range(sheet.ncols)] for r in range(sheet.nrows)]\r\n\r\n for i in range(2, sheet.nrows):\r\n list_data[0].append((data[i][0], round((data[i][1]/data[i][2])*100, 2)))\r\n list_data[1].append((data[i][0], round((data[i][3]/data[i][4])*100, 2)))\r\n list_data[2].append((data[i][0], round((data[i][5]/data[i][6])*100, 2)))\r\n list_data[3].append((data[i][0], round((sum(data[i][j] for j in range(1, 6, 2))/sum(data[i][j] for j in range(2, 7, 2)))*100, 2)))\r\n\r\n line_graph = pygal.XY()\r\n line_graph.title = 'สัดส่วนของผู้ที่ดื่มแอลกอฮอล์เป็นประจำรวม และแยกตามกลุ่มอายุ ระหว่างปี 2544 ถึง 2557'\r\n line_graph.x_labels = (2544, 2546, 2548, 2550, 2552, 2554, 2556, 2558)\r\n for i in range(4):\r\n line_graph.add(data_name[i], list_data[i])\r\n line_graph.render_to_file('4Percentage of regular drinkers among drinkers by age groups between 2001 and 2014.svg')",
"def visualize_data(total_rewards):\n\n x_values = arange(0, len(total_rewards), 1)\n y_values = total_rewards\n plot(x_values, y_values)\n xlabel('episodes')\n ylabel('cumulative rewards')\n title('Reward by Episode')\n grid(True)\n show()",
"def graph3():\r\n sheet = workbook.sheet_by_index(2)\r\n data = [[sheet.cell_value(r, c) for c in range(sheet.ncols)] for r in range(sheet.nrows)]\r\n\r\n for i in range(2, sheet.nrows):\r\n list_data[0].append((data[i][0], round((data[i][1]/data[i][2])*100, 2)))\r\n list_data[1].append((data[i][0], round((data[i][3]/data[i][4])*100, 2)))\r\n list_data[2].append((data[i][0], round((data[i][5]/data[i][6])*100, 2)))\r\n list_data[3].append((data[i][0], round((sum(data[i][j] for j in range(1, 6, 2))/sum(data[i][j] for j in range(2, 7, 2)))*100, 2)))\r\n\r\n line_graph = pygal.XY()\r\n line_graph.title = 'อัตราการดื่มเครื่องดื่มแอลกอฮอล์รวม และแยกตามกลุ่มอายุ ระหว่างปี 2544 ถึง 2557'\r\n line_graph.x_labels = (2544, 2546, 2548, 2550, 2552, 2554, 2556, 2558)\r\n for i in range(4):\r\n line_graph.add(data_name[i], list_data[i])\r\n line_graph.render_to_file('3Alcohol consumption rate by age groups between 2001 and 2014.svg')",
"def graph12():\r\n sheet = workbook.sheet_by_index(5)\r\n data = [[sheet.cell_value(r, c) for c in range(sheet.ncols)] for r in range(sheet.nrows)]\r\n\r\n for i in range(2, sheet.nrows):\r\n for k in range(1, 6):\r\n list_data[i-2].append({'value': round((data[i][k]/sum(data[i][j] for j in range(1, 6)))*100, 2), 'label': '%.2f%s' %(round((data[i][k]/sum(data[i][j] for j in range(1, 6)))*100, 2), '%')})\r\n\r\n line_chart = pygal.HorizontalBar(print_labels=True, stack_from_top=False)\r\n line_chart.title = 'เปรียบเทียบรอยละของประชากรอายุ 15 ปขึ้นไปที่ดื่มสุราหรือเครื่องดื่มมึนเมา จำแนกตามประเภทของสุราที่ดื่มบ่อย ปี 2544 - 2557'\r\n line_chart.x_labels = ['เบียร์', 'สุราแช่พื้นบ้าน (สาโท อุ กระแช่)', 'สุราขาว, สุราสี, สุรากลั่น', 'ไวน์', 'อื่นๆ']\r\n line_chart.y_labels = map(int, range(0, 61, 10))\r\n for i in range(4):\r\n line_chart.add(data_name[i], list_data[i])\r\n line_chart.render_to_file('12Compare graph of Classified by type og alcohol in 2544 - 2557.svg')",
"def plot_activity(series, savename='activity.png'):\n # Fills the time series\n ## Fill up to next staurday (end of the week)\n series = fill_week(series)\n ### Fill or truncate timeseries to suit the plot\n number_of_days = 371\n if series.shape[0] > number_of_days:\n # truncate to 371 days\n series = series[-number_of_days:]\n elif series.shape[0] < number_of_days:\n # Fill remaing values with zero\n series = fill_year(series)\n assert series.shape[0] == number_of_days\n\n # Obtain the months for the years' week\n months = series.index.map(lambda x: x.strftime('%b')).tolist()\n n_weekdays = 7\n # Split in weeks\n months = months[::n_weekdays]\n # replace the repeated months\n current_month = ''\n for n, month in enumerate(months):\n if month == current_month:\n months[n] = ''\n else:\n current_month = month\n\n # Plot\n fig, ax = plt.subplots()\n\n sns.heatmap(series.values.reshape(-1,n_weekdays).T, ax=ax,\n cmap='YlGn', cbar=False, linewidths=1, square=True,\n xticklabels=months,\n yticklabels=['','M', '', 'W', '', 'F', ''])\n\n ax.xaxis.tick_top()\n\n plt.savefig(savename, bbox_inches='tight')",
"def graph2():\r\n sheet = workbook.sheet_by_index(1)\r\n data = [[sheet.cell_value(r, c) for c in range(sheet.ncols)] for r in range(sheet.nrows)]\r\n\r\n for i in range(3, sheet.nrows):\r\n list_data[0].append((data[i][0], round((sum(data[i][j] for j in range(1, 5))/sum(data[i][j] for j in range(1, 6)))*100, 2)))\r\n list_data[1].append((data[i][0], round((sum(data[i][j] for j in range(6, 10))/sum(data[i][j] for j in range(6, 11)))*100, 2)))\r\n list_data[2].append((data[i][0], round(((sum(data[i][j] for j in range(1, 10)) - data[i][5])/sum(data[i][j] for j in range(1, 11)))*100, 2)))\r\n\r\n line_graph = pygal.XY()\r\n line_graph.title = 'สัดส่วนของผู้ที่ดื่มแอลกอฮอล์เป็นประจำรวม และแยกตามเพศ ระหว่างปี 2544 ถึง 2557'\r\n line_graph.x_labels = (2544, 2546, 2548, 2550, 2552, 2554, 2556, 2558)\r\n for i in range(3):\r\n line_graph.add(data_name[i], list_data[i])\r\n line_graph.render_to_file('2Percentage of regular drinkers among drinkers by genders between 2001 and 2014.svg')",
"def GetAlert(diagnostic_cases, diagnostic, week,year):\n\n diag_cases = diagnostic_cases.filter(diagnostic=diagnostic)\n average = 0\n standard_deviation = 0\n cases = 0\n #number of years\n n_years = 0\n year_var = 0\n f = []\n year_ob = Year.objects.filter(year__lt=year)\n weeks = Week.objects.filter(year__in=year_ob,week=week.week).order_by('year')\n for w in weeks:\n\n\n if year_var != w.year.year:\n n_years += 1\n year_var = w.year.year\n\n\n pac = diag_cases.filter(week=w)\n x = 0\n for p in pac:\n\n cases += p.cases\n x = p.cases\n f.append(x)\n\n if cases != 0:\n\n average = cases / n_years\n\n #calculation of standar deviation\n if len(f) != 1:\n suma2 = 0\n for cases in f:\n suma2 += (cases-average)**2\n standard_deviation = math.sqrt(suma2 / len(f))\n cases = 0\n dia = diag_cases.filter(week=week)\n\n for d in dia:\n cases += d.cases\n\n #array of class dots for draw the chart\n\n lower_rank = 0\n top_rank = 0\n if n_years != 0:\n lower_rank = average - (1.96 * standard_deviation / math.sqrt(n_years))\n top_rank = average + (1.96 * standard_deviation / math.sqrt(n_years))\n\n dots = DotsGraphicAverage(average,week.week, lower_rank, top_rank,cases)\n\n return dots"
] | [
"0.7287062",
"0.7240408",
"0.7095925",
"0.67528796",
"0.6648603",
"0.6480761",
"0.62646145",
"0.62237006",
"0.6187958",
"0.6187809",
"0.613508",
"0.60985005",
"0.60869294",
"0.6011801",
"0.59913427",
"0.59690994",
"0.59242606",
"0.59085816",
"0.5906574",
"0.5892883",
"0.58743155",
"0.5857674",
"0.5766704",
"0.5756484",
"0.57472146",
"0.5731638",
"0.57305384",
"0.57230335",
"0.57155615",
"0.569953"
] | 0.81232464 | 0 |
Graphs the baseline eviction data of 2015-2016 in the same format | def graph_baseline(evictiondata, weeks):
base_evictions_per_week = {}
for index, row in evictiondata.iterrows():
if row['week_date'] not in base_evictions_per_week.keys():
base_evictions_per_week[row['week_date']] = row['filings_avg']
elif row['GEOID'] != 'sealed':
base_evictions_per_week[row['week_date']] += row['filings_avg']
base_evictions_filed = []
for week in weeks:
base_evictions_filed.append(base_evictions_per_week[week])
plt.figure(figsize=(50, 10))
plt.plot(weeks, base_evictions_filed, color='orange')
plt.title('Base Evictions filed by the week')
plt.xlabel('Date')
plt.ylabel('Evictions filed')
plt.show()
return base_evictions_filed | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cross_analyze(evictions_filed, base_evictions_filed, weeks):\r\n plt.figure(figsize=(50, 10))\r\n plt.plot(weeks, evictions_filed, label = '2020')\r\n plt.plot(weeks, base_evictions_filed, label = '2015-2016')\r\n plt.xlabel('Date', fontsize = 25)\r\n plt.ylabel('Evictions filed', fontsize = 25)\r\n plt.title('Evictions filed by the week', fontsize = 40)\r\n plt.legend()\r\n plt.annotate('Texas Supreme Court puts a temporary \\n stay on eviction proceedings.', xy = ('3/8/2020', 1551), fontsize = 15)\r\n plt.show()",
"def visualizations():\r\n raise NotImplementedError\r\n # df = pandas.read_csv('accidents_by_hour.csv', index_col=0, header=0)\r\n # plt.plot(0, 0, data=df)\r\n # plt.show()\r",
"def visualize(epc_data: List[EmissionPerCapita],\r\n prediction_year: int, title: str, frame_rate: int) -> None:\r\n\r\n # Set fit with 2 graphs.\r\n fig = make_subplots(rows=2, cols=1,\r\n subplot_titles=('Emission Per Capita (in thousand metric tons)',\r\n 'Average Emission Per Capita (in thousand metric tons)'))\r\n\r\n colors = assign_colors(epc_data) # assign colors to each element.\r\n\r\n # Initialize the two graphs.\r\n # PS: We believe there is no error in the marker_color line but\r\n # somehow pycharm insists there is.(We have tried a demo from\r\n # the official plotly library and pycharm still highlights it.)\r\n initial_sorted_top_10 = sort_top_10(epc_data, epc_data[0].start_year)\r\n initial_sorted_colors = get_sorted_colors(colors, initial_sorted_top_10[0])\r\n fig.add_trace(go.Bar(x=initial_sorted_top_10[0], y=initial_sorted_top_10[1],\r\n text=initial_sorted_top_10[0],\r\n hoverinfo='none', textposition='outside',\r\n texttemplate='%{x}<br>%{y:s}', cliponaxis=False,\r\n name='Per Capita in: ' + str(epc_data[0].start_year),\r\n marker_color=initial_sorted_colors\r\n ), row=1, col=1)\r\n\r\n x_axis = list(range(epc_data[0].start_year, epc_data[0].end_year + prediction_year + 1))\r\n fig.add_trace(go.Scatter(x=x_axis, y=[0],\r\n name='Average Per Capita: ' + str(epc_data[0].start_year)\r\n ), row=2, col=1)\r\n\r\n # Produce each frame presented in the animation.\r\n list_of_frames = []\r\n average_emission_so_far = []\r\n for i in range(epc_data[0].start_year, epc_data[0].end_year + prediction_year + 1, frame_rate):\r\n\r\n # Get the sorted top 10 and their corresponding colors for the current frame.\r\n sorted_top_10 = sort_top_10(epc_data, i)\r\n sorted_colors = get_sorted_colors(colors, sorted_top_10[0])\r\n\r\n # Append the current year average emission per capita to the accumulator.\r\n list.append(average_emission_so_far, average_emission(epc_data, i))\r\n\r\n # Append the current frame to list_of_frames using the following style.\r\n # PS: the same situation happens in this marker_color, too.\r\n list_of_frames.append(go.Frame(data=[go.Bar(x=sorted_top_10[0], y=sorted_top_10[1],\r\n text=sorted_top_10[0],\r\n hoverinfo='none', textposition='outside',\r\n texttemplate='%{x}<br>%{y:s}', cliponaxis=False,\r\n name='Per Capita in: ' + str(i),\r\n marker_color=sorted_colors),\r\n go.Scatter(x=x_axis, y=average_emission_so_far,\r\n name='Average Per Capita in: ' + str(i))],\r\n traces=[0, 1]))\r\n\r\n fig.frames = list_of_frames\r\n\r\n # Set the layout of the two graphs.\r\n fig.update_layout(updatemenus=[{'type': 'buttons',\r\n 'showactive': False,\r\n 'y': 0,\r\n 'x': 1.05,\r\n 'xanchor': 'left',\r\n 'yanchor': 'bottom',\r\n 'buttons': [{'label': 'Play',\r\n 'method': 'animate',\r\n 'args': [None]}]}],\r\n width=1400, height=750,\r\n font={'size': 20},\r\n title=title + ' (Predicted after year: ' + str(epc_data[0].end_year) + ')')\r\n fig.show()",
"def graph_data(self, timeframe):\n logging.info(\"Graphing Data\")\n pprog = self.prog_logs\n cursor = pprog.find({})\n data = {\n \"emotional\": [],\n \"physical\": [],\n \"cognitive\": []\n }\n comp = self.get_timeframe(timeframe)\n for doc in cursor:\n date = list(doc.keys())[1]\n try:\n datecomp = datetime.datetime.strptime(date, \"%Y-%m-%d %H:%M\")\n except:\n datecomp = datetime.datetime.today()\n if datecomp > datetime.datetime.combine(comp, datetime.time.min):\n for key in data.keys():\n rating = int(doc[date][\"data\"][key][\"rating\"])\n data[key].append(rating)\n plt.ylabel('Level')\n plt.xlabel('Number of Logs - Ordered By Date')\n for key in data.keys():\n plt.plot(data[key])\n plt.legend(['Emotional', 'Physical', 'Cognitive'], loc='upper left')\n plt.show()",
"def graphify(evictions_per_week):\r\n weeks = []\r\n for week in evictions_per_week.keys():\r\n if '2020' in week:\r\n weeks.append(week)\r\n evictions_filed = []\r\n for week in weeks:\r\n evictions_filed.append(evictions_per_week[week])\r\n plt.figure(figsize=(50, 10))\r\n plt.plot(weeks, evictions_filed)\r\n plt.xlabel('Date')\r\n plt.ylabel('Evictions filed')\r\n plt.title('Evictions filed by the week')\r\n plt.show()\r\n return weeks, evictions_filed",
"def forebears (WFROM,WTO,efrom, eto, g=25):\n \n c.execute(\"\"\"\n SELECT wyear, eyear, count (eyear), wnationality\n FROM clean \n WHERE (eyear IS NOT Null) AND (wyear IS NOT Null)\n AND WYEAR >= ? and WYEAR <= ? \n AND eyear >= ? AND eyear <= ? \n GROUP BY wyear, eyear\n ORDER BY wyear, eyear\"\"\", (WFROM, WTO, efrom, eto))\n\n years = c.fetchall()\n epigraphtotal = sum (s for (x,y,s,n) in years)\n #plt.xlim(WFROM, WTO)\n #plt.ylim(100, -1500)\n #colors = list(mcolors.TABLEAU_COLORS.keys()) *20\n #print(colors)\n \n \n gen =dd(lambda: dd(int))\n gentotal= dd(int)\n for (x,y,s,n) in years:\n gen[generation(x,g)][generation(y-x,g)] += 1\n gentotal[generation(x,g)] +=1\n \n for x in gen:\n for y in gen[x]:\n print(x, y, gen[x][y], gentotal[x])\n\n \n\n plt.figure(figsize=(10, 5))\n ax=plt.axes()\n\n\n #df.plot(colormap=gray) \n cumtotal = [0]*len(gen)\n\n for d in range(0,-200, -1):\n #for d in range(min(gen.keys()),max(gen.keys()),-1):\n xv = list(gen.keys())\n yv = [rat(gen[x][d],gentotal[x]) for x in xv]\n plt.bar(xv, yv, bottom=cumtotal,\n tick_label=[x*g for x in xv])\n cumtotal = [x + y for x, y in zip(yv, cumtotal)]\n #colors.pop()\n #print(d, cumtotal)\n plt.xlabel('Year of Work (in generations)')\n plt.ylabel(f'Share of Distance to forebear (in {g} year generations)')\n plt.title(f'Distance back vs Year of Work ({epigraphtotal} epigraphs)')\n plt.savefig(f\"figs/eg-forebear-{WFROM}:{WTO}-{efrom}:{eto}-{g}.png\")\n plt.close()",
"def graph11():\r\n sheet = workbook.sheet_by_index(4)\r\n data = [[sheet.cell_value(r, c) for c in range(sheet.ncols)] for r in range(sheet.nrows)]\r\n\r\n for i in range(2, sheet.nrows):\r\n for k in range(1, 6):\r\n list_data[i-2].append({'value': round((data[i][k]/sum(data[i][j] for j in range(1, 6)))*100, 2), 'label': '%.2f%s' %(round((data[i][k]/sum(data[i][j] for j in range(1, 6)))*100, 2), '%')})\r\n\r\n line_chart = pygal.HorizontalBar(print_labels=True, stack_from_top=False)\r\n line_chart.title = 'เปรียบเทียบรอยละของประชากรอายุ 15 ปขึ้นไปที่ดื่มสุราหรือเครื่องดื่มมึนเมา จําแนกตามความถี่ในการดื่มสุราหรือเครื่องดื่มมึนเมา ปี 2544 - 2557'\r\n line_chart.x_labels = [\"ดื่มทุกวัน\", \"5-6 วันต่อสัปดาห์\", \"3-4 วันต่อสัปดาห์\", \"1-2 วันต่อสัปดาห์\", \"ดื่มนานๆครั้ง\"]\r\n line_chart.y_labels = map(int, range(0, 71, 10))\r\n for i in range(6):\r\n line_chart.add(data_name[i], list_data[i])\r\n line_chart.render_to_file('11Compare graph of Classified by frequency of drinking in 2544 - 2557.svg')",
"def average_revenue():\n graph = pygal.SolidGauge(inner_radius=0.70)\n usd_formatter = lambda x: '{:.10g}M$'.format(x)\n graph.value_formatter = usd_formatter\n graph.title = \"Average Revenue of Movies per year\"\n\n for year in range(2000, 2017):\n print(\">> Year : %i\" % year)\n\n # Start display\n print(\">> [status] Create Graph Starting!\")\n\n dataset = pd.read_csv(\"Top-100_Export/Top-100_%i.csv\" % (year))\n revenue = dataset[\"revenue\"].tolist() #Revenue\n temp = []\n for i in revenue:\n if i != 0:\n temp.append(i)\n average = ((((sum(temp)/len(temp)))/1000000//0.01)/100)\n graph.add(str(year), [{'value': average, 'max_value': 250}])\n\n # End display\n print(\">> [status] Created Graph Successful!\")\n\n graph.render_to_file(\"Graph_Export/Average_Revenue_of_Movies.svg\")\n\n # Used time\n print(\">> [status] Completed : Used time = %s seconds\" % (time.time() - start_time))",
"def charting(lim=2020):\r\n for indic in ['FLR ', 'CRE ', 'TISA', 'SSPI', 'US7 ']:\r\n for c in ['A', 'M', 'P', 'T', 'all']:\r\n # TODO: fix charting for SSPI - it returns three values\r\n data = chart_data(indic, '2018-09-01', 12*5, c, lim=lim).set_index('date').sort_index()\r\n y = ['SP1', 'SP2', 'SP5', 'SSPI'] if indic == 'SSPI' else ['Perc.idv', 'Perc.ids']\r\n data.plot(kind='line', y=y)\r\n plt.xticks(range(len(data)), data.index.tolist(), rotation=30)\r\n plt.xlabel(None)\r\n plt.axhline(y=100, color='r', linestyle='-', label='Individual target')\r\n plt.axhline(y=75, color='b', linestyle='-', label='Industry target')\r\n plt.title(centres[c] + ' ' + indic)\r\n plt.savefig('pic/' + str(lim) + c + indic.strip() + '.png')\r\n logging.info('pic/' + str(lim) + c + indic.strip() + '.png saved')",
"def visualize_data(total_rewards):\n\n x_values = arange(0, len(total_rewards), 1)\n y_values = total_rewards\n plot(x_values, y_values)\n xlabel('episodes')\n ylabel('cumulative rewards')\n title('Reward by Episode')\n grid(True)\n show()",
"def visualize_time_series(fig_ax, data, inp_color, missing_data, lag_color, first_date,\n x_label=\"Number of Days\", y_label=\"Log of Aluminium Price\", title=\"Prices over time\"):\n fig, ax = fig_ax\n ((x_train_raw, y_train_raw), y_pred_list) = data\n\n missing_x, missing_y = missing_data\n is_missing = len(missing_x) != 0\n\n first_date = datetime.strptime(first_date, '%Y-%m-%d')\n\n convert_date = lambda x: [\n np.datetime64((first_date + timedelta(days=d)).strftime('%Y-%m-%d'))\n for d in x\n ]\n convert_price = lambda x: x[\"Output\"].to_list()\n\n x_train = convert_date(x_train_raw[\"Date\"].to_list())\n y_train = convert_price(y_train_raw)\n \n cut_point = x_train[-1]\n ax.plot(x_train, y_train, color=color[inp_color])\n\n for i, y_pred in enumerate(y_pred_list):\n data, plot_name, color_code, is_bridge = y_pred\n mean_pred, x_test_raw = data[\"mean\"], data[\"x\"]\n x_test = convert_date(x_test_raw)\n\n if i == 0 and is_missing:\n missing_x = convert_date(missing_x)\n ax.axvline(x_test[0], color=color[lag_color], linestyle='--', linewidth=0.5, dashes=(5, 0), alpha=0.2)\n ax.plot([missing_x[-1], x_test[0]], [missing_y[-1], mean_pred[0]], color[lag_color], linestyle=\"dashed\")\n ax.axvspan(cut_point, x_test[0], color=color[lag_color], alpha=0.1)\n\n plot_bound(ax, data, x_test, color[color_code], plot_name)\n\n if is_bridge and (not is_missing): \n ax.plot([x_train[-1], x_test[0]], [y_train[-1], mean_pred[0]], color[color_code], linewidth=1.5)\n\n if is_missing:\n ax.plot(missing_x, missing_y, color=color[lag_color], linestyle=\"dashed\")\n ax.plot([x_train[-1], missing_x[0]], [y_train[-1], missing_y[0]], color[lag_color], linestyle=\"dashed\")\n ax.axvline(cut_point, color=color[lag_color], linestyle='--', linewidth=0.5, dashes=(5, 0), alpha=0.2)\n else:\n ax.axvline(cut_point, color=color[\"k\"], linestyle='--')\n\n ax.xaxis.set_minor_locator(AutoMinorLocator())\n ax.legend()\n\n # ax.set_xlabel(x_label)\n ax.set_ylabel(y_label)\n ax.set_title(title)\n\n # ax.set_xlim(left=cut_point-np.timedelta64(1, 'm'))\n plot_axis_date(ax, x_train + missing_x + x_test)\n ax.grid()\n return fig, ax",
"def display(self):\n \n # initialize SQL kit to access database\n s = SQL_Kit(self.userID, self.password, self.database)\n \n \n \"\"\" Total Activity by hour \"\"\"\n \n # get activity data\n all_date_times = self.activity().index\n\n all_days = []\n all_hours = []\n for item in all_date_times:\n all_days.append((item.timetuple().tm_yday))\n all_hours.append(item.hour)\n\n x = all_days\n y = all_hours\n x_labels = pd.Series(all_days).unique()\n\n fig1, ax1 = plt.subplots()\n ax1.set_title('Hourly Activity')\n ax1.scatter(x,y,color='mediumspringgreen',linewidths=1)\n ax1.set_xlabel('day of year')\n ax1.set_ylabel('hour')\n ax1.xaxis.grid(True)\n\n if len(x_labels) > 5:\n ax1.xaxis.set_ticks([min(all_days), max(all_days)])\n else:\n ax1.xaxis.set_ticks(x_labels)\n\n ax1.yaxis.grid(False) \n plt.show()\n \n \n \"\"\" MOVING AVERAGE \"\"\"\n \n df = self.activity().reset_index()\n\n def day_of_year(datetime_entry):\n return datetime_entry.timetuple().tm_yday\n\n df['day_of_year'] = list(df.apply(lambda x: day_of_year(x['EventDateTime']),axis=1))\n daily_count = df['day_of_year'].value_counts().sort_index()\n\n averages = []\n i=1\n for value_count in daily_count:\n values = daily_count[:i]\n average = round(sum(values)/len(values),2)\n averages.append(average)\n i+=1\n\n day_list = list(df['day_of_year'].unique())\n\n avg_move_df = pd.DataFrame([day_list,averages]).T\n avg_move_df.rename(columns={0: 'day_id', 1: 'moving_avg'},inplace=True)\n avg_move_df.set_index('day_id',inplace=True)\n \n fig1, ax1 = plt.subplots()\n ax1.plot(avg_move_df.index.astype(int),avg_move_df['moving_avg'], color='mediumspringgreen')\n ax1.set_title('Moving AVG')\n ax1.set_xlabel('day_of_year')\n ax1.xaxis.set_ticks([min(all_days), max(all_days)])\n ax1.set_ylabel('Daily Activity')\n plt.show()\n \n \n \n \"\"\" Top 5 Samples \"\"\"\n \n data = s.select_table('sample')['SoundCategory'].value_counts()\n \n objects = list(data)[:5]\n y_pos = list(data.index)[:5]\n\n # get class info from class_absence_stats dataframe\n #fig2 = plt.figure(2) \n plt.bar(y_pos, objects, align='center', alpha=0.8, color='mediumspringgreen')\n plt.ylabel('Usage')\n plt.xlabel('Sound Category')\n plt.title('Top 5 Samples')\n plt.show()\n \n \n \"\"\" Top 3 Chords \"\"\"\n \n data = s.select_table('chord')['ChordLabel'].value_counts()\n\n objects = list(data)[:3]\n y_pos = list(data.index)[:3]\n\n # get class info from class_absence_stats dataframe\n #fig2 = plt.figure(2) \n plt.bar(y_pos, objects, align='center', alpha=0.8, color='mediumspringgreen')\n plt.ylabel('Usage')\n plt.xlabel('Chord Label')\n plt.title('Top 3 Chords')\n plt.show()\n \n \n \"\"\" Top 3 Wave Types \"\"\"\n \n # get SQL table data\n set_1 = s.select_table('createwave')\n set_2 = s.select_table('sequence')\n set_3 = s.select_table('arpeggio')\n set_4 = s.select_table('chord')\n\n # concat tables into single pandas series\n all_wave_types = pd.concat([set_1['WaveType'], set_2['WaveType'], set_3['WaveType'], set_4['WaveType']])\n\n # sort values, show top 3\n top_3 = all_wave_types.value_counts().head(3)\n\n\n # Pie chart, where the slices will be ordered and plotted counter-clockwise:\n labels = list(top_3.index)\n sizes = list(top_3.values)\n explode = (0, 0, 0) # only \"explode\" the 2nd slice (i.e. 
'Hogs')\n\n fig1, ax1 = plt.subplots()\n ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%',\n shadow=True, colors=['g','b','r'], startangle=90)\n ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\n ax1.set_title('Top Wave Types')\n\n plt.show()",
"def graph12():\r\n sheet = workbook.sheet_by_index(5)\r\n data = [[sheet.cell_value(r, c) for c in range(sheet.ncols)] for r in range(sheet.nrows)]\r\n\r\n for i in range(2, sheet.nrows):\r\n for k in range(1, 6):\r\n list_data[i-2].append({'value': round((data[i][k]/sum(data[i][j] for j in range(1, 6)))*100, 2), 'label': '%.2f%s' %(round((data[i][k]/sum(data[i][j] for j in range(1, 6)))*100, 2), '%')})\r\n\r\n line_chart = pygal.HorizontalBar(print_labels=True, stack_from_top=False)\r\n line_chart.title = 'เปรียบเทียบรอยละของประชากรอายุ 15 ปขึ้นไปที่ดื่มสุราหรือเครื่องดื่มมึนเมา จำแนกตามประเภทของสุราที่ดื่มบ่อย ปี 2544 - 2557'\r\n line_chart.x_labels = ['เบียร์', 'สุราแช่พื้นบ้าน (สาโท อุ กระแช่)', 'สุราขาว, สุราสี, สุรากลั่น', 'ไวน์', 'อื่นๆ']\r\n line_chart.y_labels = map(int, range(0, 61, 10))\r\n for i in range(4):\r\n line_chart.add(data_name[i], list_data[i])\r\n line_chart.render_to_file('12Compare graph of Classified by type og alcohol in 2544 - 2557.svg')",
"def emissions_baseline(self):\n baseline = DataFrame(columns=[\"CO2\", \"NOx\", \"PM10\", \"PM2.5\", \"SO2\"])\n baseline = baseline.append(year_1(self.plant.emissions()))\n baseline = baseline.append(year_1(self.plant.fuel_reseller().emissions()))\n baseline = baseline.append(year_1(self.farmer.emissions_exante))\n baseline.loc[\"Total\"] = baseline.sum()\n baseline.loc[\"Total_plant\"] = baseline.iloc[0]\n baseline.loc[\"Total_transport\"] = baseline.iloc[1]\n baseline.loc[\"Total_field\"] = baseline.iloc[2]\n return baseline",
"def visualize_yearly_fire_cause():\r\n fig, ax = plt.subplots(figsize=(20,20))\r\n data = pd.read_csv('.\\\\CSV_Files\\\\yearly_fire_cause.csv')\r\n data = data.loc[data['STAT_CAUSE_DESCR'].isin(['Lightning', 'Equipment Use', 'Miscellaneous', 'Children', 'Arson'])]\r\n plot_df = pd.pivot_table(data,index=data['FIRE_YEAR'], columns= data['STAT_CAUSE_DESCR'])\r\n ax.plot(range(1992,2016), plot_df)\r\n ax.set_title('Yearly Burn Damage Organized by Cause')\r\n ax.set_xlabel('Calendar Year')\r\n ax.set_ylabel('Amount Burned (sq mi)')\r\n ax.set_xticks(range(1992,2016))\r\n ax.set_xticklabels(range(1992,2016))\r\n plt.savefig('yearly_burn_damage_by_cause.png')\r\n plt.xlim([1993,2015])\r\n ax.legend(labels=['Arson', 'Children', 'Equipment Use', 'Lightning', 'Miscellaneous'])\r\n return plt.show()",
"def plot_data(self, data, backup_frame):\n title = self.filename.split('-')\n final_titles = title[2].split('.')\n self.final_title_sub = final_titles[0].lower()\n\n # Accounts for the three types of graph required\n # date for archival purposes\n # web for the web server and\n # log for the logarithmic graphs\n graph_list = ['date', 'web', 'log']\n for mode in graph_list:\n for column in data.columns:\n data['Rest of the World'] = \\\n backup_frame['Global_Cases'] - data[column]\n x_axis = data.index.values\n\n fig, axes = plt.subplots()\n axes.plot(x_axis, data[column], marker='o',\n label=column)\n axes.plot(x_axis, data['Rest of the World'], marker='s',\n label='Rest of the World')\n fig.autofmt_xdate()\n\n every_nth = 4\n for number, label in enumerate(axes.xaxis.get_ticklabels()):\n if number % every_nth != 0:\n label.set_visible(False)\n\n axes.set(xlabel='Date', ylabel='Cases',\n title=f'Covid-19 {self.final_title_sub} '\n f'cases for {column} - data from '\n f'John Hopkins CSSE')\n axes.grid()\n axes.legend()\n\n # Setting the y-axis\n if mode == 'log':\n axes.set_yscale('log')\n else:\n data_max = data.max(axis=1)\n max_number = data_max[-1]\n rounded_max = self.round_up(max_number, -3)\n rounded_max += 2000\n axes.set_ylim([0, rounded_max])\n\n # -----------------------------------------------------\n # Adds Labels to annotate the last data point for each\n # plot\n y_axis1 = data[column][-1]\n y_axis2 = data['Rest of the World'][-1]\n\n plt.annotate(y_axis1, (x_axis[-1], y_axis1 + 500),\n bbox=dict(facecolor='blue', alpha=0.5),\n fontsize=12)\n plt.annotate(y_axis2, (x_axis[-1], y_axis2 + 500),\n bbox=dict(facecolor='red', alpha=0.5),\n fontsize=12)\n # -----------------------------------------------------\n\n # Required in order to stop the column from summing\n # the total of each run through the loop\n # otherwise this leads to Rest of World values in the\n # millions\n data = data.drop('Rest of the World', axis=1)\n\n if mode == 'log':\n dir_name = f'{self.out_dir}docs/graphics/' \\\n f'log_' \\\n f'{self.final_title_sub}_for_' \\\n f'{column}.png'\n elif mode == 'date':\n dir_name = f'{self.out_dir}docs/graphics/' \\\n f'{x_axis[-1]}-2020-' \\\n f'{self.final_title_sub}_for_{column}.png'\n\n elif mode == 'web':\n dir_name = f'{self.out_dir}docs/graphics/' \\\n f'{self.final_title_sub}_for_{column}.png'\n\n else:\n print('error')\n\n fig.savefig(dir_name, transparent=False, dpi=300,\n bbox_inches=\"tight\")\n\n if os.path.exists(dir_name):\n logging.debug('File saved at: %s', {dir_name})\n print(f'Files saved at:\\n'\n f'{dir_name}\\n')\n else:\n logging.debug('Failed to save')\n logging.debug(os.getcwd())\n plt.close()\n return data",
"def graph4():\r\n sheet = workbook.sheet_by_index(3)\r\n data = [[sheet.cell_value(r, c) for c in range(sheet.ncols)] for r in range(sheet.nrows)]\r\n\r\n for i in range(2, sheet.nrows):\r\n list_data[0].append((data[i][0], round((data[i][1]/data[i][2])*100, 2)))\r\n list_data[1].append((data[i][0], round((data[i][3]/data[i][4])*100, 2)))\r\n list_data[2].append((data[i][0], round((data[i][5]/data[i][6])*100, 2)))\r\n list_data[3].append((data[i][0], round((sum(data[i][j] for j in range(1, 6, 2))/sum(data[i][j] for j in range(2, 7, 2)))*100, 2)))\r\n\r\n line_graph = pygal.XY()\r\n line_graph.title = 'สัดส่วนของผู้ที่ดื่มแอลกอฮอล์เป็นประจำรวม และแยกตามกลุ่มอายุ ระหว่างปี 2544 ถึง 2557'\r\n line_graph.x_labels = (2544, 2546, 2548, 2550, 2552, 2554, 2556, 2558)\r\n for i in range(4):\r\n line_graph.add(data_name[i], list_data[i])\r\n line_graph.render_to_file('4Percentage of regular drinkers among drinkers by age groups between 2001 and 2014.svg')",
"def ratio(gb_data, data_depcode, data_ratio_hospitalises,current_date, data_hospitalises, current_date_file, min_value_80p , nbhospitalises_80p) :\n start = time.time()\n fig, ax = plt.subplots(figsize=(12, 8))\n\n plt.title(f\"Ratio of in-hospital deaths to hospitalizations : {current_date}\", fontsize=20)\n plt.ylabel(\"Total number of deceases / Total number of hospitalized\")\n plt.xlabel(\"Total number of hospitalized\")\n\n for i, txt in enumerate(data_depcode):\n if (data_hospitalises[i] > data_hospitalises.max() * 0.20):\n ax.annotate(txt, (data_hospitalises[i], data_ratio_hospitalises[i]), xytext=(data_hospitalises[i] + 20, data_ratio_hospitalises[i])) \n\n plt.axhline(data_ratio_hospitalises.mean(), color='green', linestyle='--', label=f'average death ratio ({data_ratio_hospitalises.mean():.2f}%)')\n\n plt.axvline(min_value_80p, color='pink', linestyle='-', label=f\"80% of the number of hospitalized people in France are on the right side of the line ({nbhospitalises_80p:.0f} hospitalized)\")\n\n ax.scatter(data_hospitalises, data_ratio_hospitalises)\n\n ax.annotate('updated chart',xy=(1, 0), xytext=(-15, 10), fontsize=15,\n xycoords='axes fraction', textcoords = 'offset points',\n bbox=dict(facecolor = 'white', alpha = 0.9),\n horizontalalignment = 'right', verticalalignment = 'bottom')\n\n ax.yaxis.set_major_formatter(mtick.FormatStrFormatter('%.0f%%'))\n plt.legend()\n\n current_date_file = gb_data['date'].max().strftime('%Y%m%d')\n end = time.time()\n print(\"Time spent on ratio plot: {0:.5f} s.\".format(end - start)) \n plt.show()",
"def plot_data(self):",
"def main(argv):\n scenario = int(argv[1])\n state = argv[2]\n lyear = datetime.date.today().year - 1\n print(f\"This report covers the inclusive years 2008-{lyear} for {state}\")\n\n df = read_sql(\n \"\"\"\n WITH iahuc12 as (\n SELECT huc_12 from huc12 where states = %s and scenario = 0\n ), agg as (\n SELECT r.huc_12, extract(year from valid)::int as yr,\n sum(qc_precip) as precip, sum(avg_runoff) as runoff,\n sum(avg_delivery) as delivery,\n sum(avg_loss) as detachment from results_by_huc12 r JOIN iahuc12 i\n on (r.huc_12 = i.huc_12) WHERE r.scenario = %s\n and r.valid >= '2008-01-01'\n and r.valid <= %s GROUP by r.huc_12, yr\n )\n\n SELECT yr, round((avg(precip) / 25.4)::numeric, 2) as precip_in,\n round((avg(runoff) / 25.4)::numeric, 2) as runoff_in,\n round((avg(delivery) * 4.463)::numeric, 2) as delivery_ta,\n round((avg(detachment) * 4.463)::numeric, 2) as detachment_ta\n from agg GROUP by yr ORDER by yr\n \"\"\",\n get_dbconnstr(\"idep\"),\n params=(state, scenario, datetime.date(lyear, 12, 31)),\n index_col=\"yr\",\n )\n\n print(df)\n print(df.mean())\n\n (fig, ax) = plt.subplots(1, 1)\n ax.bar(df.index.values, df[\"detachment_ta\"].values)\n for year, row in df.iterrows():\n ax.text(\n year,\n row[\"detachment_ta\"] + 0.2,\n f\"{row['detachment_ta']:.1f}\",\n ha=\"center\",\n )\n ax.axhline(\n df[\"detachment_ta\"].mean(), label=\"mean\", zorder=5, color=\"k\", lw=1.5\n )\n ax.legend(loc=\"best\")\n ax.grid(True)\n ax.set_xlim(df.index.values[0] - 0.5, df.index.values[-1] + 0.5)\n ax.set_ylabel(\"Yearly Detatchment [tons/acre]\")\n ax.set_title(\n f\"{state_names[state]} Daily Erosion Project Iowa's Yearly Detachment\"\n )\n fig.text(\n 0.01,\n 0.01,\n f\"Plot generated {datetime.datetime.now():%d %B %Y}\",\n )\n fig.savefig(\"test.png\")",
"def test_2d_plot(self):\n db = pd.HDFStore('test.h5')\n df_iv = db['iv']\n dates = df_iv[df_iv['dte'] == 30]['date']\n impl_vols = df_iv[df_iv['dte'] == 30]['impl_vol']\n db.close()\n\n print df_iv.sort_values('impl_vol').head()\n\n plt.plot(dates, impl_vols)\n plt.xlabel('date')\n plt.ylabel('impl_vols')\n plt.show()",
"def graph1():\r\n sheet = workbook.sheet_by_index(0)\r\n data = [[sheet.cell_value(r, c) for c in range(sheet.ncols)] for r in range(sheet.nrows)]\r\n\r\n for i in range(2, sheet.nrows):\r\n list_data[0].append((data[i][0], round((data[i][3]/data[i][1])*100, 2)))\r\n list_data[1].append((data[i][0], round((data[i][4]/data[i][2])*100, 2)))\r\n list_data[2].append((data[i][0], round(((data[i][3] + data[i][4])/(data[i][1] + data[i][2]))*100, 2)))\r\n\r\n line_graph = pygal.XY()\r\n line_graph.title = 'อัตราการดื่มเครื่องดื่มแอลกอฮอล์รวม และแยกตามเพศ ระหว่างปี 2544 ถึง 2557'\r\n line_graph.x_labels = (2544, 2546, 2548, 2550, 2552, 2554, 2556, 2558)\r\n for i in range(3):\r\n line_graph.add(data_name[i], list_data[i])\r\n line_graph.render_to_file('1Alcohol consumption rate by genders between 2001 and 2014.svg')",
"def continent_data_le(data):\n data_1997 = data[data.year == 1997]\n europe_1997 = data_1997[data_1997.continent == 'Europe']\n america_1997 = data_1997[data_1997.continent == 'Americas']\n\n plt.subplot(2, 1, 1)\n plt.title('Life Expectancy')\n plt.hist(europe_1997.lifeExpectancy)\n plt.ylabel('Europe ')\n\n plt.subplot(2, 1, 2)\n plt.hist(america_1997.lifeExpectancy)\n plt.ylabel('America')\n\n plt.show()",
"def visualize_train_data(train_df, fname):\r\n logger = custom_logger.CustomLogger(run_id+':'+file_id)\r\n fig, axs = plt.subplots(3, figsize=(15,15))\r\n fig.suptitle('EPEX Intraday Continuous market electricity prices')\r\n\r\n axs[0].plot(train_df.index, train_df['low'], color='red')\r\n axs[0].set_title(\"Lowest Price\")\r\n axs[0].set(xlabel='time', ylabel='price (Euros)')\r\n\r\n axs[1].plot(train_df.index, train_df['high'], color='green')\r\n axs[1].set_title(\"Highest Pice\")\r\n axs[1].set(xlabel='time', ylabel='price (Euros)')\r\n\r\n axs[2].plot(train_df.index, train_df['weight_avg'], color='blue')\r\n axs[2].set_title(\"volume-weighted Average Price\")\r\n axs[2].set(xlabel='time', ylabel='price (Euros)')\r\n\r\n fig.savefig(os.path.join(unique_op_dir, fname))\r\n logger.info('Training data plots stored at ', os.path.join(unique_op_dir, fname))",
"def avgMergetime_graph(df):\n\n x = df['Merged_YM']\n y = df['mergetime']\n fig, ax = plt.subplots()\n x_pos = np.arange(len(x)) # <--\n plt.bar(x_pos, y)\n plt.xticks(x_pos, x) # <--\n # Make space for and rotate the x-axis tick labels\n fig.autofmt_xdate()\n ax.xaxis_date()\n addlabels(x, y)\n plt.xlabel(\"Dates\")\n plt.ylabel(\"Merge Time in Days\")\n plt.title(\"Avg Merge Times\")\n plt.savefig('AvgMergeTimes.png', dpi=400)\n plt.show()",
"def showLevels(self):\n\n pa = 'EUR_USD GBP_USD AUD_USD USD_CAD USD_CHF NZD_USD'.split(' ')\n gr = 'D H4 H1 M30 M15'.split(' ')\n for i in xrange(len(pa)):\n dfs = p.DataFrame()\n for j in xrange(len(gr)):\n try:\n training = self.viewTraining(pa[i], gr[j])\n df = training[0]\n manifest = training[1]\n dfs = dfs.combine_first(manifest.set_index('timeframe'))\n plot(df.get_values())\n except: \n ''\n try:\n dfs['timeframe'] = dfs.index # save the lost field before calling set_index()\n print dfs.set_index('forecast').sort(ascending=False)\n except: ''\n dfp = p.read_csv('/ml.dev/bin/data/oanda/ticks/{0}/{0}-M5.csv'.format(pa[i])).sort(ascending=True).tail(50).ix[:,'closeAsk']\n plot(dfp)\n title('{0} Forecast'.format(pa[i]))\n legend(gr)\n show();\n #break",
"def graph3():\r\n sheet = workbook.sheet_by_index(2)\r\n data = [[sheet.cell_value(r, c) for c in range(sheet.ncols)] for r in range(sheet.nrows)]\r\n\r\n for i in range(2, sheet.nrows):\r\n list_data[0].append((data[i][0], round((data[i][1]/data[i][2])*100, 2)))\r\n list_data[1].append((data[i][0], round((data[i][3]/data[i][4])*100, 2)))\r\n list_data[2].append((data[i][0], round((data[i][5]/data[i][6])*100, 2)))\r\n list_data[3].append((data[i][0], round((sum(data[i][j] for j in range(1, 6, 2))/sum(data[i][j] for j in range(2, 7, 2)))*100, 2)))\r\n\r\n line_graph = pygal.XY()\r\n line_graph.title = 'อัตราการดื่มเครื่องดื่มแอลกอฮอล์รวม และแยกตามกลุ่มอายุ ระหว่างปี 2544 ถึง 2557'\r\n line_graph.x_labels = (2544, 2546, 2548, 2550, 2552, 2554, 2556, 2558)\r\n for i in range(4):\r\n line_graph.add(data_name[i], list_data[i])\r\n line_graph.render_to_file('3Alcohol consumption rate by age groups between 2001 and 2014.svg')",
"def graph_year_count(df):\r\n # set the visual features of the graph\r\n sns.set(font_scale=1.5)\r\n sns.set_style(\"darkgrid\")\r\n fig, ax = plt.subplots()\r\n fig.set_size_inches(15, 8)\r\n plt.xticks(rotation=45)\r\n ax.set_title(\"Yearly Police Deaths\")\r\n # create the graph of the data\r\n plot = sns.barplot(\"year\", \"count\", data=df, palette=\"winter_d\")\r\n # plt.show()\r\n # save the graph as an image\r\n fig.savefig(\"1_graph_year_count.png\")",
"def diesel_2014():\n import plotly.plotly as py\n import plotly.graph_objs as go\n py.sign_in('littlejab', 'yblima8sc3')\n chart_min = go.Bar(\n x = ['Jan 14', 'Feb 14', 'Mar 14', 'Apr 14', 'May 14', 'Jun 14', 'Jul 14', 'Aug 14', \\\n 'Sep 14', 'Oct 14', 'Nov 14', 'Dec 14'],\n y = [29.99, 29.99, 29.99, 29.99, 29.99, 29.91, 29.85, 29.86, 29.99, 29.66, 29.41, 27.6],\n name = 'Min'\n )\n chart_avg = go.Bar(\n x = ['Jan 14', 'Feb 14', 'Mar 14', 'Apr 14', 'May 14', 'Jun 14', 'Jul 14', 'Aug 14', \\\n 'Sep 14', 'Oct 14', 'Nov 14', 'Dec 14'],\n y = [29.99, 29.99, 29.99, 29.99, 29.99, 29.91, 29.85, 29.86, 29.99, 29.66, 29.42, 27.64],\n name = 'Average'\n )\n chart_max = go.Bar(\n x = ['Jan 14', 'Feb 14', 'Mar 14', 'Apr 14', 'May 14', 'Jun 14', 'Jul 14', 'Aug 14', \\\n 'Sep 14', 'Oct 14', 'Nov 14', 'Dec 14'],\n y = [29.99, 29.99, 29.99, 29.99, 30.05, 30.01, 29.85, 29.86, 29.99, 29.66, 29.42, 27.91],\n name = 'Max'\n )\n data = [chart_min, chart_avg, chart_max]\n layout = go.Layout(barmode = 'group')\n fig = go.Figure(data = data, layout = layout)\n plot_url = py.plot(fig, filename = 'Diesel 2014')",
"def generate_day_comparison():\n df = pd.read_csv(\"/Users/maxwell/Documents/workspace/CoronaScan/results.csv\",\n names=[i for i in subreddits])\n\n row_values = df.to_numpy()\n counts = row_values[get_offset() + 1]\n vals = []\n for i in counts:\n vals.append(int(i))\n plt.rcParams['xtick.major.pad']='8'\n N = len(subreddits)\n fig, chart = plt.subplots()\n index = np.arange(N)\n width = 0.35\n plot = chart.bar(index, vals, width)\n for i, v in enumerate(vals):\n chart.text(i-.2, v/(vals[i]+100), vals[i], fontsize=11)\n\n chart.set_xticks(index)\n chart.set_xticklabels(subreddits, rotation=45, ha='right', minor=False, fontsize=8)\n chart.set_xlabel(\"Subreddit\", fontsize=14)\n chart.set_ylabel(\"Number of Mentions\", fontsize=14)\n chart.set_title(\"Keyword Mentions by Subreddit on \" +\n str(datetime.date.today()), fontsize=20, pad=20)\n\n plt.tight_layout()\n fig.set_size_inches(18.5, 10.5)\n fig.savefig(\"/Users/maxwell/Documents/workspace/CoronaScan/plots/daily_bar_graphs/\" +\n str(datetime.date.today()), bbox_inches='tight')"
] | [
"0.61644155",
"0.6124098",
"0.6031411",
"0.5979967",
"0.59787333",
"0.5971069",
"0.5943029",
"0.59208447",
"0.58984315",
"0.58976746",
"0.5869825",
"0.5843553",
"0.5836322",
"0.5829148",
"0.5803515",
"0.57898813",
"0.5787882",
"0.5771819",
"0.57714844",
"0.57601196",
"0.57472736",
"0.5746871",
"0.5679892",
"0.5671504",
"0.5663277",
"0.5660882",
"0.5657573",
"0.5652226",
"0.56110245",
"0.5610384"
] | 0.73401976 | 0 |
Cross analyzes the baseline with 2020's eviction data. NOTE Requires you to run the above functions | def cross_analyze(evictions_filed, base_evictions_filed, weeks):
plt.figure(figsize=(50, 10))
plt.plot(weeks, evictions_filed, label = '2020')
plt.plot(weeks, base_evictions_filed, label = '2015-2016')
plt.xlabel('Date', fontsize = 25)
plt.ylabel('Evictions filed', fontsize = 25)
plt.title('Evictions filed by the week', fontsize = 40)
plt.legend()
plt.annotate('Texas Supreme Court puts a temporary \n stay on eviction proceedings.', xy = ('3/8/2020', 1551), fontsize = 15)
plt.show() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def CrossCheck(dataloader):",
"def do_crossval():\n df = read_df()\n # X = df['review'].apply(remove_html_lower)\n\n X = df['review']\n y = df['sentiment']\n X_train, X_holdout, y_train, y_holdout = train_test_split(X, y, test_size=0.3, shuffle=True, stratify=y, random_state=222 )\n\n tfidf = TfidfVectorizer(stop_words='english', min_df=2, max_df=0.8, ngram_range=(1,4))\n stem_pipeline = make_pipeline(TextNormalizer(), tfidf, LogisticRegression(C=100))\n cv = StratifiedShuffleSplit(n_splits=3, test_size=0.2)\n\n scores = cross_val_score(stem_pipeline, X_train, y_train, cv=cv, scoring='accuracy', n_jobs=-1)\n print(scores, scores.mean())",
"def _cross_over(self,mp,cross_rate,eta):",
"def cross_valid_key(model,x,key,preds,target,metric,verbose=True): \r\n\r\n score=[]\r\n \r\n keys = x[key].unique().tolist()\r\n \r\n\r\n\r\n for idx, item in enumerate([1,2,3,4,5]):\r\n\r\n xtrain,xtest = split_camp(x,keys,0.2)\r\n \r\n model.fit(xtrain[feat],xtrain[target])\r\n\r\n ypred = model.predict(xtest[feat])\r\n \r\n ytrue= xtest[target].values \r\n \r\n if metric == 'mae':\r\n score.append(mae(ytrue,ypred))\r\n elif metric == 'mse':\r\n score.append(mse(ytrue,ypred))\r\n elif metric == 'rrmse':\r\n score.append(rrmse(ytrue,ypred))\r\n\r\n else:\r\n score.append(rmse(xtest[target].tolist(),ypred))\r\n\r\n if verbose:\r\n print('-'*30)\r\n print(f'\\nFold {idx} out of 5')\r\n print(f'Key {item}')\r\n print(f'{metric}: {score[idx]}')\r\n\r\n \r\n\r\n if verbose:\r\n print(f'\\n Overall Score:')\r\n print(f'{metric}: Mean: {np.mean(score)} Std: {np.std(score)}')\r\n\r\n\r\n return score",
"def cross_valid(model,x,folds,metric,verbose=True): \r\n\r\n score=[]\r\n \r\n\r\n kf = KFold(folds,shuffle=False,random_state=0) \r\n\r\n\r\n i=0\r\n for train_index, test_index in kf.split(x):\r\n\r\n xtrain = x[train_index,:]\r\n xtest = x[test_index,:]\r\n\r\n model.fit(xtrain[:,:-1],xtrain[:,-1])\r\n\r\n ypred = model.predict(xtest[:,:-1])\r\n\r\n ytrue= xtest[:,-1] \r\n \r\n \r\n if metric == 'mae':\r\n score.append(mae(ytrue,ypred))\r\n elif metric == 'mse':\r\n score.append(mse(ytrue,ypred))\r\n elif metric == 'rrmse':\r\n score.append(rrmse(ytrue,ypred))\r\n\r\n else:\r\n score.append(rmse(xtest[:,-1],ypred))\r\n\r\n if verbose:\r\n print('-'*30)\r\n print(f'\\nFold {i+1} out of {folds}')\r\n print(f'{metric}: {score[i]}')\r\n\r\n i+=1\r\n\r\n if verbose:\r\n print(f'\\n Overall Score:')\r\n print(f'{metric}: Mean: {np.mean(score)} Std: {np.std(score)}')\r\n\r\n\r\n return score",
"def emissions_baseline(self):\n baseline = DataFrame(columns=[\"CO2\", \"NOx\", \"PM10\", \"PM2.5\", \"SO2\"])\n baseline = baseline.append(year_1(self.plant.emissions()))\n baseline = baseline.append(year_1(self.plant.fuel_reseller().emissions()))\n baseline = baseline.append(year_1(self.farmer.emissions_exante))\n baseline.loc[\"Total\"] = baseline.sum()\n baseline.loc[\"Total_plant\"] = baseline.iloc[0]\n baseline.loc[\"Total_transport\"] = baseline.iloc[1]\n baseline.loc[\"Total_field\"] = baseline.iloc[2]\n return baseline",
"def crossValidate(self, args):\n\n ##################################\n # Read the training data\n ##################################\n if not os.path.isdir(args.annotationPath):\n print('annotation path does not exist: {}' \\\n .format(args.annotationPath))\n return -1\n\n data = self.readData(args.annotationPath)\n\n ############################\n # Execute the K-Fold cross validation\n ############################\n\n x = []\n y = []\n l = []\n for subject, df in data.items():\n lx = df[['gradient', 'rate']].values.tolist()\n #lx = df[['rate']].values.tolist()\n ly = np.array(df[['immersion']].values.tolist()).squeeze(-1)\n x.extend(lx)\n y.extend(ly.tolist())\n l.append(len(lx))\n\n x = np.array(x)\n y = np.array(y)\n\n print('Executing cross-validation with k = {}...'.format(args.k))\n clf = StructuredPerceptron(random_state=2)\n scores = []\n folds = SequenceKFold(l, n_folds=args.k)\n for train_idx, train_len, test_idx, test_len in folds:\n xTrain = x[train_idx]\n yTrain = y[train_idx]\n clf.fit(xTrain, yTrain, train_len)\n\n xTest = x[test_idx]\n yTest = y[test_idx]\n yPred = clf.predict(xTest, test_len)\n scores.append(accuracy_score(yTest, yPred))\n\n scores = np.array(scores)\n print(scores)\n print('Result of the K-Fold CV: {:3f} (+- {:3f})' \\\n .format(scores.mean(), 2 * scores.std()))\n\n ############################\n # Execute the Leave-One-Out cross validation\n ############################\n\n\n return 0",
"def main():\n\n # Input and Output files Error-Handling\n args = parse_args()\n if args.input is None:\n raise ImportError('Did not specify the correct input file!')\n if args.output is None:\n raise ImportError('Did not specify the correct output file!')\n\n # Read in the border_crossing data\n with open(args.input, mode='r') as csv_file:\n\n # Read the CSV data into a list of lists\n csv_reader = csv.reader(csv_file, delimiter=',')\n\n # Sort the list by Border, Date, and Measure in descending order\n sorted_list = sorted(csv_reader, key=itemgetter(3, 5))\n\n # Make sure the sorted_list rows are not empty\n if check_all_there(sorted_list):\n pass\n\n # Let's group the sorted list via the keys--border names, dates,\n # and measures, so that there are rows with the same border name, date,\n # measure, but different values! In each row, check if the\n # 6th index (this is our value) is a number and is not 0! If true, then\n # add those values together and create a new list, which holds this aggregated\n # summation of values for each border name, date, and measure\n list_with_agg_values = [key +\n [sum([int(r[6]) for r in rows if r[6].isdigit()\n and int(r[6]) != 0])]\n for key, rows in groupby(sorted_list,\n key=lambda x: x[3:6])]\n\n # x number of months -- could be a dictionary or int\n num_of_months = count_the_months(list_with_agg_values)\n\n # calculate the average crossing per month and per measure\n list_with_avg = calculate_average_crossing_per_month_and_measure(num_of_months,\n list_with_agg_values)\n\n # Sort the list by Date, Value, Measure, Border in descending order\n sorted_list_with_vbm = sorted(list_with_avg, key=itemgetter(3, 2, 0),\n reverse=True)\n final_sorted_list = sorted(sorted_list_with_vbm,\n key=lambda x: datetime.strptime(x[1],\n '%d/%m/%Y %H:%M:%S %p'),\n reverse=True)\n write_to_csv(args.output, final_sorted_list)",
"def crossvalidate(*args, **kwargs):\n\n scores = []\n j = 0\n for i, _ in enumerate(data):\n if i in good_patients:\n\n if 'silent' in kwargs:\n if kwargs['silent']:\n pass\n else:\n print \"real patient index:\", i\n else:\n print \"real patient index:\", i\n\n kwargs['patient_index'] = j\n score, reconstruction = runmodel(*args, **kwargs)\n scores.append(score)\n\n if 'save_reconstruction' in kwargs:\n if kwargs['save_reconstruction']:\n scipy.misc.imsave(\"patient_{}_reconstruction.png\".format(i), reconstruction)\n j += 1\n\n cvmodel = args[0].__class__.__name__\n print \"{} overall cross validated score {}\".format(cvmodel, np.mean(scores))\n return np.mean(scores)",
"def cross_validate(X, Y, folds=5):\n\n log = LogisticRegression(C=1.0, class_weight=None, dual=False, fit_intercept=True,\n intercept_scaling=1, max_iter=200, multi_class='ovr', n_jobs=3,\n penalty='l2', random_state=None, solver='liblinear', tol=0.0001,\n verbose=0, warm_start=False)\n \n\n \n\n\n scores_log = [] \n scores_forest = []\n index = np.arange(X.shape[0])\n score_log = 0\n score_forest = 0\n \n for i in range(folds):\n score_log = 0\n score_forest = 0\n \n test_index = np.random.choice(index, int(X.shape[0]*1/folds),replace=False)\n index = np.setdiff1d(np.arange(X.shape[0]),test_index)\n \n test_x = X[test_index]\n test_y = Y[test_index]\n\n log.fit(X[index],Y[index])\n pred_log = log.predict(test_x)\n \n ran.fit(X[index],Y[index])\n pred_ran = ran.predict(test_x)\n \n for i in range(len(test_y)):\n if(pred_log[i] == test_y[i]):\n score_log += 1\n if(pred_ran[i] == test_y[i]):\n score_forest += 1\n scores_log.append(score_log/len(test_y))\n scores_forest.append(score_forest/len(test_y))\n \n\n return (np.mean(scores_log),np.mean(scores_forest))",
"def forecast_CV_surge(self, title = \"Cross-Validation\", value_name = \"Actuals\",\n upper_bound_name = \"Worst-Case\", lower_bound_name = \"Best-Case\",\n upper_bound_color = \"red\", lower_bound_color = \"blue\", predicted_color = \"orange\",\n fill_color_and_opacity = 'rgba(50, 50, 50, .2)', roll_avg = False, roll_avg_num = 7):\n \n if roll_avg:\n self._df['Values'] = self._df['Values'].rolling(window=roll_avg_num, closed = 'left').mean()\n self._df['Predicted'] = self._df['Predicted'].rolling(window=roll_avg_num, closed = 'left').mean()\n self._df['UB'] = self._df['UB'].rolling(window=roll_avg_num, closed = 'left').mean()\n self._df['LB'] = self._df['LB'].rolling(window=roll_avg_num, closed = 'left').mean()\n \n fig = go.Figure(\n data = go.Scatter(\n name=value_name,\n mode=\"markers+lines\", \n x=self._df.index, \n y=self._df[\"Values\"],\n marker_symbol=\"circle\", \n marker_size = 6,\n line_color = \"black\"\n ),\n layout_title_text = title,\n layout_template = \"ggplot2\"\n )\n \n fig.add_trace(go.Scatter(\n name=\"Predicted\",\n mode=\"lines\", \n x=self._df.index, \n y=self._df[\"Predicted\"], \n line_color = predicted_color,\n line_width = 2\n ))\n \n fig.add_trace(go.Scatter(\n name=upper_bound_name,\n mode=\"lines\", \n x=self._df.index, \n y=self._df[\"UB\"], \n line_color = upper_bound_color,\n line_width = 2\n ))\n \n fig.add_trace(go.Scatter(\n name=lower_bound_name,\n mode=\"lines\", \n x=self._df.index, \n y=self._df[\"LB\"], \n line_color = lower_bound_color,\n fill = 'tonexty',\n fillcolor = fill_color_and_opacity,\n line_width = 2\n ))\n \n \n return fig",
"def add_evidence( \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t \t \t\t \t\t \t\n self, \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t \t \t\t \t\t \t\n symbol=\"IBM\", \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t \t \t\t \t\t \t\n sd=dt.datetime(2008, 1, 1), \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t \t \t\t \t\t \t\n ed=dt.datetime(2009, 1, 1), \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t \t \t\t \t\t \t\n sv=10000, \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t \t \t\t \t\t \t\n ): \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t \t \t\t \t\t \t\n \t\t \t \t\t \t \t\t\t \t\t \t\t\t \t \t \t\t \t\t \t\n # add your code to do learning here\n\n converged = False\n x = np.zeros((3,1))\n dates = pd.date_range(sd,ed)\n df_prices = ind.get_price(symbol, dates)\n\n daily_rets = (df_prices / df_prices.shift(1)) - 1\n daily_rets = daily_rets[1:]\n\n\n sd_older = sd - dt.timedelta(days=365)\n dates_older = pd.date_range(sd_older,ed)\n df_prices_older = ind.get_price(symbol, dates_older)\n sd_key = df_prices.index[0]\n sd_index = df_prices_older.index.get_loc(sd_key)\n\n num_bins = len(self.bins)\n max_state_idx = num_bins + num_bins*10 + num_bins*100\n\n # Call Q-Learner Constructor\n self.learner = QLearner( \n num_states=(max_state_idx + 1), \n num_actions=3, \n alpha=0.01, \n gamma=0.0, \n rar=0.98, \n radr=0.9995, \n dyna=0, \n verbose=False, \n )\n\n # df_trades = df_prices.copy()\n df_holdings = df_prices.copy()\n df_holdings['Holdings'] = np.nan\n del df_holdings[symbol] # print(df_holdings)\n\n # Initlialize Vars\n cum_ret_prev = 0\n iters = 0\n conv_counter = 0\n Q_prev = np.copy(self.learner.Q)\n\n # Get Indicator Values\n _,_,ind1 = ind.get_BB(df_prices_older, self.lookback)\n ind2 = ind.get_CCI(df_prices_older, self.lookback)\n _,_,ind3 = ind.get_SMA_Cross(self.lookback, 100, df_prices_older)\n ind4 = ind.get_momentum(df_prices_older, self.lookback)\n _,_,ind5 = ind.get_MACD(df_prices_older)\n BB = ind1.iloc[sd_index:].values\n CCI = ind2.iloc[sd_index:].values\n SMA_Cross = ind3.iloc[sd_index:].values\n Momentum = ind4.iloc[sd_index:].values\n MACD = ind5.iloc[sd_index:].values\n _,self.x0bins = pd.qcut(BB[:,0], num_bins,labels=False,retbins=True)\n _,self.x1bins = pd.qcut(CCI[:,0],num_bins,labels=False,retbins=True)\n _,self.x2bins = pd.qcut(SMA_Cross[:,0],num_bins,labels=False,retbins=True)\n _,self.x3bins = pd.qcut(Momentum[:,0],num_bins,labels=False,retbins=True)\n _,self.x4bins = pd.qcut(MACD[:,0],num_bins,labels=False,retbins=True)\n x_0 = np.digitize(BB[:,0], self.x0bins[1:-1])\n x_1 = np.digitize(CCI[:,0], self.x1bins[1:-1])\n x_2 = np.digitize(SMA_Cross[:,0], self.x2bins[1:-1])\n x_3 = np.digitize(Momentum[:,0], self.x3bins[1:-1])\n x_4 = np.digitize(MACD[:,0], self.x4bins[1:-1])\n state = x_0 + x_3*10 + x_4*100\n\n \n while not converged:\n\n action = self.learner.querysetstate(state[0])\n daily_return = daily_rets.iloc[0][symbol]\n cur_price = df_prices.iloc[0][symbol]\n next_price = df_prices.iloc[1][symbol]\n df_holdings.iloc[0]['Holdings'], reward = self.take_action(0, action, cur_price, next_price)\n\n\n\n for day_idx in range(1,daily_rets.shape[0]):\n\n\n daily_return = daily_rets.iloc[day_idx][symbol]\n cur_price = df_prices.iloc[day_idx-1][symbol]\n next_price = df_prices.iloc[day_idx][symbol]\n df_holdings.iloc[day_idx]['Holdings'], reward = self.take_action(df_holdings.iloc[day_idx-1]['Holdings'], action, cur_price, next_price)\n action = self.learner.query(state[day_idx], reward)\n\n df_holdings.iloc[-1]['Holdings'] = 0\n df_trades = df_holdings.diff()\n df_trades['Trades'] = df_trades['Holdings']\n del 
df_trades['Holdings']\n df_trades.iloc[0]['Trades'] = 0\n\n\n portvals = msc.compute_portvals( \n df_trades,\n symbol, \n sv, \n self.commission, \n self.impact, \n )\n\n cum_ret = (portvals[-1] / portvals[0]) - 1\n Q_diff = np.abs(self.learner.Q - Q_prev)\n Q_max_diff = Q_diff.max()\n\n if iters > 20:\n\n # if abs(cum_ret - cum_ret_prev) < 0.0001:\n if Q_max_diff < 0.001:\n conv_counter += 1\n else:\n conv_counter = 0\n\n if conv_counter > 5 or iters > 20000:\n converged = True\n # if iters > 100:\n # if iters % 100 == 0:\n # print(\"Iteration #\", iters)\n print(\"----------------------------------------------\")\n print(\"-- --\")\n print(\"Iteration #\", iters)\n print(\"Error = \", abs(cum_ret - cum_ret_prev))\n print(\"Q Diff: \", Q_max_diff)\n print(\"Epsilon: \", self.learner.rar)\n\n cum_ret_prev = cum_ret\n Q_prev = np.copy(self.learner.Q)\n iters += 1\n self.learner.rar *= self.learner.radr\n # print(\"Iters = \", iters)\n print(\"Mode Trained in \", iters, \" iterations!\")\n np.savetxt('Q_Table.csv', self.learner.Q, delimiter=',')",
"def calEachCrossflow2peak():\n \n crossFlow = pd.read_csv('Data_crossflow.csv', index_col = 'Unnamed: 0')\n peakCross = crossFlow['Node2']\n crossFlowPeakFactor = peakCross/0.8\n \n peakCross2 = crossFlow['Node6']\n crossFlowPeakFactor2 = peakCross2/0.8\n #original_factor = peakCross/0.8\n #need to judge the sign of lateral flow according to CTF rule!!\n gapsToFlip = [2,4,6,7,9,11,13,14,16,18,20,21] #gaps in y direction\n gapsToFlipIndex = [x - 1 for x in gapsToFlip]\n for index in gapsToFlipIndex:\n crossFlowPeakFactor[index] = -crossFlowPeakFactor[index] \n crossFlowPeakFactor2[index] = -crossFlowPeakFactor2[index]\n \n return crossFlowPeakFactor, crossFlowPeakFactor2",
"def main():\n\n # Get dataset and create pandas dataframe\n f_data = \"../data/dataset.xlsx\"\n df = pd.read_excel(f_data)\n\n # Get variables for indices\n years = list(set(df[\"Year\"][3:]))\n years_arr = df[\"Year\"][3:]\n\n # Get values from dataset\n population = df[\"Population.1\"][3:]\n auto_commuters = df[\"Auto\"][3:]\n free_traffic = df[\"Freeway\"][3:]\n arterial_traffic = df[\"Arterial Street\"][3:]\n general_time_value = df[\"Cost Components\"][3:]\n commercial_time_value = df[\"Unnamed: 12\"][3:]\n gasoline_cost = df[\"Unnamed: 13\"][3:]\n diesel_cost = df[\"Unnamed: 14\"][3:]\n excess_fuel_per_commuter = df[\"Unnamed: 20\"][3:]\n annual_hrs_of_delay = df[\"Unnamed: 24\"][3:]\n travel_time_index = df[\"Travel Time Index\"][3:]\n cost_per_autocommuter = df[\"Unnamed: 34\"][3:]\n uber = df[\"Uber Entry Dummies\"][3:]\n lyft = df[\"Lyft Entry Dummies\"][3:]\n both = df[\"UberXlyft\"][3:]\n unemployment = df[\"Unemployment Rate (%)\"][3:]\n\n # Get covariances\n filled_ump = copy.deepcopy(unemployment).fillna(value=0)\n print(\"Correlation of uber and ump: {}\".format(np.corrcoef(filled_ump, uber)))\n print(\"Correlation of lyft and ump: {}\".format(np.corrcoef(filled_ump, lyft)))\n print(\"Covariance of tti and ump: {}\".format(np.corrcoef(filled_ump,\n travel_time_index.astype(np.float32))))\n print(\"Covariance of cost and ump: {}\".format(np.corrcoef(filled_ump,\n cost_per_autocommuter.astype(np.float32))))\n print(\"Covariance of excess and ump: {}\".format(np.corrcoef(filled_ump,\n excess_fuel_per_commuter.astype(np.float32))))\n print(\"Covariance of delay and ump: {}\".format(np.corrcoef(filled_ump,\n annual_hrs_of_delay.astype(np.float32))))\n\n # Create output data structure\n year_dict = {years[i]: {\"pop\": [], \"auto\": [], \"free\": [], \"art\": [],\n \"gen_time\": [], \"comm_time\": [], \"gas\": [], \"diesel\":\n [], \"ann_delay\": [], \"travel_index\": [], \"cost\":\n [], \"ub\": [], \"ly\": [], \"bo\": [], \"ump\": [],\n \"excess_gas\": []} for i in range(len(years))}\n\n # Counter variable\n i = 0\n\n # Iterate through everything for plots\n for year, pop, auto, free, art, gen_time, comm_time, gas, diesel, excess_gas, \\\n ann_delay, travel_index, cost, ub, ly, bo, ump in \\\n zip(years_arr, population, auto_commuters, free_traffic,\n arterial_traffic, general_time_value, commercial_time_value,\n gasoline_cost, diesel_cost, excess_fuel_per_commuter,\n annual_hrs_of_delay, travel_time_index, cost_per_autocommuter,\n uber, lyft, both, unemployment):\n\n # Append values to dictionary for plotting\n year_dict[year][\"pop\"].append(pop)\n year_dict[year][\"auto\"].append(auto)\n year_dict[year][\"free\"].append(free)\n year_dict[year][\"art\"].append(art)\n year_dict[year][\"gen_time\"].append(gen_time)\n year_dict[year][\"comm_time\"].append(comm_time)\n year_dict[year][\"gas\"].append(gas)\n year_dict[year][\"diesel\"].append(diesel)\n year_dict[year][\"ann_delay\"].append(ann_delay)\n year_dict[year][\"travel_index\"].append(travel_index)\n year_dict[year][\"cost\"].append(cost)\n year_dict[year][\"ub\"].append(ub)\n year_dict[year][\"ly\"].append(ly)\n year_dict[year][\"bo\"].append(bo)\n year_dict[year][\"ump\"].append(ump)\n year_dict[year][\"excess_gas\"].append(excess_gas)\n\n # Average values according to year\n for key_i in list(year_dict.keys()):\n for key_j in list(year_dict[key_i].keys()):\n vals = copy.deepcopy(year_dict[key_i][key_j])\n year_dict[key_i][key_j] = np.mean(vals)\n\n # Now make arrays for time series data\n pop_by_year = 
[year_dict[years[i]][\"pop\"] for i in range(len(years))]\n auto_by_year = [year_dict[years[i]][\"auto\"] for i in range(len(years))]\n free_by_year = [year_dict[years[i]][\"free\"] for i in range(len(years))]\n art_by_year = [year_dict[years[i]][\"art\"] for i in range(len(years))]\n gen_time_by_year = [year_dict[years[i]][\"gen_time\"] for i in range(len(years))]\n comm_time_by_year = [year_dict[years[i]][\"comm_time\"] for i in range(len(\n years))]\n gas_by_year = [year_dict[years[i]][\"gas\"] for i in range(len(years))]\n diesel_by_year = [year_dict[years[i]][\"diesel\"] for i in range(len(years))]\n ann_delay_by_year = [year_dict[years[i]][\"ann_delay\"] for i in range(len(\n years))]\n travel_index_by_year = [year_dict[years[i]][\"travel_index\"] for i in\n range(len(years))]\n cost_by_year = [year_dict[years[i]][\"cost\"] for i in range(len(years))]\n ub_by_year = [year_dict[years[i]][\"ub\"] for i in range(len(years))]\n ly_by_year = [year_dict[years[i]][\"ly\"] for i in range(len(years))]\n bo_by_year = [year_dict[years[i]][\"bo\"] for i in range(len(years))]\n ump_by_year = [year_dict[years[i]][\"ump\"] for i in range(len(years))]\n excess_gas_per_year = [year_dict[years[i]][\"excess_gas\"] for i in range(len(\n years))]\n\n\n # Make plots\n plt.plot(years, pop_by_year)\n plt.xlabel(\"Year\")\n plt.ylabel(\"Average Population of UMR Urban Centers (1000s)\")\n plt.title(\"Average Population of Urban Mobility Report Urban Centers over Time\")\n plt.savefig(\"../graphs/pop_vs_time.png\")\n plt.clf()\n\n plt.plot(years, auto_by_year)\n plt.xlabel(\"Year\")\n plt.ylabel(\"Autocommuters (1000s)\")\n plt.title(\"Average Number of Autocommuters in UMI Urban Centers (1000s)\")\n plt.savefig(\"../graphs/auto_vs_time.png\")\n plt.clf()\n\n plt.plot(years, free_by_year, color=\"b\", label=\"Freeways\")\n plt.plot(years, art_by_year, color=\"r\", label=\"Arterial Roads\")\n plt.legend()\n plt.xlabel(\"Year\")\n plt.ylabel(\"Driving Distance (miles)\")\n plt.title(\"Average Net Freeway/Arterial Road Driving over Time (\"\n \"1000s of miles)\")\n plt.savefig(\"../graphs/dist_vs_time.png\")\n plt.clf()\n\n plt.plot(years, gen_time_by_year, color=\"b\", label=\"General Value\")\n plt.plot(years, comm_time_by_year, color=\"r\", label=\"Commercial Value\")\n plt.legend()\n plt.xlabel(\"Year\")\n plt.ylabel(\"Value ($/hr)\")\n plt.title(\"Average General and Commercial Values of Time over Time\")\n plt.savefig(\"../graphs/val_of_time_vs_time.png\")\n plt.clf()\n\n plt.plot(years, gas_by_year, color=\"b\", label=\"Gasoline\")\n plt.plot(years, diesel_by_year, color=\"r\", label=\"Diesel\")\n plt.legend()\n plt.xlabel(\"Year\")\n plt.ylabel(\"Cost ($/gallon)\")\n plt.title(\"Average Cost of Gasoline and Diesel Fuel over Time\")\n plt.savefig(\"../graphs/gas_vs_time.png\")\n plt.clf()\n\n plt.plot(years, ann_delay_by_year)\n plt.xlabel(\"Year\")\n plt.ylabel(\"Annual per-Commuter Traffic Delays (hrs)\")\n plt.title(\"Average Annual per-Commuter Traffic Delays over Time\")\n plt.savefig(\"../graphs/delay_vs_time.png\")\n plt.clf()\n\n plt.plot(years, travel_index_by_year)\n plt.xlabel(\"Year\")\n plt.ylabel(\"Travel Index\")\n plt.title(\"Average Travel Index over Time\")\n plt.savefig(\"../graphs/index_vs_time.png\")\n plt.clf()\n\n plt.plot(years, ump_by_year)\n plt.xlabel(\"Year\")\n plt.ylabel(\"Unemployment Rate (%)\")\n plt.title(\"Average Unemployment Rate over Time\")\n plt.savefig(\"../graphs/ump_vs_time.png\")\n plt.clf()\n\n plt.plot(years, cost_by_year)\n plt.xlabel(\"Year\")\n 
plt.ylabel(\"Cost ($)\")\n plt.title(\"Average Annual per-Capita Cost of Traffic Congestion over Time\")\n plt.savefig(\"../graphs/cost_vs_time.png\")\n plt.clf()\n\n plt.plot(years, excess_gas_per_year)\n plt.xlabel(\"Year\")\n plt.ylabel(\"Excess Fuel Consumed (Gallons)\")\n plt.title(\"Average Annual per-Capita Excess Fuel Consumed over Time\")\n plt.savefig(\"../graphs/extra_fuel_vs_time.png\")\n plt.clf()\n\n x = list(lyft) # Lyft data\n y = list(uber) # Uber data\n bins = [2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018]\n\n plt.hist([x, y], bins, label=['Lyft', 'Uber'])\n plt.legend(loc='upper right')\n plt.xlabel(\"Year\")\n plt.ylabel(\"Number of cities entered\")\n plt.title(\"Uber and Lyft Entry into Urban Mobility Report Cities\")\n plt.clf()",
"def cross_tabulation(actual, classified):\n crosstab = np.zeros((np.unique(actual).size, np.unique(actual).size))\n for i in range(0, actual.size):\n crosstab[classified[i]-1][actual[i]-1] += 1\n \n total = 0\n diagonal = 0\n for r in range(0, crosstab.shape[0]):\n for c in range(0, crosstab.shape[1]):\n total += crosstab[r][c]\n if (r == c):\n diagonal += crosstab[r][c]\n print(\"The overall accuracy is: \" + str(diagonal / total * 100) + \"%\")\n \n return crosstab",
"def calEachCrossflow():\n \n crossFlow = pd.read_csv('Data_crossflow.csv', index_col = 'Unnamed: 0')\n peakCross = crossFlow['Node2']\n crossFlowPeakFactor = peakCross/0.8\n\n #need to judge the sign of lateral flow according to CTF rule!!\n gapsToFlip = [2,4,6,7,9,11,13,14,16,18,20,21] #gaps in y direction\n gapsToFlipIndex = [x - 1 for x in gapsToFlip]\n for index in gapsToFlipIndex:\n crossFlowPeakFactor[index] = -crossFlowPeakFactor[index] \n \n return crossFlowPeakFactor",
"def eda_base():\n ######################################\n # Missing Values\n ######################################\n # cat_cols, num_cols, cat_but_car, num_but_cat = grab_col_names(df)\n # Observations: 356255\n # Variables: 122\n # cat_cols: 15\n # num_cols: 67\n # cat_but_car: 1\n # num_but_cat: 39\n global train, test, df\n train = pd.read_csv('datasets/home-credit-default-risk/application_train.csv')\n test = pd.read_csv('datasets/home-credit-default-risk/application_test.csv')\n df = train.append(test).reset_index(drop=True)\n\n df.isnull().sum()\n df.isnull().sum().sum() # 10670198\n df.shape\n # df.dropna(inplace=True)\n # msno.matrix(df.sample(250))\n # plt.show()\n\n df = df[df['CODE_GENDER'] != 'XNA']\n df['DAYS_EMPLOYED'].replace(365243, np.nan, inplace=True)\n\n df[df.columns[df.isnull().any()]]\n\n cat_cols, num_cols, cat_but_car, num_but_cat = grab_col_names(df)\n\n na_cols_cat = [col for col in cat_cols if df[col].isnull().sum() > 0]\n df[na_cols_cat] = df[na_cols_cat].apply(lambda x: x.fillna(x.mode()), axis=0)\n\n na_cols_num = [col for col in num_cols if df[col].isnull().sum() > 0 and \"TARGET\" not in col]\n df[na_cols_num] = df[na_cols_num].apply(lambda x: x.fillna(x.median()), axis=0)\n\n na_cols_cat_but_car = [col for col in cat_but_car if df[col].isnull().sum() > 0]\n df[na_cols_cat_but_car] = df[na_cols_cat_but_car].apply(lambda x: x.fillna(x.mode()), axis=0)\n\n na_cols_num_but_cat = [col for col in num_but_cat if df[col].isnull().sum() > 0 and \"TARGET\" not in col]\n df[na_cols_num_but_cat] = df[na_cols_num_but_cat].apply(lambda x: x.fillna(x.median()), axis=0)\n\n df['OCCUPATION_TYPE'] = df['OCCUPATION_TYPE'].fillna(df['OCCUPATION_TYPE'].mode()[0])\n\n ######################################\n # Feature Engineering\n ######################################\n\n #############################################\n # Outliers\n #############################################\n\n #############################################\n # Label Encoding\n #############################################\n\n #############################################\n # Rare Encoding\n #############################################\n\n #############################################\n # One-Hot Encoding\n #############################################\n df = pd.get_dummies(df, dummy_na=True)\n df.shape\n #############################################\n # Standart Scaler\n #############################################\n\n ######################################\n # Modeling\n ######################################\n global train_df, test_df\n train_df = df[df['TARGET'].notnull()]\n test_df = df[df['TARGET'].isnull()].drop(\"TARGET\", axis=1)\n\n global X, y, X_train, X_test, y_train, y_test\n y = train_df[\"TARGET\"]\n X = train_df.drop([\"SK_ID_CURR\", \"TARGET\"], axis=1)\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.20, random_state=1)",
"def run(self, df: pd.DataFrame, model: Any):\n print(f'Running cross validation with the following model:\\n{model}')\n\n df['timestamp'] = pd.to_datetime(df['timestamp'])\n\n date_1 = datetime.datetime(year=2016, month=1, day=1)\n date_2 = datetime.datetime(year=2016, month=4, day=1)\n date_3 = datetime.datetime(year=2016, month=7, day=1)\n date_4 = datetime.datetime(year=2016, month=10, day=1)\n date_5 = datetime.datetime(year=2017, month=1, day=1)\n\n summaries: List[FoldSummary] = []\n\n for train_start, train_end, test_start, test_end in [\n (date_1, date_2, date_2, date_3),\n # (date_1, date_3, date_3, date_4),\n # (date_1, date_4, date_4, date_5)\n ]:\n print('Calculating train and test datasets')\n train_df = df[(df['timestamp'] >= train_start) & (df['timestamp'] < train_end)]\n test_df = df[(df['timestamp'] >= test_start) & (df['timestamp'] < test_end)]\n\n columns = list(train_df.columns)\n columns.remove('timestamp')\n columns.remove('meter_reading')\n\n print(columns)\n\n train_data = train_df[columns]\n test_data = test_df[columns]\n\n print(f'Fitting the model on train dataset of size {len(train_data)}')\n model.fit(train_data, train_df['meter_reading'])\n print(f'Predicting for test dataset of size {len(test_data)}')\n predictions = model.predict(test_data)\n\n score = self._calculate_score(predictions, test_df['meter_reading'])\n print(f'Score: {score}')\n\n summaries.append(FoldSummary(\n train_start=train_start,\n train_end=train_end,\n test_start=test_start,\n test_end=test_end,\n score=score\n ))\n\n filename = f'../resources/runs/{time.time()}.txt'\n os.makedirs(os.path.dirname(filename), exist_ok=True)\n with open(filename, 'w+') as f:\n f.write(f'{model.__class__.__name__}\\n')\n f.write(f'{str(model.get_params())}\\n')\n for summary in summaries:\n f.write(f'Summary (\\n'\n f'\\ttrain start: {summary.train_start}\\n'\n f'\\ttrain end: {summary.train_end}\\n'\n f'\\ttest start: {summary.test_start}\\n'\n f'\\ttest end: {summary.test_end}\\n'\n f'\\tscore: {summary.score}\\n'\n f')\\n')\n\n print(summaries)\n\n return model",
"def calEachCrossflowAllAxialNode():\n AxialNodeno = 14 # axial node number in CFD data\n Nodes = []\n base = 'Node'\n for i in range(0, AxialNodeno):\n Nodes.append(base+str(i))\n \n crossFlow = pd.read_csv('Data_crossflow.csv', index_col = 'Unnamed: 0')\n lateralFactors = []\n for node in Nodes:\n lateralFactors.append(crossFlow[node]/0.8)\n #need to judge the sign of lateral flow according to CTF rule!!\n gapsToFlip = [2,4,6,7,9,11,13,14,16,18,20,21] #gaps in y direction\n gapsToFlipIndex = [x - 1 for x in gapsToFlip]\n for factors in lateralFactors:\n for index in gapsToFlipIndex:\n factors[index] = -factors[index] \n #note: lateralFactors is a list of list\n \n #below calculate factors averaged over all subchannels\n crossFlowAveFactor = crossFlow.apply(abs).mean(axis = 0)/0.8\n lateralFactorsAvelist = []\n for i in range(0,14):\n base = []\n for j in range(0,24):\n base.append(crossFlowAveFactor[i])\n lateralFactorsAvelist.append(base)\n \n \n for i in range(0, 14):\n for j in range(0, 24):\n #note, in the original model there is only one sign for all source\n #terms in one sub-channel. therefore -- sign(crossFlow.iloc[j,2])\n lateralFactorsAvelist[i][j] = lateralFactorsAvelist[i][j] *sign(crossFlow.iloc[j,2]) \n for each in lateralFactorsAvelist:\n for index in gapsToFlipIndex:\n each[index] = -each[index] \n \n \n return lateralFactors, lateralFactorsAvelist",
"def _kfold_cross_val(self, training_elms: np.ndarray) -> None:\n kf = model_selection.KFold(\n n_splits=config.folds, shuffle=True, random_state=config.seed\n )\n self.df[\"elm_events\"] = training_elms\n self.df[\"fold\"] = -1\n for f_, (_, valid_idx) in enumerate(kf.split(X=training_elms)):\n self.df.loc[valid_idx, \"fold\"] = f_",
"def perform_backtests(self):\r\n \r\n for test_name in self.testing_dates:\r\n print('\\t|--Test #{}'.format(test_name))\r\n test_dates = self.testing_dates[test_name]\r\n print('\\t\\t|--Performing Nested Cross-Validation')\r\n cross_validation = CrossValidate()\r\n cross_validation.output_names = self.output_names\r\n cross_validation.feature_names = self.feature_names\r\n cross_validation.feature_dict = self.feature_dict\r\n cross_validation.full_df = self.final_df_output\r\n cross_validation.cv_params = self.testing_dates\r\n cross_validation.test_name = test_name\r\n cross_validation.walk_forward_cv()\r\n self.optimal_params['Test #{}'.format(test_name)] = cross_validation.optimal_params_by_output\r\n self.cv_model_metadata['Test #{}'.format(test_name)] = cross_validation.cv_metadata_by_output\r\n \r\n print('\\t\\t|--Performing Out-Of-Sample Testing')\r\n prediction = Predict()\r\n prediction.output_names = self.output_names\r\n prediction.feature_names = self.feature_names\r\n prediction.feature_dict = self.feature_dict\r\n prediction.optimal_params_by_output = cross_validation.optimal_params_by_output\r\n prediction.cv_predictions_by_output = cross_validation.cv_predictions_by_output\r\n prediction.full_df = self.final_df_output\r\n prediction.pred_start = test_dates['pred_start']\r\n prediction.pred_end = test_dates['pred_end']\r\n prediction.run_prediction()\r\n self.full_predictions['Test #{}'.format(test_name)] = prediction.predictions_by_output\r\n self.pred_model_metadata['Test #{}'.format(test_name)] = prediction.pred_metadata_by_output\r\n \r\n print('\\nSaving model metadata...')\r\n with open(path.deployment_cv_results, 'w') as file:\r\n json.dump(self.optimal_params, file)\r\n with open(path.deployment_cv_metadata, 'w') as file:\r\n json.dump(self.cv_model_metadata, file)\r\n with open(path.deployment_pred_model_metadata, 'w') as file:\r\n json.dump(self.pred_model_metadata, file)\r\n with open(path.deployment_full_predictions, 'w') as file:\r\n json.dump(self.full_predictions, file)",
"def contrastSide(trim = True, exclOverlap = False, exclY = True):\n\t\n\tfig = plt.figure()\n\ttitle = \"Effect of contrast - exclOverlap = %s - exclY = %s\" % (exclOverlap, exclY)\n\tplt.suptitle(title)\n\t\n\tfor sacc in [\"1\", \"2\", \"3\"]:\n\t\t\n\t\tcolList = [\"#ef2929\", \"#3465a4\",\"#73d216\", \"#f57900\"]\n\t\tplt.subplot(1,3, int(sacc))\n\t\tplt.title(\"sacc = %s\"% (sacc))\n\t\t\n\t\t# Exp 1:\n\t\texp = \"004A\"\n\t\tdm1 = getDM.getDM(exp = exp, driftCorr = True, onlyControl = False)\n\n\t\t# This is the same for corrected landing positions (the saccade\n\t\t# doesn't change; only the reference point does)\n\t\tdm1 = dm1.select(\"endX%sNorm != ''\" % sacc, verbose = False)\n\t\tdm1 = dm1.select(\"endX%sNorm > -.5\" % sacc, verbose = False)\n\t\tdm1 = dm1.select(\"endX%sNorm < .5\" % sacc, verbose = False)\n\t\t\n\t\tif exclY:\n\t\t\tdm1 = dm1.select(\"endY%sNorm != ''\" % sacc)\n\t\t\tdm1 = dm1.select(\"endY%sNorm > -.5\" % sacc)\n\t\t\tdm1 = dm1.select(\"endY%sNorm < .5\" % sacc)\n\t\t\t\n\t\tfor dv in [\"endX%sNorm\" % sacc, \"endX%sCorrNorm\" % sacc]:\n\t\t\t\n\t\t\t#If wanted, trim the data\n\t\t\tif trim:\n\t\t\t\t_dm1 = dm1.selectByStdDev(keys = [\"contrast_side\", \"file\"], dv = dv)\n\n\t\t\t# For experiment 1 there are not enough third fixations anyway,\n\t\t\t# not even when not filtering on-object on the y-axis.\n\t\t\tif exp == \"004A\" and sacc == \"3\":\n\t\t\t\t\n\t\t\t\tcolList = [\"#ef2929\", \"#3465a4\"]\n\t\t\t\tcontinue\n\t\t\t\n\t\t\t# Get pivot matrix:\n\t\t\tpm = PivotMatrix(_dm1, [\"contrast_side\"], [\"file\"], dv=dv, colsWithin=True)#, xLabels = [\"left\", \"control\", \"right\"])\n\t\t\tcol = colList.pop()\n\t\t\tpm.plot(fig = fig, nLvl1 = 1, colors = [col])\n\n\t\t# Experiment 2 and 3:\n\t\tdv = \"endX%sNorm\" % sacc\n\t\t\n\t\tfor exp in [\"004B\", \"004C\"]:\n\t\t\tif exclY and exp == \"004B\" and sacc == \"3\":\n\t\t\t\tcolList = [\"#ef2929\"]\n\t\t\t \n\t\t\t\tcontinue\n\t\t\t\n\t\t\tif exp == \"004C\" and exclOverlap:\n\t\t\t\tdm = dm.select(\"gap == 'zero'\")\n\t\t\t\n\t\t\tprint \"EXP = \", exp\n\t\t\t\n\t\t\tdm = getDM.getDM(exp = exp, driftCorr = True, onlyControl = False)\n\t\t\t\n\t\t\t# This is the same for corrected landing positions (the saccade\n\t\t\t# doesn't change; only the reference point does)\n\t\t\tdm = dm.select(\"endX%sNorm != ''\" % sacc, verbose = False)\n\t\t\tdm = dm.select(\"endX%sNorm > -.5\" % sacc, verbose = False)\n\t\t\tdm = dm.select(\"endX%sNorm < .5\" % sacc, verbose = False)\n\t\t\t\n\t\t\tif exclY:\n\t\t\t\tdm = dm.select(\"endY%sNorm != ''\" % sacc)\n\t\t\t\tdm = dm.select(\"endY%sNorm > -.5\" % sacc)\n\t\t\t\tdm = dm.select(\"endY%sNorm < .5\" % sacc)\n\n\t\t\t\n\t\t\t#If wanted, trim the data\n\t\t\tif trim:\n\t\t\t\t_dm = dm.selectByStdDev(keys = [\"contrast_side\", \"file\"], dv = dv)\n\t\t\t# Get pivot matrix:\n\t\t\tpm = PivotMatrix(_dm, [\"contrast_side\"], [\"file\"], dv=dv, colsWithin=True)\n\t\t\tcol = colList.pop()\n\t\t\tpm.plot(fig = fig, nLvl1 = 1, colors = [col])\n\t\t\n\t\t# Modify plot:\n\t\tplt.ylim(-.2, .2)\n\t\t\n\t\tplt.legend([\"Exp1 (abs)\", \"Exp1 (corr)\", \"Exp2 (abs)\", \"Exp2 (sim)\"])\n\t\tif sacc == \"3\":\n\t\t\tplt.legend([\"Exp2 (abs)\", \"Exp2 (sim)\"])\n\t\t\tif exclY:\n\t\t\t\tplt.legend([\"Exp2 (sim)\"])\n\t\t\n\t\tplt.axhline(0, color = \"#888a85\", linestyle = \"--\", linewidth = 2)\n\t\n\tplt.savefig(\"%s.png\" % title)",
"def cross_val(model, data, n, target):\n scores = []\n splits = partition(data, n)\n for i in range(n):\n train_list = splits[:i] + splits[i+1:]\n train = pd.concat(train_list)\n test = splits[i]\n y_true = test[target]\n test = test.drop(columns=[target], axis=1)\n model.fit(train, estimator=BayesianEstimator, prior_type=\"BDeu\")\n y_pred = model.predict(test)\n acc = accuracy_score(y_pred[target], y_true)\n scores.append(acc)\n return scores",
"def crossValidate(dataset, folds):\n\tshuffle(dataset)\n\tcv_results = []\n\tprecision_recall_acc = []\n\tfoldSize = int(len(dataset)/folds)\n\tfor i in range(0,len(dataset),foldSize):\n\t\t# preparing data\n\t\tvalD = dataset[i:i+foldSize]\n\t\ttestD = dataset[:i]+dataset[i+foldSize:] #list(set(dataset)-set(dataset[i:i+foldSize]))\n\t\t# Training\n\t\tprint(\"*\"*60)\n\t\tprint(\"Training on data-set size \"+str(len(testD))+\" of batch \"+str(i/(foldSize)))\n\t\tclassi = trainClassifier(testD)\n\t\t# Prediction on validation data \n\t\tprint(\"Predicting on heldout data-set size...\"+str(len(valD))+\" of batch \"+str(i/(foldSize)))\n\t\ty_true = list(map(lambda t: t[1], valD))\n\t\ty_pred = predictLabels(valD,classi)\t\t\n\t\t# Performance Metrics\t\t\n\t\t# average based on macro as it calculate metrics for each label, and find their unweighted mean.\n\t\tprecision_recall = list(precision_recall_fscore_support(y_true, y_pred, average='macro'))\n\t\tacc = accuracy_score(y_true,y_pred)\n\t\tprecision_recall[-1] = acc\n\t\tprint(precision_recall)\n\t\tprecision_recall_acc.append(precision_recall)\n\tdf = pd.DataFrame(precision_recall_acc,columns = [\"Precision\",\"Recall\",\"F1 score\",\"Accuracy Score\"])\n\tprint(df)\n\tcv_results = df.mean().tolist()\n\treturn cv_results",
"def keynesian_cross(T, I, G, C):\n # The data vector to be plotted for production and aggregate expenditure:\n Y_arrey = np.linspace(0,300)\n PE_arrey = (C * (Y_arrey - T) + I + G)\n degree = Y_arrey\n\n # The figure\n fig = plt.figure(figsize=(10,5))\n ax = fig.add_subplot(1,1,1)\n\n ax.plot(Y_arrey, degree, label=\"45-degree line\", color='lightblue',linewidth=3)\n ax.plot(Y_arrey, AD_arrey, label=\"AD=C+I+G+NX\", color='darkorange',linewidth=3)\n\n ax.set_xlabel(\"Y\")\n ax.set_ylabel(\"PE\")\n ax.legend(loc=\"upper left\")\n\n ax.grid()\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['bottom'].set_visible(False)\n ax.spines['left'].set_visible(False)\n return",
"def co_average_metrics_fx():\r\n co_inv_overall_28d_list = []\r\n co_inv_50k_28d_list = []\r\n co_inv_100k_28d_list = []\r\n co_inv_manager_28d_list = []\r\n co_inv_sales_28d_list = []\r\n co_inv_key_roles_28d_list = []\r\n co_inv_it_28d_list = []\r\n co_inv_hourly_28d_list = []\r\n\r\n co_fut_cost_overall_28d_list = []\r\n co_fut_cost_50k_28d_list = []\r\n co_fut_cost_100k_28d_list = []\r\n co_fut_cost_manager_28d_list = []\r\n co_fut_cost_sales_28d_list = []\r\n co_fut_cost_key_roles_28d_list = []\r\n co_fut_cost_it_28d_list = []\r\n co_fut_cost_hourly_28d_list = []\r\n\r\n for x in range(len(start_list)):\r\n a = start_list[x]\r\n b = end_list[x]\r\n\r\n co_inv_overall_28d_values = df_co_metrics.iloc[a:b, df_co_metrics.columns.get_loc('co_inv_overall')].rolling(window=28).mean()\r\n co_inv_50k_28d_values = df_co_metrics.iloc[a:b, df_co_metrics.columns.get_loc('co_inv_50k')].rolling(window=28).mean()\r\n co_inv_100k_28d_values = df_co_metrics.iloc[a:b, df_co_metrics.columns.get_loc('co_inv_100k')].rolling(window=28).mean()\r\n co_inv_manager_28d_values = df_co_metrics.iloc[a:b,df_co_metrics.columns.get_loc('co_inv_manager')].rolling(window=28).mean()\r\n co_inv_sales_28d_values = df_co_metrics.iloc[a:b, df_co_metrics.columns.get_loc('co_inv_sales')].rolling(window=28).mean()\r\n co_inv_key_roles_28d_values = df_co_metrics.iloc[a:b, df_co_metrics.columns.get_loc('co_inv_key_roles')].rolling(window=28).mean()\r\n co_inv_it_28d_values = df_co_metrics.iloc[a:b, df_co_metrics.columns.get_loc('co_inv_it')].rolling(window=28).mean()\r\n co_inv_hourly_28d_values = df_co_metrics.iloc[a:b, df_co_metrics.columns.get_loc('co_inv_hourly')].rolling(window=28).mean()\r\n\r\n co_fut_cost_overall_28d_values = df_co_metrics.iloc[a:b, df_co_metrics.columns.get_loc('co_fut_cost_overall')].rolling(window=28).mean()\r\n co_fut_cost_50k_28d_values = df_co_metrics.iloc[a:b, df_co_metrics.columns.get_loc('co_fut_cost_50k')].rolling(window=28).mean()\r\n co_fut_cost_100k_28d_values = df_co_metrics.iloc[a:b, df_co_metrics.columns.get_loc('co_fut_cost_100k')].rolling(window=28).mean()\r\n co_fut_cost_manager_28d_values = df_co_metrics.iloc[a:b, df_co_metrics.columns.get_loc('co_fut_cost_manager')].rolling(window=28).mean()\r\n co_fut_cost_sales_28d_values = df_co_metrics.iloc[a:b, df_co_metrics.columns.get_loc('co_fut_cost_sales')].rolling(window=28).mean()\r\n co_fut_cost_key_roles_28d_values = df_co_metrics.iloc[a:b, df_co_metrics.columns.get_loc('co_fut_cost_key_roles')].rolling(window=28).mean()\r\n co_fut_cost_it_28d_values = df_co_metrics.iloc[a:b, df_co_metrics.columns.get_loc('co_fut_cost_it')].rolling(window=28).mean()\r\n co_fut_cost_hourly_28d_values = df_co_metrics.iloc[a:b, df_co_metrics.columns.get_loc('co_fut_cost_hourly')].rolling(window=28).mean()\r\n\r\n co_inv_overall_28d_list.append(co_inv_overall_28d_values)\r\n co_inv_50k_28d_list.append(co_inv_50k_28d_values)\r\n co_inv_100k_28d_list.append(co_inv_100k_28d_values)\r\n co_inv_manager_28d_list.append(co_inv_manager_28d_values)\r\n co_inv_sales_28d_list.append(co_inv_sales_28d_values)\r\n co_inv_key_roles_28d_list.append(co_inv_key_roles_28d_values)\r\n co_inv_it_28d_list.append(co_inv_it_28d_values)\r\n co_inv_hourly_28d_list.append(co_inv_hourly_28d_values)\r\n\r\n co_fut_cost_overall_28d_list.append(co_fut_cost_overall_28d_values)\r\n co_fut_cost_50k_28d_list.append(co_fut_cost_50k_28d_values)\r\n co_fut_cost_100k_28d_list.append(co_fut_cost_100k_28d_values)\r\n co_fut_cost_manager_28d_list.append(co_fut_cost_manager_28d_values)\r\n 
co_fut_cost_sales_28d_list.append(co_fut_cost_sales_28d_values)\r\n co_fut_cost_key_roles_28d_list.append(co_fut_cost_key_roles_28d_values)\r\n co_fut_cost_it_28d_list.append(co_fut_cost_it_28d_values)\r\n co_fut_cost_hourly_28d_list.append(co_fut_cost_hourly_28d_values)\r\n\r\n print('avaerages calculated...') \r\n df_co_metrics['co_inv_overall_28d'] = pd.concat(co_inv_overall_28d_list)\r\n df_co_metrics['co_inv_50k_28d'] = pd.concat(co_inv_50k_28d_list)\r\n df_co_metrics['co_inv_100k_28d'] = pd.concat(co_inv_100k_28d_list)\r\n df_co_metrics['co_inv_manager_28d'] = pd.concat(co_inv_manager_28d_list)\r\n df_co_metrics['co_inv_sales_28d'] = pd.concat(co_inv_sales_28d_list)\r\n df_co_metrics['co_inv_key_roles_28d'] = pd.concat(co_inv_key_roles_28d_list)\r\n df_co_metrics['co_inv_it_28d'] = pd.concat(co_inv_it_28d_list)\r\n df_co_metrics['co_inv_hourly_28d'] = pd.concat(co_inv_hourly_28d_list)\r\n\r\n df_co_metrics['co_fut_cost_overall_28d'] = pd.concat(co_fut_cost_overall_28d_list)\r\n df_co_metrics['co_fut_cost_50k_28d'] = pd.concat(co_fut_cost_50k_28d_list)\r\n df_co_metrics['co_fut_cost_100k_28d'] = pd.concat(co_fut_cost_100k_28d_list)\r\n df_co_metrics['co_fut_cost_manager_28d'] = pd.concat(co_fut_cost_manager_28d_list)\r\n df_co_metrics['co_fut_cost_sales_28d'] = pd.concat(co_fut_cost_sales_28d_list)\r\n df_co_metrics['co_fut_cost_key_roles_28d'] = pd.concat(co_fut_cost_key_roles_28d_list)\r\n df_co_metrics['co_fut_cost_it_28d'] = pd.concat(co_fut_cost_it_28d_list)\r\n df_co_metrics['co_fut_cost_hourly_28d'] = pd.concat(co_fut_cost_hourly_28d_list)\r\n print(columns added...)",
"def test_cross_totals():\n pressure = np.array([1008., 1000., 947., 925., 921., 896., 891., 889., 866.,\n 858., 850., 835., 820., 803., 733., 730., 700., 645.,\n 579., 500., 494., 466., 455., 441., 433., 410., 409.,\n 402., 400., 390., 388., 384., 381., 349., 330., 320.,\n 306., 300., 278., 273., 250., 243., 208., 200., 196.,\n 190., 179., 159., 151., 150., 139.]) * units.hPa\n temperature = np.array([27.4, 26.4, 22.9, 21.4, 21.2, 20.7, 20.6, 21.2, 19.4,\n 19.1, 18.8, 17.8, 17.4, 16.3, 11.4, 11.2, 10.2, 6.1,\n 0.6, -4.9, -5.5, -8.5, -9.9, -11.7, -12.3, -13.7, -13.8,\n -14.9, -14.9, -16.1, -16.1, -16.9, -17.3, -21.7, -24.5, -26.1,\n -28.3, -29.5, -33.1, -34.2, -39.3, -41., -50.2, -52.5, -53.5,\n -55.2, -58.6, -65.2, -68.1, -68.5, -72.5]) * units.degC\n dewpoint = np.array([24.9, 24.6, 22., 20.9, 20.7, 14.8, 13.6, 12.2, 16.8,\n 16.6, 16.5, 15.9, 13.6, 13.2, 11.3, 11.2, 8.6, 4.5,\n -0.8, -8.1, -9.5, -12.7, -12.7, -12.8, -13.1, -24.7, -24.4,\n -21.9, -24.9, -36.1, -31.1, -26.9, -27.4, -33., -36.5, -47.1,\n -31.4, -33.5, -40.1, -40.8, -44.1, -45.6, -54., -56.1, -56.9,\n -58.6, -61.9, -68.4, -71.2, -71.6, -77.2]) * units.degC\n\n ct = cross_totals(pressure, temperature, dewpoint)\n assert_almost_equal(ct, 21.40 * units.delta_degC, 2)",
"def compute_error_cross_dataset(AL, train_y):\n # print(train_y.shape)\n nb = train_y.shape[0]\n error=np.power(np.add(train_y,-AL),2)*1/nb\n return error\n # raise NotImplementedError",
"def __init__(self):\r\n \r\n #self.max_stocks = 100\r\n self.max_stocks = 200\r\n \"\"\" cv_factor determines what portion of stocks to put in cross validation set and what portion\r\n to leave in training set. cv_factor = 2 means every other stock goes into cross validation\r\n set. cv_factor = 3 means every third stock goes into cross validation set \"\"\"\r\n self.cv_factor = 2 \r\n \"\"\" future_day is how many training days in the future we train for. Setting future_day = 25\r\n means we are measuring how the stock does 25 days out \"\"\"\r\n self.future_day = 25\r\n \"\"\" The train_dates are the dates for training and cross validation\"\"\"\r\n self.train_dates = []\r\n first_train_date = dateutl.days_since_1900('2001-01-01')\r\n num_train_dates = 10\r\n train_date_increment = 60\r\n self.train_dates.append(first_train_date)\r\n for iday in range(1,num_train_dates):\r\n last_train_date = self.train_dates[iday-1]\r\n self.train_dates.append(last_train_date + train_date_increment)\r\n \"\"\"self.train_dates[1] -= 1 \"\"\"\r\n \r\n \"\"\" test_dates are the dates we are using for testing \"\"\"\r\n self.test_dates = []\r\n first_test_date = dateutl.days_since_1900('2010-01-01')\r\n num_test_dates = 10\r\n test_date_increment = 60\r\n self.test_dates.append(first_test_date) \r\n for iday in range(1,num_test_dates):\r\n last_test_date = self.test_dates[iday-1]\r\n self.test_dates.append(last_test_date + test_date_increment)\r\n \"\"\"self.test_dates[1] -= 1\r\n self.test_dates[3] += 1\r\n self.test_dates[4] += 3\r\n self.test_dates[5] += 4\r\n self.test_dates.append(dateutl.days_since_1900('2010-01-01'))\r\n self.test_dates.append(dateutl.days_since_1900('2010-03-01'))\r\n self.test_dates.append(dateutl.days_since_1900('2010-05-01'))\r\n self.test_dates.append(dateutl.days_since_1900('2010-07-01'))\r\n self.test_dates.append(dateutl.days_since_1900('2010-09-01'))\r\n self.test_dates.append(dateutl.days_since_1900('2010-11-01'))\"\"\"\r\n \"\"\"train_history_days and train_increment set how many historical days we use to\r\n train and the increment used. Setting train_history_days = 21 and train_increment = 5\r\n means we are using the values at days days 5, 10, 15 and 20 days before the reference day\r\n as input features \"\"\"\r\n self.train_days = 21\r\n self.train_increment = 5\r\n self.features = ['rsi','tsi','ppo','adx','dip14','dim14','cci', \\\r\n 'cmo','mfi','natr','roc','stoch','uo']\r\n \"\"\" output is just a boolean about calling the output function to write out \r\n appropriate X and y matricies. The default is False meaning do not write out\r\n matricies \"\"\"\r\n self.output = False",
"def zeroCrossing(self,evap_threshold):\r\n\t\tself.splitBaseline =(np.mean(self.splitData[0:10]))\t\r\n\t\tsplit_max_index = np.argmax(self.splitData)\r\n\t\tsplit_min_index = np.argmin(self.splitData)\r\n\r\n\t\tif split_max_index >= split_min_index:\r\n\t\t\treturn self.zeroCrossingPosSlope(evap_threshold)\r\n\t\t\r\n\t\tif split_max_index < split_min_index:\r\n\t\t\treturn self.zeroCrossingNegSlope(evap_threshold)"
] | [
"0.5959022",
"0.5762439",
"0.57529324",
"0.5593366",
"0.55377454",
"0.5520248",
"0.5454742",
"0.5441111",
"0.53812",
"0.53765136",
"0.5361948",
"0.5333665",
"0.5293056",
"0.5286",
"0.52654094",
"0.52570784",
"0.52462506",
"0.523822",
"0.5229423",
"0.52216583",
"0.5211983",
"0.5209452",
"0.5191304",
"0.51593834",
"0.51591104",
"0.5139875",
"0.5107765",
"0.51048607",
"0.5077943",
"0.50715524"
] | 0.6554467 | 0 |
Check if a switch exists for the device. | def _switch_exist(lge_device: LGEDevice, switch_desc: ThinQSwitchEntityDescription) -> bool:
if switch_desc.value_fn is not None:
return True
feature = switch_desc.key
if feature in lge_device.available_features:
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_switch(self):\n\n svc = \"urn:upnp-org:serviceId:SwitchPower1\"\n if not svc in self.services:\n raise RuntimeError, \"Device doesn't support the service\"\n \n status = self.get_variable(svc, \"Status\")\n return status == 1",
"def _verify_switch_created(self, switch):\n if not (\n hasattr(switch, \"switch_power\") and\n isinstance(switch.switch_power, switch_power_base.SwitchPowerBase)):\n raise errors.CapabilityNotReadyError(\n msg=\"'switch_power' capability is missing in hub device {} ({}),\"\n \" or is not an instance of SwitchPowerBase\".format(\n self.hub_name,\n type(switch).__name__),\n device_name=self._device_name)",
"def exists_device_node(self, device_node: Path) -> bool:\n try:\n self.get_by_path(device_node)\n except HardwareNotFound:\n return False\n return True",
"def device_exists(device):\n return os.path.exists('/sys/class/net/%s' % device)",
"def checkWifi():\n try:\n subprocess.check_output(\"iwgetid\")\n return True\n except subprocess.CalledProcessError: # if not connected\n return False",
"def _check_requirements_switchport_exists(self, device, intf_type, trunk_no_default_native,\n intf_name, vlan_action, vlan_id, c_tag):\n\n try:\n return_code = device.interface.switchport(int_type=intf_type,\n name=intf_name,\n get='True')\n except ValueError as e:\n self.logger.error(\"Fetching Switch port enable failed %s\"\n % (e.message))\n raise ValueError(\"Fetching Switch port enable failed\")\n\n if return_code is not None:\n result = device.interface.switchport_list\n if vlan_id is not None and vlan_action == 'add':\n vlan_range = list(itertools.chain.from_iterable(range(int(ranges[0]),\n int(ranges[1]) + 1) for ranges in ((el + [el[0]])[:2]\n for el in (miniRange.split('-')\n for miniRange in vlan_id.split(',')))))\n for intf in result:\n if intf['interface-name'] == intf_name:\n if not trunk_no_default_native and intf['mode'] == 'trunk'\\\n or trunk_no_default_native and\\\n intf['mode'] == 'trunk-no-default-native':\n if vlan_id is not None and vlan_action == 'add':\n if intf['vlan-id'] is not None:\n ret = self._check_list(vlan_range,\n intf['vlan-id'])\n if ret:\n if len(ret) == len(vlan_range):\n return False\n else:\n return True\n else:\n return False\n elif intf['mode'] == 'access':\n self.logger.error(\"Access mode is configured on interface,\"\n \"Pls remove and re-configure\")\n raise ValueError(\"Access mode is configured on interface,\"\n \"Pls remove and re-configure\")\n else:\n self.logger.error(\"Switchport mode %s is pre-configured on interface\",\n intf['mode'])\n raise ValueError(\"Switchport mode is pre-configured on interface\")\n\n return True",
"def get_switch(self, name):\n try:\n assert name in self.list_switches()\n return self.devices[name]\n except KeyError:\n raise UnknownDevice(name)",
"async def connected(self) -> bool:\n args = ['-t', f\"DEVICE INFO,{self.conf['device_address']}\"]\n output = await self.run_vh(args)\n return \"IN USE BY: NO ONE\" not in output",
"def _is_valid_interface(device, switch, nos_driver):\n for key in device.keys():\n for (speed, interface) in device[key]:\n if not _is_valid_three_tupple(interface):\n return False\n if not _is_valid_interface_speed(speed):\n return False\n return True",
"def check_device_state(self):",
"def is_connected(cls, device_config):\n if \"console_port_name\" in device_config[\"persistent\"]:\n address = device_config[\"persistent\"][\"console_port_name\"]\n else:\n address = device_config[\"persistent\"][\"hub_port_name\"]\n return os.path.exists(address)",
"def switch_to_measurement(self, measurement):\n\n if not self.switching_systems:\n self.log.critical(\n \"No switching systems defined but attempt to switch to measurement {}. \"\n \"Returning dummy True\".format(measurement)\n )\n return True\n\n # First find measurement\n switching_success = False\n self.log.debug(\"Switching to measurement: {!s}\".format(str(measurement)))\n if measurement in self.settings[\"Switching\"][\"Switching_Schemes\"]:\n # When measurement was found\n for device in self.settings[\"Switching\"][\"Switching_devices\"]:\n if (\n device\n in self.settings[\"Switching\"][\"Switching_Schemes\"][measurement]\n ):\n if device in self.devices:\n switch_list = self.settings[\"Switching\"][\"Switching_Schemes\"][\n measurement\n ][device]\n if not switch_list:\n switch_list = []\n if not self.change_switching(self.devices[device], switch_list):\n self.log.error(\n \"Switching to {} was not possible\".format(switch_list)\n )\n return False\n else:\n self.log.error(\n \"Switching device: {} was not found in active resources. No switching done!\".format(\n device\n )\n )\n return False\n else:\n if device in self.devices:\n switch_list = []\n if not self.change_switching(self.devices[device], switch_list):\n self.log.error(\n \"Switching to {} was not possible\".format(switch_list)\n )\n return False\n else:\n self.log.error(\n \"Switching device: {} was not found in active resources. No switching done!\".format(\n device\n )\n )\n return False\n return True\n else:\n self.log.error(\n \"Measurement {} switching could not be found in defined switching schemes.\".format(\n measurement\n )\n )\n return False",
"def check_connectivity(self):\n r = self.run_cmd(\"get-state\")\n return r.startswith(\"device\")",
"def get_switch_open(self) -> bool:\n self.serial.write(b\"G!\")\n switch_open = self.__extract_string(self.__read_response(1)[0], b\"!X\")\n\n return switch_open == \"Switch Open\"",
"def get_switch_status(self) -> bool:\n opened = True\n self.serial.write(b\"F!\")\n response = self.__read_response(1)[0]\n try:\n switch_status = self.__extract_string(response, b\"!X\")\n opened = True\n except:\n switch_status = self.__extract_string(response, b\"!Y\")\n opened = False\n\n closed = not opened\n\n if (opened and switch_status != \"Switch Open\") or (\n closed and switch_status != \"Switch Close\"\n ):\n raise CloudWatcherException(f\"Invalid status {switch_status}\")\n\n return opened",
"def _is_v0x04(self):\n return self.switch.is_connected() and \\\n self.switch.connection.protocol.version == 0x04",
"def is_on(self) -> bool:\n return self.tuya_device.status.get(DPCODE_SWITCH, False)",
"def is_device_connected(device_id):\n try:\n device_name = subprocess.check_output([ADB_EXECUTOR, '-s', device_id, 'shell', 'getprop', 'ro.product.model'])\n device_name = device_name.decode(DEFAULT_CHARSET).replace('\\n', '').replace('\\r', '')\n logger.info('device {} online'.format(device_name))\n except subprocess.CalledProcessError:\n return False\n return True",
"def __checkSwitch ( self, letter, value ):\n\n #-- 1 --\n # [ if letter is a key in self.switchMap -> I\n # else ->\n # sys.stderr +:= (usage message) + (error message)\n # stop execution ]\n if not self.switchMap.has_key ( letter ):\n usage ( self.switchSpecs, self.posSpecs,\n \"No such switch: -%s\" % letter )\n\n #-- 2 --\n if len(value) == 0:\n self.switchMap[letter] = 1\n else:\n self.switchMap[letter] = value",
"def check_if_already_used(self, key):\n for switch in self.new_switches:\n if key == self.new_switches[switch]:\n return True\n return False",
"def async_device_available_fn(controller: UniFiController, obj_id: str) -> bool:\n device = controller.api.devices[obj_id]\n return controller.available and not device.disabled",
"def _openvswitch_switch_dpdk_installed(self):\n cmd = 'dpkg-query -s openvswitch-switch-dpdk'\n for unit in zaza.model.get_units(self.application_name):\n zaza.utilities.juju.remote_run(\n unit.name, cmd, model_name=self.model_name, fatal=True)",
"def health_check(self):\n unset_props = []\n if not self.hub_name:\n unset_props.append(self._hub_name_prop)\n if not self.port_number:\n unset_props.append(self._primary_port_prop)\n if unset_props:\n msg_format = (\"If device is connected to {}, \"\n \"set them via 'gdm redetect {}'\")\n msg = msg_format.format(self.hub_type, self._device_name)\n error_msg = \"properties {} are unset. \".format(\n \" and \".join(unset_props)) + msg\n raise errors.CapabilityNotReadyError(\n msg=error_msg, device_name=self._device_name)\n\n try:\n self._hub = self._create_device_func(self.hub_name)\n # Set up ethernet\n if self.ethernet_switch_address is not None:\n self._ethernet_switch = self._create_device_func(\n self.ethernet_switch_address)\n\n except errors.DeviceError as err:\n raise errors.CapabilityNotReadyError(\n msg=str(err), device_name=self._device_name)\n if self.ethernet_switch_address is not None:\n self._verify_switch_created(self._ethernet_switch)\n self._healthy = True",
"def check_chip_ble_devices_advertising(devCtrl, name, deviceDetails=None):\n ble_chip_device = scan_chip_ble_devices(devCtrl)\n if ble_chip_device is None or len(ble_chip_device) == 0:\n log.info(\"No BLE CHIP device found\")\n return False\n\n chip_device_found = False\n\n for ble_device in ble_chip_device:\n if deviceDetails is not None:\n if (ble_device[\"name\"] == name and\n int(ble_device[\"discriminator\"]) == int(deviceDetails[\"Discriminator\"]) and\n int(ble_device[\"vendorId\"]) == int(deviceDetails[\"VendorID\"]) and\n int(ble_device[\"productId\"]) == int(deviceDetails[\"ProductID\"])):\n chip_device_found = True\n break\n else:\n if (ble_device[\"name\"] == name):\n chip_device_found = True\n break\n\n return chip_device_found",
"def is_connected_drm():\n drm_status = xbee.atcmd(AT_CMD_DI)\n if drm_status is None or drm_status not in drm_status_connected:\n return False\n return True",
"def is_available() -> bool:\n # This function never throws and returns 0 if driver is missing or can't\n # be initialized\n return device_count() > 0",
"def is_switch(G):\n return False",
"def checkstatus(self):\n # define cross-platform /dev/null\n devnull = open(os.devnull, 'w')\n\n # if the OS is windows\n if os.name == 'nt':\n ping = ['ping', '-n', '1', self.device]\n\n # if the OS is posix\n else:\n ping = ['ping', '-c', '1', self.device]\n\n print(self.device + ' Checking for device availability', end='', flush=True)\n time.sleep(5)\n count = 0\n while count < 2:\n print('.', end='', flush=True)\n ping_call = subprocess.Popen(ping, stdout=devnull)\n returncode = ping_call.wait()\n if returncode == 0:\n break\n time.sleep(1)\n count = count + 1\n\n print('')\n if count == 2:\n print(self.device + ' Device is not up')\n print(self.device + ' Exiting...')\n return 'FAIL'\n else:\n print(self.device + ' Device is Online')\n print(self.device + ' Please wait for script initialization')\n time.sleep(5)",
"def _get_switch(self, switch):\n switch = self.switch_by_label(switch)\n id = self.switches[switch.label].id\n # make sure that the serial port is open\n self.assure_serial()\n # create command for the arduino and send it\n input_string = 'r' + str(id[0]) + str(id[1])\n self.serial.write(input_string.encode('ascii'))\n time.sleep(self.READ_DELAY)\n # retrieve result\n result = self.serial.readline().decode().rstrip()\n time.sleep(self.READ_DELAY)\n # store the indicators to the switch\n switch.indicators = (int(result[0]), int(result[1]))\n # raise error if the indicators show an error\n if switch.state is None:\n raise SwitchError(\"Reading the state was unsuccessful: Indicators \"\n f\"of the switch show {switch.indicators}.\")\n return switch.state",
"def is_connected():\n sta_if = network.WLAN(network.STA_IF)\n return sta_if.isconnected()"
] | [
"0.6747511",
"0.6441467",
"0.6203834",
"0.6106376",
"0.60789245",
"0.60287935",
"0.59896916",
"0.58936965",
"0.58648187",
"0.58274436",
"0.5758467",
"0.5728313",
"0.57069063",
"0.5637247",
"0.56260467",
"0.56259376",
"0.56258166",
"0.5610697",
"0.55567384",
"0.55409193",
"0.54879516",
"0.5472512",
"0.54519325",
"0.5422955",
"0.54176646",
"0.53867304",
"0.53742343",
"0.5356593",
"0.53490645",
"0.5339355"
] | 0.7844338 | 0 |
Return True if entity is available. | def available(self) -> bool:
is_avail = True
if self.entity_description.available_fn is not None:
is_avail = self.entity_description.available_fn(self._wrap_device)
return self._api.available and is_avail | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def available(self) -> bool:\n return super().available and (\n self.coordinator.data.get(self.entity_description.key) is not None\n )",
"def available(self) -> bool:\n if self.entity_description.always_available:\n return True\n return self.knx.xknx.connection_manager.state is XknxConnectionState.CONNECTED",
"def available(self) -> bool:\n raise NotImplementedError",
"def _enabled_entity_exists(self) -> bool:\n return self.entity_exists(self._enabled_toggle_entity_id)",
"def available(self):\n return True",
"def available(self):\n return True",
"def is_available(self) -> bool:\n raise NotImplementedError",
"def available(self) -> bool:\n return True",
"def available(self) -> bool:\n return True",
"def available(self) -> bool:\n return self._is_available",
"def available(self) -> bool:\n if self._coordinator and not self._coordinator.last_update_success:\n return False\n return self.rest.data is not None",
"def available(self):\n\t\t\treturn False",
"def available(self):\n\t\t\treturn False",
"def available(self):\n\t\t\treturn False",
"def available(self) -> bool:\n return self._api.available",
"def available(self):\n\t\t\treturn True",
"def available(self):\n\t\t\treturn True",
"def available(self):\n\t\t\treturn True",
"def get_available(self) -> bool:\n return self._available",
"def available(self):\n return True if self._device.status == \"AVAILABLE\" else False",
"def available(self) -> bool:\n return super().available and bool(self.data)",
"def available(self) -> bool:\n return self._product and self._product.online",
"def available(self) -> bool:\n return pulumi.get(self, \"available\")",
"def available(self) -> bool:\n return self._available",
"def available(self) -> bool:\n return self._available",
"def available(self) -> bool:\n return self._available",
"def available(self) -> bool:\n return self._available",
"def available(self) -> bool:\n return self._available",
"def is_available(self) -> bool:\n raise NotImplementedError() # pragma: nocover",
"def available(self) -> bool:\n return self._device.available"
] | [
"0.8291221",
"0.8025913",
"0.7288025",
"0.724847",
"0.7247756",
"0.7247756",
"0.72361225",
"0.7186925",
"0.7186925",
"0.71700346",
"0.7118156",
"0.7103858",
"0.7103858",
"0.7103858",
"0.71023947",
"0.7093814",
"0.7093814",
"0.7093814",
"0.70866835",
"0.7074035",
"0.70578057",
"0.7024777",
"0.7013751",
"0.70124316",
"0.70124316",
"0.70124316",
"0.70124316",
"0.70124316",
"0.7011908",
"0.69873095"
] | 0.8099782 | 1 |
Get current switch state | def _get_switch_state(self):
if self.entity_description.value_fn is not None:
return self.entity_description.value_fn(self._wrap_device)
if self._api.state:
feature = self.entity_description.key
return self._api.state.device_features.get(feature)
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_state(self):\n return self.controller.get_state()",
"def get_current_state(self):\n return self._current_state",
"def get_current_state(self):\n return self.game.get_current_state()",
"def state(self) -> bool:\n return self.get_state(self.entity_ids[\"switch\"])",
"def read_switch(self):\n return GPIO.input(SWITCH_PIN)",
"def get_state(self):\n return self.state",
"def get_state(self):\n return self.state",
"def GetState(self):\r\n \r\n return self.state",
"def _get_state(self):\n fw_wp_en = (self._interface.get('fw_wp_en') == 'on')\n fw_wp = (self._interface.get('fw_wp') == 'on')\n if fw_wp_en:\n return self._STATE_FORCE_ON if fw_wp else self._STATE_FORCE_OFF\n else:\n return self._STATE_ON if fw_wp else self._STATE_OFF",
"def get(self):\n if self.mode == gpio.IN:\n self.state = gpio.input(self.bcm_id)\n\n return self.state",
"def state(self):\n # None will return False\n return bool(self.switch.value)",
"def get_switch_state(self, path, params):\n switch = params.get('switch')\n port = params.get('port')\n host = self._extract_url_base(path)\n reply = self._faucet_collector.get_switch_state(switch, port, host)\n self._augment_state_reply(reply, path)\n return reply",
"def get_state(self):\n return self.wm.state if self.wm else None",
"def get_state(self):\r\n alarm = self._alarm()\r\n return alarm.state",
"def get_state(self):\n pass",
"def getState(self):\r\n return self._get_SS_State()#self.currentState\r",
"def get_state(self):\n return ONEUP_STATES[self.state][0]",
"def currentState(self):\n return self.currentState",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state",
"def _get_state(self):\n return self.__state"
] | [
"0.74889183",
"0.73827314",
"0.73482925",
"0.7328623",
"0.7303987",
"0.72895473",
"0.72895473",
"0.7262649",
"0.72144073",
"0.72110635",
"0.71496564",
"0.7141653",
"0.71394473",
"0.71338475",
"0.7104394",
"0.7095693",
"0.70940304",
"0.70775414",
"0.7067311",
"0.7067311",
"0.7067311",
"0.7067311",
"0.7067311",
"0.7067311",
"0.7067311",
"0.7067311",
"0.7067311",
"0.7067311",
"0.7067311",
"0.7067311"
] | 0.7429907 | 1 |
Takes a List of Tensors and returns a List of mask Tensors with 1 if the input was all zeros (on dimension 2) and 0 otherwise. This is used in the Attention layer to mask the padding observations. | def get_zero_entities_mask(entities: List[torch.Tensor]) -> List[torch.Tensor]:
with torch.no_grad():
if exporting_to_onnx.is_exporting():
with warnings.catch_warnings():
# We ignore a TracerWarning from PyTorch that warns that doing
# shape[n].item() will cause the trace to be incorrect (the trace might
# not generalize to other inputs)
# We ignore this warning because we know the model will always be
# run with inputs of the same shape
warnings.simplefilter("ignore")
# When exporting to ONNX, we want to transpose the entities. This is
# because ONNX only support input in NCHW (channel first) format.
# Barracuda also expect to get data in NCHW.
entities = [
torch.transpose(obs, 2, 1).reshape(
-1, obs.shape[1].item(), obs.shape[2].item()
)
for obs in entities
]
# Generate the masking tensors for each entities tensor (mask only if all zeros)
key_masks: List[torch.Tensor] = [
(torch.sum(ent ** 2, axis=2) < 0.01).float() for ent in entities
]
return key_masks | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build_attention_mask(input_ids): \n attention_masks = [] \n\n # 1 for input and 0 for pad\n for seq in input_ids: \n attention_masks.append([float(i>0) for i in seq])\n\n return attention_masks",
"def get_padding_mask(inputs, padding_value=0):\n mask = tf.cast(tf.equal(inputs, padding_value), 'float32') \n mask = mask[:, tf.newaxis, tf.newaxis, :]\n return mask",
"def attention_mask(x):\n mask = torch.zeros(len(x), len(x[0]))\n for i in range(len(x)):\n try:\n index = np.where(x[i]==1)[0][0]\n mask[i][index:] = -np.inf\n except:\n pass\n return mask",
"def padding_mask(lens):\n bs, max_len = len(lens), max(lens)\n mask = torch.zeros(bs, 1, max_len)\n for i, l in enumerate(lens):\n mask[i, :, :l] = 1\n mask = mask > 0\n return mask",
"def generate_padding_masks(data, pad_value=0):\n with torch.no_grad():\n mask = (data == pad_value).to(data.device).t().unsqueeze(1)\n return mask",
"def get_mask(tensor, padding_idx=0):\n mask = torch.ones(size=list(tensor.size()), dtype=torch.bool)\n mask[tensor == padding_idx] = False \n\n return mask",
"def _tf_mask(self, feats: th.Tensor) -> List[th.Tensor]:\n proj = self.dfsmn(feats, None)[0]\n # N x S*F x T\n masks = self.masks(proj)\n # [N x F x T, ...]\n return th.chunk(masks, self.num_branchs, 1)",
"def create_mask(neurons: list):\n\n return np.zeros_like(neurons[0])",
"def generate_mask(input_tensor: torch.Tensor, sequence_lengths: torch.LongTensor) -> torch.Tensor:\n assert input_tensor.size()[0] == sequence_lengths.size()[0], \\\n f\"Batch size {input_tensor.size()[0]} != number of provided lengths {sequence_lengths.size()[0]}.\"\n\n mask = torch.ones_like(input_tensor, dtype = torch.bool)\n for i, length in enumerate(sequence_lengths):\n mask[i][:, length:] = False\n\n return mask",
"def create_padding_mask(seq):\r\n seq = tf.cast(tf.math.equal(seq, 0), tf.float32)\r\n return seq[:, tf.newaxis, tf.newaxis, :] # (batch_size, 1, 1, seq_len)\r",
"def compute_mask(self, inputs, mask=None):\n if mask is None:\n return None\n if not isinstance(mask, list):\n raise ValueError('`mask` should be a list.')\n if not isinstance(inputs, list):\n raise ValueError('`inputs` should be a list.')\n if len(mask) != len(inputs):\n raise ValueError('The lists `inputs` and `mask` '\n 'should have the same length.')\n if mask[0] is not None:\n raise ValueError('Attention mask should be None.')\n if mask[1] is None:\n return None\n return K.any(mask[1], axis=-1)",
"def apply_mask_to_inputs(self, inputs: tf.Tensor, schema: tf.Tensor) -> tf.Tensor:\n inputs = tf.where(\n tf.cast(tf.expand_dims(schema, -1), tf.bool),\n inputs,\n tf.cast(self.masked_item_embedding, dtype=inputs.dtype),\n )\n return inputs",
"def generate_visual_features_padding_masks(data, pad_value=0):\n with torch.no_grad():\n return (data == pad_value).all(dim=-1).t().to(data.device).unsqueeze(1)",
"def _make_masks(ilens, olens):\n # (B, T_in)\n in_masks = make_non_pad_mask(ilens)\n # (B, T_out)\n out_masks = make_non_pad_mask(olens)\n # (B, T_out, T_in)\n\n return paddle.logical_and(\n out_masks.unsqueeze(-1), in_masks.unsqueeze(-2))",
"def make_padding_mask(input_ids, padding_idx=1):\r\n padding_mask = input_ids.eq(padding_idx)\r\n if not padding_mask.any():\r\n padding_mask = None\r\n return padding_mask",
"def input_mask(self):\n inputs = self.input\n if isinstance(inputs, list):\n return [getattr(x, '_keras_mask', None) for x in inputs]\n else:\n return getattr(inputs, '_keras_mask', None)",
"def make_mask(data, pad):\n\n def subsequent_mask(size):\n \"\"\" helper function for creating the masks. \"\"\"\n attn_shape = (1, size, size)\n subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')\n return torch.from_numpy(subsequent_mask) == 0\n\n mask = (data != pad).unsqueeze(-2)\n mask = mask & Variable(\n subsequent_mask(data.size(-1)).type_as(mask.data))\n return mask",
"def mask(self):\n return list(self._mask_generator())",
"def make_mask(data, pad):\n def subsequent_mask(size):\n \"\"\" helper function for creating the masks. \"\"\"\n attn_shape = (1, size, size)\n subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')\n return torch.from_numpy(subsequent_mask) == 0\n\n mask = (data != pad).unsqueeze(-2)\n mask = mask & Variable(\n subsequent_mask(data.size(-1)).type_as(mask.data))\n return mask",
"def apply_mask(data, mask_func, seed=None):\n shape = np.array(data.shape)\n shape[:-3] = 1\n mask = mask_func(shape, seed)\n return torch.where(mask == 0, torch.Tensor([0]), data), mask",
"def apply_masks(masks):\n masks = ee.List(masks) if isinstance(masks, list) else masks\n first = ee.Image.constant(0)\n\n def compute(mask, first):\n first = ee.Image(first)\n return first.Or(mask)\n\n bad_pixels = ee.Image(masks.iterate(compute, first))\n good_pixels = bad_pixels.Not()\n\n return good_pixels",
"def batch_boolean_mask(mask):\n # [batch_size, num_values]\n mask = tf.to_int32(mask)\n\n # [batch_size]\n num_true = tf.reduce_sum(mask, 1)\n\n # []\n max_true = tf.reduce_max(num_true)\n\n # [batch_size, max_true]\n gathered_mask, true_indices = tf.nn.top_k(mask, max_true)\n gathered_mask = tf.cast(gathered_mask, tf.bool)\n\n return gathered_mask, true_indices",
"def compute_mask(self, inputs, mask=None):\n if self.padding != \"same\":\n raise ValueError(\"Padding mode '%s' not yet supported\" % (\n self.padding,))\n return mask",
"def get_positive_mask(labels):\n batch_shape = tf.shape(labels)[0]\n mask_1 = tf.logical_not(get_negative_mask(labels))\n mask_2 = tf.logical_not(tf.eye(batch_shape, dtype=tf.bool))\n return tf.logical_and(mask_1, mask_2)",
"def zero_by_mask(mask, vals, replace_with=0.0):\n assert mask.dtype == tf.as_dtype(np.bool)\n ms = mask.get_shape().as_list()\n vs = vals.get_shape().as_list()\n mask = tf.ensure_shape(mask, vs[:-1] + [1])\n vals = tf.ensure_shape(vals, ms[:-1] + [vs[-1]])\n vals = tf.where_v2(mask, vals, replace_with)\n return vals",
"def gen_masks(num_masks, features, hidden_layers, hidden_units):\n\n # This array should contain numbers 1-784\n features_indices = []\n for i in range(features):\n features_indices.append(i + 1)\n masks = []\n indices = []\n for i in range(num_masks):\n set_masks = [] # Will contain all masks for the set\n # Randomize the input (and output, since they have to be the same)\n # ordering\n set_features = [] # Input and output node indices for the set\n for index in features_indices:\n set_features.append(index)\n np.random.RandomState(np.random.randint(0, 2**32)).shuffle(\n set_features)\n indices.append(set_features)\n prev_indices = set_features\n for j in range(hidden_layers):\n layer_indices = []\n for k in range(hidden_units):\n # The hidden nodes' indices need to be between the minimum\n # index from the previous layer and one less than the number\n # of features, inclusive.\n layer_indices.append(np.random.randint(low=min(prev_indices),\n high=features))\n mask = np.zeros((len(prev_indices), len(layer_indices)),\n dtype=np.float32)\n for k in range(len(prev_indices)):\n for l in range(len(layer_indices)):\n # The mask value will be one when the autoregressive\n # condition is met.\n mask[k][l] = float(int(prev_indices[k] <= layer_indices[l]))\n mask = tf.convert_to_tensor(mask, dtype=tf.float32)\n set_masks.append(mask)\n prev_indices = layer_indices\n output_mask = np.zeros((len(prev_indices), features), dtype=np.float32)\n for j in range(len(prev_indices)):\n for k in range(len(set_features)):\n output_mask[j][k] = float(int(prev_indices[j] < set_features[k]))\n output_mask = tf.convert_to_tensor(output_mask, dtype=tf.float32)\n set_masks.append(output_mask)\n direct_mask = np.zeros((features, features), dtype=np.float32)\n for j in range(features):\n for k in range(features):\n direct_mask[j][k] = float(int(set_features[j] < set_features[k]))\n direct_mask = tf.convert_to_tensor(direct_mask, dtype=tf.float32)\n set_masks.append(direct_mask)\n masks.append(set_masks)\n return{'masks': masks, 'indices': indices}",
"def _GetDefaultPaddings(self, inputs):\n return tf.zeros(\n tf.concat([tf.shape(inputs)[:-1], [1]], 0), dtype=inputs.dtype)",
"def _GetDefaultPaddings(self, inputs):\n return tf.zeros(\n tf.concat([tf.shape(inputs)[:-1], [1]], 0), dtype=inputs.dtype)",
"def create_mask(shape):\n return np.zeros(shape).astype(bool)",
"def make_pad_mask(lengths:list, xs:torch.Tensor=None, length_dim:int=-1):\n if length_dim == 0:\n raise ValueError(\"length_dim cannot be 0: {}\".format(length_dim))\n\n if not isinstance(lengths, list):\n lengths = lengths.tolist()\n bs = int(len(lengths))\n if xs is None:\n maxlen = int(max(lengths))\n else:\n maxlen = xs.size(length_dim)\n\n seq_range = torch.arange(0, maxlen, dtype=torch.int64)\n seq_range_expand = seq_range.unsqueeze(0).expand(bs, maxlen)\n seq_length_expand = seq_range_expand.new(lengths).unsqueeze(-1)\n mask = seq_range_expand >= seq_length_expand\n\n if xs is not None:\n assert xs.size(0) == bs, (xs.size(0), bs)\n\n if length_dim < 0:\n length_dim = xs.dim() + length_dim\n ind = tuple(\n slice(None) if i in (0, length_dim) else None for i in range(xs.dim())\n )\n mask = mask[ind].expand_as(xs).to(xs.device)\n return mask"
] | [
"0.7192867",
"0.68340164",
"0.6609743",
"0.651477",
"0.64316094",
"0.63892573",
"0.62922674",
"0.6264108",
"0.6230588",
"0.62144625",
"0.61776084",
"0.61677283",
"0.61438",
"0.6140973",
"0.61276275",
"0.611358",
"0.6040983",
"0.6023705",
"0.6021974",
"0.60033125",
"0.59760505",
"0.5943838",
"0.593362",
"0.5890884",
"0.58860064",
"0.58843315",
"0.5880836",
"0.5880836",
"0.5871428",
"0.58468974"
] | 0.684822 | 1 |
Load the configuration file that manages raw data. conf is a dictionary. | def load_config_raw_data(conf):
path = Path(conf["conf_raw_data"])
with open(path) as f:
txt = f.read()
conf = json.loads(txt)
return conf | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_from_conf(self):\r\n raise NotImplementedError",
"def load_from_conf(self):\n raise NotImplementedError",
"def loadConf(self):\n\n with open(self.configFile) as f:\n self.config = json.load(f)",
"def load_conf(self):\n self._read_uconf()",
"def _load_conf(self, conf):\n f = open(self.file, \"w\")\n f.write(conf)\n f.close()",
"def load_config(self):\n if os.path.exists(self.config_file):\n with open(self.config_file) as f:\n conf = json.load(f)\n\n self.update_attributes_from_config(conf)",
"def load_conf(self, filename):\n\n path = \"./source/_0_time_series_class/configuration/\"\n filename = path + filename\n \n with open(filename) as file:\n self.conf = json.loads(file.read())",
"def load_conf():\n if os.path.exists(CONF_FILE):\n with open(CONF_FILE, 'r') as infile:\n return json.load(infile)\n else:\n return {}",
"def load(self):\n with open(self.conf_fname, \"r\") as fd:\n config = json.load(fd)\n \n return config",
"def __read_config(self):\n with open(self.config_file, 'r') as data_file:\n dict = json.load(data_file)\n self.ibooks_doc_root = dict[\"ibooks_doc_root\"]\n self.library_folder = dict[\"library_folder\"]\n self.annotation_folder = dict[\"annotation_folder\"]\n self.tmp_dir = dict[\"tmp_dir\"]",
"def load_config(self):\r\n with open('config.json', 'r') as f:\r\n self.config = json.load(f)",
"def load_config(self):\n pass",
"def _load_from_conf(self, parser, section, db, conf_dir, cloud_confs, conf_file):\n\n iaas = config_get_or_none(parser, section, \"iaas\", self.iaas)\n iaas_url = config_get_or_none(parser, section, \"iaas_url\", self.iaas_url)\n\n sshkey = config_get_or_none(parser, section, \"sshkeyname\", self.keyname)\n localssh = config_get_or_none(parser, section, \"localsshkeypath\", self.localkey)\n ssh_user = config_get_or_none(parser, section, \"ssh_username\", self.username)\n scp_user = config_get_or_none(parser, section, \"scp_username\", self.scp_username)\n bootconf = config_get_or_none(parser, section, \"bootconf\", self.bootconf)\n bootpgm = config_get_or_none(parser, section, \"bootpgm\", self.bootpgm)\n bootpgm_args = config_get_or_none(parser, section, \"bootpgm_args\", self.bootpgm_args)\n hostname = config_get_or_none(parser, section, \"hostname\", self.hostname)\n readypgm = config_get_or_none(parser, section, \"readypgm\", self.readypgm)\n readypgm_args = config_get_or_none(parser, section, \"readypgm_args\", self.readypgm_args)\n iaas_key = config_get_or_none(parser, section, \"iaas_key\", self.iaas_key)\n iaas_secret = config_get_or_none(parser, section, \"iaas_secret\", self.iaas_secret)\n securitygroups = config_get_or_none(parser, section, \"securitygroups\", self.securitygroups)\n\n terminatepgm = config_get_or_none(parser, section, \"terminatepgm\", self.terminatepgm)\n terminatepgm_args = config_get_or_none(parser, section, \"terminatepgm_args\", self.terminatepgm_args)\n\n pgm_timeout = config_get_or_none(parser, section, \"pgm_timeout\", self.pgm_timeout)\n\n local_exe = config_get_or_none_bool(parser, section, \"local_exe\", self.local_exe)\n\n\n allo = config_get_or_none(parser, section, \"allocation\", self.allocation)\n image = config_get_or_none(parser, section, \"image\", self.image)\n cloudconf = config_get_or_none(parser, section, \"cloud\")\n if cloudconf:\n try:\n conf = cloud_confs[cloudconf]\n except:\n raise APIUsageException(\"%s is not a valud cloud description in this plan\" % (cloudconf))\n\n if not iaas:\n iaas = conf.iaas\n if not iaas_url:\n iaas_url = conf.iaas_url\n if not sshkey:\n sshkey = conf.sshkey\n if not localssh:\n localssh = conf.localssh\n if not ssh_user:\n ssh_user = conf.ssh_user\n if not scp_user:\n scp_user = conf.scp_user\n if not iaas_key:\n iaas_key = conf.iaas_key\n if not iaas_secret:\n iaas_secret = conf.iaas_secret\n if not securitygroups:\n securitygroups = conf.securitygroups\n\n if not iaas:\n iaas = db.default_iaas\n if not iaas_url:\n iaas_url = db.default_iaas_url\n if not allo:\n allo = db.default_allo\n if not sshkey:\n sshkey = db.default_sshkey\n if not localssh:\n localssh = db.default_localssh\n if not ssh_user:\n ssh_user = db.default_ssh_user\n if not scp_user:\n scp_user = db.default_scp_user\n if not iaas_key:\n iaas_key = db.default_iaas_key\n if not iaas_secret:\n iaas_secret = db.default_iaas_secret\n if not securitygroups:\n securitygroups = db.default_securitygroups\n if not image:\n image = db.default_image\n if not bootconf:\n bootconf = db.default_bootconf\n if not bootpgm:\n bootpgm = db.default_bootpgm\n if not bootpgm_args:\n bootpgm_args = db.default_bootpgm_args\n if not readypgm:\n readypgm = db.default_readypgm\n if not readypgm_args:\n readypgm_args = db.default_readypgm_args\n if not terminatepgm:\n terminatepgm = db.default_terminatepgm\n if not terminatepgm_args:\n terminatepgm_args = db.default_terminatepgm_args\n if not pgm_timeout:\n pgm_timeout = db.default_pgm_timeout\n\n if not 
local_exe:\n local_exe = db.default_local_exe\n\n\n self.image = image\n self.bootconf = _resolve_file_or_none(conf_dir, bootconf, conf_file)\n self.bootpgm = _resolve_file_or_none(conf_dir, bootpgm, conf_file, has_args=True)\n self.bootpgm_args = bootpgm_args\n self.terminatepgm = _resolve_file_or_none(conf_dir, terminatepgm, conf_file, has_args=True)\n self.terminatepgm_args = terminatepgm_args\n self.pgm_timeout = pgm_timeout\n self.local_exe = local_exe\n\n self.hostname = hostname\n self.readypgm = _resolve_file_or_none(conf_dir, readypgm, conf_file, has_args=True)\n self.readypgm_args = readypgm_args\n self.username = ssh_user\n self.scp_username = scp_user\n self.localkey = _resolve_file_or_none(conf_dir, localssh, conf_file)\n self.keyname = sshkey\n self.allocation = allo\n self.iaas = iaas\n self.iaas_url = iaas_url\n\n self.iaas_secret = iaas_secret\n self.iaas_key = iaas_key\n self.securitygroups = securitygroups\n\n x = config_get_or_none(parser, section, \"iaas_launch\")\n if x:\n if x.lower() == 'true':\n self.iaas_launch = True\n else:\n self.iaas_launch = False\n else:\n if self.hostname:\n self.iaas_launch = False\n else:\n self.iaas_launch = True\n\n # allow the plan to over ride the default image if they want to use a hostname\n if self.iaas_launch is False:\n self.image = None\n\n item_list = parser.items(section)\n deps_list = []\n for (ka,val) in item_list:\n ndx = ka.find(\"deps\")\n if ndx == 0:\n deps_list.append(ka)\n deps_list.sort()\n for i in deps_list:\n deps = config_get_or_none(parser, section, i)\n deps_file = _resolve_file_or_none(conf_dir, deps, conf_file)\n if deps_file:\n parser2 = ConfigParser.ConfigParser()\n parser2.read(deps_file)\n keys_val = parser2.items(\"deps\")\n for (ka,val) in keys_val:\n val2 = config_get_or_none(parser2, \"deps\", ka)\n if val2 is not None:\n bao = BagAttrsObject(ka, val2)\n self.attrs.append(bao)",
"def load(self):\n try:\n _config_file = open(self.config, 'r+')\n data = json.loads(_config_file.read())\n except (ValueError, IOError):\n data = {}\n\n self.update(data)",
"def load( self ):\n ini = codecs.open(self.filename,\"r\",\"utf-8\",errors=\"replace\",buffering=0)\n for l in ini:\n l = l.strip()\n if l:\n (name,value) = l.split(\"=\",1)\n self.conf[name.strip()] = value.strip()\n ini.close()",
"def load_data_conf(self):\n data_file = select_file(os.getcwd())\n if data_file is not None:\n self.load_tab(data_file)\n else:\n msg_window('please select valid data config file')",
"def _load_config(self, conf):\n\t\tself.log.info(\"Loading configuration file...\")\n\n\t\tself.host = conf.get('host', None)\n\t\tself.port = conf.get('port', None)\n\t\tself.password = conf.get('password', None)\n\t\tself.conf_commands = conf.get('commands', None)\n\n\t\tif( self.host is None\n\t\t\t or self.port is None\n\t\t\t or self.password is None\n\t\t\t or self.conf_commands is None):\n\t\t\traise KeyError(\"Could not initialize OBS Client, missing host, port, password, or conf_commands!\")\n\n\t\tself.log.info(\"...Loaded configuration file.\")",
"def _read_uconf(self):\n fname = self.gen_conf.conf_file_name\n fdir = self.gen_conf.conf_file_path\n fpath = os.path.join(fdir, fname)\n \n from pathlib import Path\n cfile = Path(fpath) \n \n if cfile.exists() & cfile.is_file():\n\n self._load_uconf(fpath)\n \n else:\n if cfile.exists():\n raise Exception(\"Configuration file \"+fpath+\" seems to exist\"+\n \" but it is not a file\")\n else:\n print(\"Warning: Configuration file \"+fpath+\" does not exit\")\n print(\"Warning: Placing a default configuration are using it\")\n \n import pkg_resources\n\n resource_package = \"quantarhei\" # Could be any module/package name\n resource_path = '/'.join(('core', 'conf', 'qrhei.py')) \n content = pkg_resources.resource_string(resource_package,\n resource_path)\n\n with open(fpath, \"w\") as f:\n f.write(content.decode(\"utf-8\"))\n \n self._load_uconf(fpath)\n \n #printlog(\"Configuration file: \", fpath, \"loaded\", loglevel=9) ",
"def read_config(self, config_filename):",
"def load_config(self):\n with open(self.TEMPERATURE_CONFIG_FILE_PATH, 'r') as file:\n self.config = json.load(file)",
"def get_config_raw(conf, confvar = 'conf', lconf = None, fexec = True):\n # open and read config file containing a python dictionary\n try:\n s_ = open(conf, \"r\").read()\n except Exception as e:\n print(e)\n sys.exit(1)\n\n # compile and evaluate the dictionary code string and return the dict object\n if fexec:\n return get_config_raw_from_string(s_, confvar = confvar, lconf = lconf)\n # or just return the string\n else:\n return s_",
"def load_conf(self):\n\n self.load_file(self.ini_file)\n self.files = []\n conf_file = open(self.ini_file, \"r\")\n for l in conf_file:\n self.files.append(l.strip())\n conf_file.close()",
"def load_dict(conf):\n conf_dict = dict()\n fr = io.open(conf, 'r', encoding=\"utf8\")\n for line in fr:\n line = line.strip()\n elems = line.split('\\t')\n if elems[0] not in conf_dict:\n conf_dict[elems[0]] = []\n conf_dict[elems[0]].append(elems[1])\n return conf_dict",
"def init_config(self):\n with open(self.config_file, 'r') as fh:\n self.config = json.load(fh, object_pairs_hook=OrderedDict)\n logger.info('Config loaded: %s' % os.path.abspath(self.config_file))",
"def init_config(self):\n with open(self.config_file, 'r') as fh:\n self.config = json.load(fh, object_pairs_hook=OrderedDict)\n logger.info('Config loaded: %s' % os.path.abspath(self.config_file))",
"def load_data_from_config(self):\n\n config_file_name = \"cicada/config/config.yaml\"\n config_dict = None\n self.labels = []\n self.to_add_labels = []\n if os.path.isfile(config_file_name):\n with open(config_file_name, 'r') as stream:\n config_dict = yaml.safe_load(stream)\n print(f\"config_dict {config_dict}\")\n if (config_dict is not None) and config_dict.get(\"dir_name\"):\n self.load_data_from_dir(dir_name=config_dict[\"dir_name\"], method='clear')",
"def get_config_file(conf):\n with open(conf['config'], 'r') as f:\n saved_conf = json.load(f)\n for key, value in conf.items():\n if value is not None:\n saved_conf[key] = value\n return saved_conf",
"def load_config(self):\n conf_file = os.path.join(self._conf_dir, \"dql.json\")\n if not os.path.exists(conf_file):\n return {}\n with open(conf_file, \"r\") as ifile:\n return json.load(ifile)",
"def _load_config():\n fname = _get_config_fname()\n if fname is None or not op.isfile(fname):\n return dict()\n with open(fname, 'r') as fid:\n config = json.load(fid)\n return config",
"def load(cls):\n cls._api_key = \"\"\n cls._token = \"\"\n data = None\n\n try:\n data = literal_eval(cls.config_file.read_text())\n cls._api_key = data[\"key\"]\n cls._token = data[\"token\"]\n except Exception:\n pass\n\n return data"
] | [
"0.7817127",
"0.77862626",
"0.7596874",
"0.74215615",
"0.73315775",
"0.7204006",
"0.7175266",
"0.71260387",
"0.6957915",
"0.6763186",
"0.6753021",
"0.6738471",
"0.6700064",
"0.6662929",
"0.6654338",
"0.6623613",
"0.66206634",
"0.66160893",
"0.6608538",
"0.66063255",
"0.65559274",
"0.65411276",
"0.64762783",
"0.64616984",
"0.64616984",
"0.64484495",
"0.6444935",
"0.644002",
"0.64302605",
"0.63919556"
] | 0.80604255 | 0 |
Load the table specified by the name 'table' (string) as a pandas DataFrame. The name must match one of the keys in the conf raw data file. | def load_raw_table(conf, table):
confrd = load_config_raw_data(conf)
path_table = Path(confrd[table]["path"])
sep = confrd[table]["sep"]
encoding = confrd[table]["encoding"]
df = pd.read_csv(path_table, sep=sep, encoding=encoding)
return df | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_table(**kargs):\n from transformer import dehyphenate\n sep = LoincMTRT.delimit # kargs.get('sep', ',')\n input_dir = kargs.get('input_dir', 'data')\n dehyphen = kargs.get('dehyphenate', True)\n deq = kargs.get('dequote', True)\n one_to_one = kargs.get('one_to_one', True)\n\n df = dp.load_generic(input_file=LoincMTRT.table, sep=sep, input_dir=input_dir) \n if dehyphen: \n df = dehyphenate(df, col=LoincMTRT.col_key) # inplace\n # 12345-7 or 123457 \n df = df.drop_duplicates(keep='last') # drop duplicates\n\n if deq: \n df = dequote(df, col=LoincMTRT.col_value)\n\n if one_to_one: \n df = LoincMTRT.resolve_duplicates(df, verbose=1)\n\n return df",
"def load(file):\n return pq.read_table(file).to_pandas()",
"def read_table(file_name: Union[str, Path], **kwargs):\n\tfile_name = Path(file_name)\n\textension = file_name.suffix\n\tdefault_args = {\n\t\t'.csv': {'delimiter': ','},\n\t\t'.tsv': {'delimiter': '\\t'}\n\t}\n\n\t# arguments = self._cleanArguments(extension, arguments)\n\tfile_name = str(file_name.absolute())\n\tif extension in {'.xls', '.xlsx', '.xlsm'}: # .xlsm is not a typo.\n\n\t\tdf = pandas.read_excel(file_name, **kwargs)\n\telif extension in {'.csv', '.tsv', '.fsv', '.txt'}:\n\t\targuments = {**default_args.get(extension), **kwargs}\n\t\tif 'sheetname' in arguments: arguments.pop('sheetname')\n\t\tdf = pandas.read_table(file_name, **arguments)\n\telif extension == '.pkl':\n\t\tdf = pandas.read_pickle(file_name)\n\telse:\n\t\traise NameError(\"{} does not have a valid extension!\".format(file_name))\n\treturn df",
"def load_table(conn, table_name):\n return pd.read_sql_query(\"SELECT * FROM \" + table_name, conn)",
"def _read_tab(pth):\n if not os.path.exists(pth):\n raise SampleTableFileException(\n \"File does not exist: {}\".format(pth))\n read_csv_kwargs = {\"engine\": \"python\", \"dtype\": str,\n \"index_col\": False, \"keep_default_na\": False,\n \"na_values\": [\"\"]}\n return pd.read_csv(pth, sep=infer_delimiter(pth), **read_csv_kwargs)",
"def load_table(self, db_name, table_name, **kwargs):\n\n # Create Connection\n engine, connection = self.create_connection(db_name)\n\n # Check if table exists and read\n if engine.dialect.has_table(engine, table_name):\n sql = 'SELECT * FROM %s' % table_name\n\n # Prevent duplicate keys\n kwargs.pop(\"sql\", None)\n kwargs.pop(\"con\", None)\n kwargs.pop(\"coerce_float\", None)\n\n result = pd.read_sql(sql=sql, con=connection, coerce_float=True, **kwargs)\n else:\n print(table_name, \"does not exist\")\n result = None\n\n # Close connection\n connection.close()\n\n return result",
"def load_table_as_pd(conn, tablename: str):\n # get table as a pandas dataframe\n statement = f\"\"\"\n SELECT *\n FROM '{tablename}';\n \"\"\"\n df = pd.read_sql_query(statement, conn)\n return df",
"def read(tablename: str()):\n return pd.read_csv(tablename, dtype={'source_id': str})",
"def load_data(database_filepath, table_name):\r\n # instance to the database engine\r\n engine = create_engine('sqlite:///{}'.format(database_filepath))\r\n\r\n # read form the database table\r\n df = pd.read_sql_table(table_name, con=engine)\r\n\r\n return df # return our df\r",
"def load_schema_for_modelling():\n filename = \"modelling_schema.csv\"\n folder = os.path.abspath(os.path.dirname(__file__))\n path = os.path.join(folder, filename)\n return pd.read_csv(path).set_index('table_name')",
"def load(cls, table_name: str, index_col: str = \"operator\"):\n # df = Operator_Table.df\n # df.operator = df.operator.apply(sp.normalize)\n # df.operator_alias = df.operator_alias.apply(sp.normalize)\n # df = df.rename(columns={\"operator_alias\": \"alias\"})\n try:\n import models\n\n cnxn = models.connect_db()\n cnxn[\"Base\"].prepare(Base.metadata.bind)\n op = Operator\n op.cnames()\n # TODO: Connect this up\n\n except KeyError:\n raise KeyError(\n f\"Backend has no column named '{index_col}'. Try passing 'index_col = column_name' to the backend constructor. Available columns are: {df.columns.tolist()}\"\n )\n return df",
"def _load_table(table: Model, directory: Path, format_: str):\n\n if directory is not None:\n print(f\" Loading {table.table_name()}...\")\n in_file = Path(directory) / f\"{table.table_name()}.{format_}\"\n dataset = tablib.Dataset(headers=table.fields()).load(in_file.read_text())\n print(f\" Importing {table.table_name()} into the database...\")\n table.insert_many(dataset.dict).execute()\n print(\" Done.\")\n print(\"=====================\")\n else:\n pass\n # print(dataset.export(\"csv\"))",
"def open_data(table):\n engine = create_engine(myDB, encoding='latin1') \n conn = engine.connect()\n select = conn.execute('select * from ' + table)\n\n df = pd.DataFrame(select.fetchall()) \n df.columns = select.keys()\n\n conn.close()\n return df",
"def table_to_dataframe(file):\n columns = ['instrument', 'dataset', 'flowcell', 'well', \n 'well_tile', 'cell', 'blob', 'position_i', 'position_j',\n 'read', 'quality']\n\n columns_drop = ['instrument', 'flowcell', 'dataset', 'well_tile']\n\n df = pd.read_csv(file, sep='\\s+', header=None, quoting=3)\n df.columns = columns\n df['tile'] = df['well_tile'] % 1000\n df = df.drop(columns_drop, axis=1)\n return df",
"def read_table(cls, filepath_or_buffer, *args, **vargs):\n if filepath_or_buffer.endswith('.csv') and 'sep' not in vargs:\n vargs['sep'] = ','\n df = pandas.read_table(filepath_or_buffer, *args, **vargs)\n labels = df.columns\n return Table([df[label].values for label in labels], labels)",
"def load_table_currency(conn, currency: str):\n # if there is no such table, generate new one\n if not check_table_exist(conn, f\"Rates{currency}\"):\n gen_table_for_currency(conn, currency)\n # get table as a pandas dataframe\n statement = f\"\"\"\n SELECT *\n FROM 'Rates{currency}';\n \"\"\"\n df = pd.read_sql_query(statement, conn)\n # format as Datetime\n df.Date = pd.to_datetime(df.Date)\n return df",
"def load_file(self):\n\n self.df = self.sqlContext.read.csv(self.source, sep=self.sep, header=True, inferSchema=True)",
"def load_data(file_name):\n return Orange.data.Table(file_name)",
"def parse(self):\n if self.filename.endswith('.gz'):\n compression = 'gzip'\n elif self.filename.endswith('.bz2'):\n compression = 'bz2'\n else:\n compression = None\n df = pd.read_table(self.filename, compression=compression)\n\n # drop empty column from extra tab\n df.dropna(axis=1, how='all', inplace=True)\n return df",
"def _load_sample_table(self):\n self.sampleTable = pd.read_table(self.config['sampletable'], sep='\\t', dtype=str)\n self.sampleTable.set_index('sampleID', inplace=True)\n self.samples = self.sampleTable.reset_index().to_dict('records')",
"def load_main_table(table_text):\n\n lines = table_text.split('\\n')\n i = 1\n cols = []\n for thing in lines[1].split('\",\"'):\n if thing in ['C ', 'I ', 'K ', 'E ', 'H ']:\n cols.append(thing.strip() + str(i) + ' ')\n if thing == 'H ':\n i += 1\n else:\n cols.append(thing)\n lines[1] = '\",\"'.join(cols)\n text = \"\\n\".join(lines[1:])\n df = pd.read_csv(StringIO(text))\n df.index = df['Student ID']\n\n return df",
"def read_table(self, db, table_name):\n engine = self.connect_to_database(db=db)\n df = pd.read_sql_table(table_name=table_name, con=engine)\n engine.connect().connection.close()\n return df",
"def loadValueTableFromSqlite(): \n conn = sqlite3.connect(prefix + args.db)\n df = io.read_frame(\"SELECT * FROM value\", conn) \n return df",
"def load_file_to_dataframe(self, file_path: str) -> pd.DataFrame:\n return pd.read_csv(file_path, sep=\"\\t\")",
"def table_to_df(db_name, table_name):\n return sqlContext.table(\"{0}.{1}\".format(db_name, table_name))",
"def OSW2df(osw_file, table_name):\n conn = connOSW(osw_file)\n df = pd.read_sql_query(\"SELECT * FROM \" + table_name, conn)\n conn.close()\n return df",
"def load_luigi_stats(db_path, table):\n engine = create_engine('sqlite:///' + db_path)\n return pd.read_sql_table(table, engine)",
"def load_table(self, table_name):\n LOGGER.info('Loading table %s', table_name)\n table_meta = self.get_table_meta(table_name)\n return _load_csv(self.root_path, table_meta)",
"def table_save_data_frame(self, table_name):\n self.recordset_df = pd.read_sql_table(table_name, self.con)\n return self",
"def load_table(date):\n if os.path.isfile(date+\".table\"):\n file_using = open(date+\".table\", \"r\")\n return create_table(file_using)\n else:\n return False"
] | [
"0.68337256",
"0.66754687",
"0.6670916",
"0.6593603",
"0.65836316",
"0.65808636",
"0.6576015",
"0.65135646",
"0.64685476",
"0.6413054",
"0.6406398",
"0.6404005",
"0.6317207",
"0.6181754",
"0.6180371",
"0.6171104",
"0.61608464",
"0.615348",
"0.6132976",
"0.6097652",
"0.60968494",
"0.6089037",
"0.6072367",
"0.6056862",
"0.6040255",
"0.6022786",
"0.60225147",
"0.60202515",
"0.60200864",
"0.60154223"
] | 0.8200111 | 0 |
Lists the contents of the HydroShare iRODS userspace. | def ils(self):
cmd = Popen(['ils'], stdout=PIPE, stderr=STDOUT, shell=True)
stdout = cmd.communicate()[0].decode('ascii')
if cmd.returncode != 0:
print('Failed to fetch irods file list: %s' % stdout)
return []
return [s.replace('C-', '').strip() for s in
stdout.split('\n')[1:] if s != '']
print('Not Implemented') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def list():\n rino.remote.list()",
"def list_data(self):\n with self.read():\n keys = self.handle.keys()\n return [i.lstrip('/') for i in keys]",
"def list():",
"def list():",
"def list(self):",
"def list():\n data = getInstaData()\n return render_template(\"list.html\", data=data)",
"def ls():",
"def list_contents(reader: UFOReader) -> list[str]:\n return reader.getImageDirectoryListing() # type: ignore",
"def list():\n index = 0\n while True:\n node = Node.from_index(index)\n if os.path.exists(node.path()):\n click.echo(f'{index}: node_{index}')\n click.echo(run_lncli(node, 'getinfo | jq .identity_pubkey'))\n else:\n break\n index += 1",
"def list():\n rino.login.list()",
"def guestlist_handler(userdata, *args):\n\t\tfor guest in userdata[\"guestlist\"]:\n\t\t\tprint(shlex.quote(guest), end=\" \")\n\t\t\n\t\tprint()",
"def get_listfile(self, datadir):\n return []",
"def _list(self, irc, msg, args):\n # TODO: write _list; use local.punny modules print/list if avail\n pass",
"def ls():\n # TODO: listing all availabe containers form sequence\n return",
"def list(config, username, hostname):\n if (not username and not hostname) or (username and hostname):\n print 'Usage: igor permissions list [OPTIONS]'\n print\n print 'Error: Exactly one of --username or --hostname is required.'\n exit()\n\n if username:\n response = make_api_request('GET', config, '/users/' + username +\n '/machines')\n machines = response.json()['machines']\n for machine in machines:\n print machine['hostname']\n elif hostname:\n response = make_api_request('GET', config, '/machines/' + hostname +\n '/users')\n users = response.json()['users']\n for user in users:\n print user['username']",
"def list():\n\n\treturn netifaces.interfaces()",
"def interface_list() -> List[str]:\n cmd_ip = system_command('ip')\n command = f\"{cmd_ip} -o addr show up primary scope global\".split()\n result = SUDO.execute_unit(command)\n result.assert_return()\n line_list = result.stdout.splitlines()\n pattern = re.compile(r\"^\\d+[:]\\s+(\\S+)\\s+(.+)$\")\n select = lambda line: pattern.search(line).group(1)\n face_list = list(map(select, line_list))\n return face_list",
"def command_ls(self, list_what):\n if list_what in ('available', 'mounted', 'unmounted'):\n callback = getattr(self.environment, 'get_%s_ids' % list_what)\n lst = callback()\n else:\n lst = []\n if len(lst) != 0:\n print((\"\\n\".join(lst)))",
"def list(self, req, resp):\n interfaces = []\n for e in EntryPoints('tachyonic.element.interfaces'):\n interfaces.append({'id': e, 'name': e})\n return raw_list(req, interfaces)",
"def action_list():\n\n def parse_line(line):\n return [_.strip() for _ in line.split() if len(_) > 0]\n\n cmd = \"/usr/sbin/xl list\"\n try:\n _out = run(cmd).split('\\n')\n except Exception:\n out = None\n else:\n out = []\n\n # get headers from the first line\n headers = parse_line(_out[0])\n\n for line in _out[1:]:\n if len(line.strip()) > 0:\n # merge header's name with vm informations into a usable dict\n out.append(dict(zip(headers, parse_line(line))))\n\n return out",
"def dir_list(self, instance, where):\n\n instance = self.get_instance(instance)\n output = ''\n\n try:\n if instance.get('address'):\n username = instance.get('address') + \"@\" + instance.get('credentials').get('username')\n key = instance.get('credentials').get('publickey')\n output = subprocess.check_output([\"ssh\", key, username, 'ls', self.default_path_aws + where]).decode(\n \"utf-8\")\n else:\n username = 'ubuntu@' + instance.get('credentials').get('EC2_ACCESS_ID')\n key = instance.get('credentials').get('EC2_SECRET_KEY')\n # output = os.popen(\"ls\"+ \" | \" + \"ssh\"+ \" -i \"+ key +\" \"+ username).read()\n output = subprocess.check_output(\n [\"ssh\", \"-i\", key, username, 'ls', self.default_path_aws + where]).decode(\"utf-8\")\n return output\n except:\n return \"Fail to access the instance\"",
"def do_list(self, smth):\n def print_region(reader, start, size, location):\n print(\" %s - %s (%d bytes)\" % (reader.FormatIntPtr(start),\n reader.FormatIntPtr(start + size),\n size))\n print(\"Available memory regions:\")\n self.reader.ForEachMemoryRegion(print_region)",
"def lsinfo(name):",
"def user_list(server_object, client, address, command_args):\n\n\tmsg = \"\"\n\n\t#: Create a formatted string of all the users.\n\tfor usr in server_object.usrs.values():\n\t\tmsg += usr + '\\n'\n\n\tclient.send(msg.encode())",
"def do_list(args):\n session = BMC(server=args.server, username=args.username, password=args.password)\n for i in session.list(args.path):\n print(i)",
"def do_list(self, _):\n devices = []\n for source in self._target.devices:\n devices.append({\n 'name': source.device['name'],\n 'path': source.device['path'],\n })\n return devices",
"def get_volume_list():\n return parse_list_output(Popen('cinder list --all-tenants'.split(),\n stdout=STDOUT, stderr=STDERR).communicate()[0])",
"def read(self):\n return list(self.pile_list)",
"def ls(filter=None):",
"def list(self, subcmd):\n\n self.__connect_db()\n tariffs = []\n\n for tariff in self.db.get_tariffs():\n tariffs.append(tariff.name)\n print(tariff.name)\n\n #print(\"\\n\".join(sorted(tariffs)))"
] | [
"0.6433237",
"0.6131444",
"0.6028496",
"0.6028496",
"0.60075146",
"0.5948483",
"0.5939381",
"0.591174",
"0.58719695",
"0.5800662",
"0.5770601",
"0.56504005",
"0.5648978",
"0.5616612",
"0.55690056",
"0.5552401",
"0.5549238",
"0.55253196",
"0.54935414",
"0.5482844",
"0.5472509",
"0.546833",
"0.5462314",
"0.5448469",
"0.544565",
"0.54382944",
"0.54336923",
"0.54134554",
"0.5410456",
"0.53984404"
] | 0.6275398 | 1 |
Prints help for a specified tool. | def print_specific_help(tool_name):
if tool_name not in AvailableCommands.commands:
print 'Command is not supported: {0}'.format(tool_name)
return
cmd = AvailableCommands.commands[tool_name]
print 'Usage of {0}:'.format(cmd.name)
print '\nAccepted input types:\n{0}'.format(str(list(cmd.input_types)))
print '\nOutput types:\n{0}'.format(str(cmd.output_types))
print '\nMandatory arguments:\n{0}'.format(str(cmd.user_mandatory_args))
print '\nOptional arguments:\n{0}'.format(str(cmd.user_optional_args))
print '\nParallelizable:\n{0}'.format(str(cmd.parallelizable))
print '\nAdditional description:\n{0}'.format(str(cmd.help_description))
print '' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def print_generic_help():\r\n print ART_NAME\r\n print 'Version {1}\\nby {2}'.format(NAME, VERSION, AUTHOR)\r\n print DESCRIPTION\r\n tools = sorted(AvailableCommands.commands.keys(), key=lambda v: v.upper())\r\n # Do not show CUSTOM command in the help\r\n tools.remove('CUSTOM')\r\n tools.remove('CUSTOM_NO_OUTPUT')\r\n print '\\n\\nSupported tools are:\\n{0}'.format('\\n'.join(tools))\r\n print '\\nHint: Check tool specific help with --help <tool_name>\\n'",
"def GetToolShortHelp(self, tool_id):\r\n\r\n tool = self.FindTool(tool_id)\r\n if not tool:\r\n return \"\"\r\n\r\n return tool.short_help",
"def printHelp(self,):\n print man\n return 0",
"def printhelp():",
"def print_help():\n parser = parsersetup()\n parser.print_help()",
"def print_help():\n\tprint(\"Help text\")",
"def help():\n print(UI.HELP)",
"def print_help(self, prog_name, subcommand):\n parser = self.create_parser(prog_name, subcommand)\n parser.print_help()",
"def print_help(self, prog_name, subcommand):\n parser = self.create_parser(prog_name, subcommand)\n parser.print_help()",
"def print_help(self, prog_name, subcommand):\n parser = self.create_parser(prog_name, subcommand)\n parser.print_help()",
"def print_help(self, prog_name, subcommand):\r\n parser = self.create_parser(prog_name, subcommand)\r\n parser.print_help()",
"def do_print_help(parser):\n string_io = StringIO()\n parser.print_help(file=string_io)\n return string_io.getvalue()",
"def help(self, dummy):\r\n help = self.doc + \"\\n\"\r\n if help.find(\"%s\") > 0:\r\n help = help.replace(\"%s\", self.progname)\r\n print_function(help, end='', file=self.stdout)\r\n self.exit(0)",
"def help(self, plugin):\n plug = plugin_source.load_plugin(plugin)\n plug.help()",
"def PrintHelp(self):\n self._parser.print_help()",
"def help(command=None):\n if command is None: \n # print first line of docstring\n for cmd in commands:\n ds = commands[cmd].__doc__.split('\\n')[0]\n print \"%-16s %s\" % (cmd,ds)\n else:\n print commands[command].__doc__",
"def help():",
"def _help(self):\n self.onecmd('help')",
"def help_help(self):\n print(\"List commands or print details about a command\")",
"def print_help(self):\n print self.get_help()",
"def help():\n print \"Help comes to those who ask\"",
"def print_mini_help(app_name):\n print \"\\nExecute the script with either '-h' or '--help' to obtain detailed help on how to run the script:\"\n print 'python {0} -h'.format(app_name)\n print \"or\"\n print 'python {0} --help\\n'.format(app_name)",
"def print_help(self):\n self.parser.print_help()",
"def print_help(self):\n\n print((\"Help is not defined for command \" + self.command))",
"def helpMe():\n print('')\n os.system('python2 ' + program + ' -h')\n print('')",
"def show_help():\n pass",
"def print_help():\n\n print(\"Mailroom Usage: <name>:add a donor and donation h:help l:list\"\n \"donors r:print report q:quit\")",
"def ShortHelp(doexit=True):\n print(parser.description)\n print()\n parser.print_usage()\n print()\n print(\"For advanced help use '{prog} -H' or '{prog} --full-help'\".format(prog=os.path.basename(sys.argv[0])))\n if doexit:\n sys.exit(ExitCode.OK)",
"def command_help(self):\n print(\"Command \", self)\n print(\"\\t\\thelp (Get help for command)\")\n\n params = self.params.copy()\n del params[\"help\"]\n\n if len(params) == 0:\n print(\"This command has no parameters\")\n return\n\n print(\"Parameters:\")\n for info in params.values():\n print(\" %s\" % info.get_basic_info())\n description = info.get_desc()\n if description != \"\":\n print(textwrap.fill(description,\n initial_indent=\" \",\n subsequent_indent=\" \",\n width=70))",
"def SetToolShortHelp(self, tool_id, help_string):\r\n \r\n tool = self.FindTool(tool_id)\r\n if tool:\r\n tool.short_help = help_string"
] | [
"0.7634922",
"0.72920835",
"0.72188467",
"0.71694416",
"0.7161719",
"0.71323955",
"0.70277065",
"0.7019475",
"0.7019475",
"0.7019475",
"0.69998395",
"0.69744694",
"0.6969143",
"0.6901238",
"0.6899467",
"0.6888493",
"0.68664163",
"0.6864778",
"0.68607914",
"0.6853852",
"0.67757356",
"0.6768491",
"0.6744065",
"0.6710773",
"0.67106175",
"0.6687847",
"0.66811496",
"0.6665168",
"0.66607976",
"0.66362774"
] | 0.83063513 | 0 |
Generates command line objects to compress/decompress a workflow. | def generate_compression_command_line_objects(dir_stack, command_line_parameters):
# Generate command lines
threads = []
thread_sizes = []
first_d = True
for d in dir_stack:
if first_d:
first_d = False
continue
if not os.path.isdir(d.path): continue
# Iterate over files in current directory and and generate (de)compression
# command line for files that are in suitable format
while True:
try:
# The command instance is generated without exceptions if the
# command execution has failed (i.e. expected output
# file does not exist). Otherwise NewFileError is raised.
if command_line_parameters.compress_run == 'compress':
command_line = AvailableCommands.commands['gzip']('', d, d)
elif command_line_parameters.compress_run == 'decompress':
command_line = AvailableCommands.commands['gzip']('-d', d, d)
except STAPLERerror.NewFileExists:
pass
except STAPLERerror.VirtualIOError:
break
abs_file_path = os.path.join(d.path, command_line.out_cmd['-!i'])
# Create new thread for current command if new threads can be created
if len(threads) < command_line_parameters.max_job_count or command_line_parameters.max_job_count is None:
threads.append([command_line])
thread_sizes.append(os.stat(abs_file_path).st_size)
# If max number of threads have been created, add command to the thread
# with the least amount of data to handle
else:
threads[thread_sizes.index(min(
thread_sizes))].append(command_line)
thread_sizes[thread_sizes.index(min(thread_sizes))] += \
os.stat(abs_file_path).st_size
# Report if no proper input files have been found
if not threads and command_line_parameters.compress_run == 'compress':
raise STAPLERerror('Workflow does not contain any files that can be compressed.')
if not threads and command_line_parameters.compress_run == 'decompress':
raise STAPLERerror('Workflow does not contain any files that can be decompressed.')
# Calculate & report estimated run time for the current job
if command_line_parameters.compress_run == 'compress':
# Assume that gzip compression speed is 20Mb per second (should
# give plenty of time for modern processors)
est_run_time = 'Estimated recommended run time for this job is (hh:mm:ss):\n' \
'{0}'.format(datetime.timedelta(seconds=(max(thread_sizes) / 20000000) + 60))
else:
# Assume that gzip decompression speed is 60Mb per second (should
# give plenty of time for modern processors)
est_run_time = 'Estimated recommended run time for this job is (hh:mm:ss):\n' \
'{0}'.format(datetime.timedelta(seconds=(max(thread_sizes) / 60000000) + 60))
print est_run_time
logging.info(est_run_time)
workloads = [threads]
return workloads | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def main():\n import argparse\n\n parser = argparse.ArgumentParser(\n description='Use the machine learning meta library shrynk to compress'\n )\n subparsers = parser.add_subparsers(dest=\"command\")\n compress = subparsers.add_parser('compress')\n compress.add_argument('file', help='file you want to compress')\n compress.add_argument('--size', '-s', default=3, type=int, help='Size weight for model')\n compress.add_argument('--write', '-w', default=1, type=int, help='Write-time weight for model')\n compress.add_argument('--read', '-r', default=1, type=int, help='Read-time weight for model')\n decompress = subparsers.add_parser('decompress')\n decompress.add_argument('file', help='file you want to decompress')\n benchmark = subparsers.add_parser('benchmark')\n benchmark.add_argument('file', help='file you want to benchmark')\n benchmark.add_argument('--size', '-s', default=3, type=int, help='Size weight for model')\n benchmark.add_argument('--write', '-w', default=1, type=int, help='Write-time weight for model')\n benchmark.add_argument('--read', '-r', default=1, type=int, help='Read-time weight for model')\n benchmark.add_argument('--predict', help='Read-time weight for model', action=\"store_true\")\n benchmark.add_argument('--save', help='Read-time weight for model', action=\"store_true\")\n args = parser.parse_args()\n if args.command == \"compress\":\n data = load(args.file)\n print(save(data, args.file, size=args.size, write=args.write, read=args.read))\n if args.command == \"decompress\":\n data = load(args.file)\n if \"json\" in args.file:\n ext = \"json\"\n kwargs = {\"compression\": None}\n end = args.file.index(\".\" + ext)\n destination = args.file[:end] + \".\" + ext\n elif \"csv\" in args.file or \"parquet\" in args.file:\n ext = \"csv\"\n kwargs = {\"engine\": \"csv\", \"compression\": None}\n end = args.file.index(\".\" + ext)\n destination = args.file[:end] + \".\" + ext\n else:\n kwargs = {\"compression\": None}\n destination = \".\".join(args.file.split(\".\")[:-1])\n save(data, destination, kwargs)\n elif args.command == \"benchmark\":\n if args.predict:\n data = load(args.file)\n print(\"Predicted:\", infer(data, size=args.size, write=args.write, read=args.read))\n if args.save:\n bench = run_benchmarks(args.file)\n bench = pd.DataFrame(bench, columns=[\"kwargs\", \"size\", \"write_time\", \"read_time\"])\n return print(add_z_to_bench(bench, args.size, args.write, args.read))\n else:\n print(show_benchmark(args.file, size=args.size, write=args.write, read=args.read))",
"def cli(yamlfile, **args):\n print(LogicProgramGenerator(yamlfile, **args).serialize(**args))",
"def generate():\n PackCommandExecutor().pack()\n GenerateCommandExecutor().generate()",
"def main():\n run_time_str = datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\n log = _prepare_logging()\n Args = collections.namedtuple(\n \"Args\",\n (\n \"input_paths\",\n \"output_path\",\n \"root_directory\",\n \"ignore_dotfiles\",\n \"ignore_windows_volume_folders\",\n ),\n )\n # If we are running from Mac Automator, take file paths from sys.argv\n if check_running_from_automator():\n # Example sys.argv for two files selected: ['-c', '/absolute/path/1.txt',\n # '/absolute/path/to/2.txt']\n args = Args(\n input_paths=sys.argv[1:],\n output_path=None,\n root_directory=False,\n ignore_dotfiles=False,\n ignore_windows_volume_folders=False,\n )\n # Otherwise, use argparse and allow for some additional options\n else:\n parser = argparse.ArgumentParser()\n parser.add_argument(\"input_paths\", nargs=\"+\", help=\"Items to compress\")\n parser.add_argument(\"-o\", \"--output_path\", \"--output\", help=\"Filename for zip\")\n parser.add_argument(\n \"-d\",\n \"--root-directory\",\n action=\"store_true\",\n help=\"Place all files in zip within a shared parent folder\",\n )\n parser.add_argument(\n \"--ignore-dotfiles\",\n action=\"store_true\",\n help=\"Ignore files and folders beginning with '.' (typically these are hidden folders)\",\n )\n parser.add_argument(\n \"--ignore-windows-volume-folders\",\n action=\"store_true\",\n help=(\n \"Ignore folders named 'System Volume Information' and '$RECYCLE.BIN' (typically\"\n \" these contain hidden system information)\"\n ),\n )\n\n parsed_args = parser.parse_args()\n args = Args(**vars(parsed_args))\n\n # Check passed arguments and return if issues\n if get_missing_sources(args.input_paths):\n printer(\n \"Path(s) {} not found\".format(get_list_as_str(get_missing_sources(args.input_paths))),\n \"error\",\n True,\n )\n return\n\n # Set path separator based on OS\n if platform.system() == \"Windows\":\n path_separator = \"\\\\\"\n else:\n path_separator = \"/\"\n\n # Convert input paths into absolute paths\n input_paths = [os.path.abspath(path) for path in args.input_paths]\n\n # Set output path\n if args.output_path is not None:\n output_path = args.output_path\n output_directory = os.path.dirname(output_path)\n else:\n if check_running_from_automator():\n # Last item in the list of arguments will be the last item clicked in Finder\n output_directory = os.path.dirname(input_paths[-1])\n else:\n output_directory = \".\"\n if len(input_paths) == 1:\n output_filename = os.path.basename(\"{}.zip\".format(input_paths[0]))\n else:\n output_filename = \"{}_archive.zip\".format(run_time_str)\n output_path = get_safe_file_path(os.path.join(output_directory, output_filename))\n printer(\"Zip file will be created at path '{}'\".format(output_path), \"info\")\n\n # Create zipfile and get file_hash_dict info for subsequent verification\n try:\n file_hash_dict, total_file_count = create_zip(\n output_path,\n input_paths,\n args.ignore_dotfiles,\n args.ignore_windows_volume_folders,\n args.root_directory,\n path_separator,\n )\n except:\n # Log the exception to a file, so we can view later if running from Automator\n error_log_file_path = os.path.join(\n output_directory, \"{}_verizip_error.txt\".format(run_time_str)\n )\n error_log_handler = logging.FileHandler(error_log_file_path)\n error_log_handler.setLevel(logging.ERROR)\n error_log_handler.setFormatter(\n logging.Formatter(\"%(asctime)s - %(levelname)s - %(message)s\")\n )\n log.addHandler(error_log_handler)\n log.exception(\"Exception occurred during creation of zip file '%s':\", output_path)\n 
printer(\n \"Error occurred - see '{}'\".format(os.path.abspath(error_log_file_path)), \"error\", True\n )\n if os.path.isfile(output_path):\n os.remove(output_path)\n return\n printer(\"'{}' finalised - will now be verified\".format(output_path), \"info\")\n\n # Get hashes of files within finalised zip\n zip_hash_dict = {}\n with zipfile.ZipFile(output_path, \"r\") as zip_handler:\n zip_file_listing = zip_handler.namelist()\n zip_file_count = 0\n for file_within_zip in zip_file_listing:\n # Todo: confirm no 'file_info.is_dir()' type check needed here - don't believe so, as\n # only files with paths are being added, rather than directories as separate archive\n # items\n zip_file_count += 1\n hash_value = hash_file_in_zip(zip_handler, file_within_zip)\n if hash_value not in zip_hash_dict:\n zip_hash_dict[hash_value] = []\n zip_hash_dict[hash_value].append(file_within_zip)\n\n # Verify that hashes from source files match those for compressed files within newly-created zip\n if file_hash_dict == zip_hash_dict and total_file_count == zip_file_count:\n printer(\"Verification complete; no discrepancies identified\", \"info\")\n printer(\"'{}' created successfully\".format(output_path), \"info\", True)\n else:\n error_log_file_path = os.path.join(\n output_directory, \"{}_verizip_error.txt\".format(run_time_str)\n )\n with open(error_log_file_path, \"w\") as error_log_file_handler:\n for hash_value, file_paths in file_hash_dict.items():\n if hash_value not in zip_hash_dict:\n error_log_file_handler.write(\n \"Hash '{}' not present in zip file (with expected files {})\\n\".format(\n hash_value, get_list_as_str(file_paths)\n )\n )\n elif sorted(file_paths) != sorted(zip_hash_dict[hash_value]):\n error_log_file_handler.write(\n \"Files for hash '{}' do not match between source and zip ({} in source - {}\"\n \" in zip)\\n\".format(hash_value, file_paths, zip_hash_dict[hash_value])\n )\n printer(\n \"'{}' failed verification - see error log at '{}'\".format(\n output_path, os.path.abspath(error_log_file_path)\n ),\n \"error\",\n True,\n )\n os.remove(output_path) # Delete the zip that failed verification",
"def pack():\n clean_local()\n build()\n copy_json()\n optimize()\n tarball()",
"def main():\n parser = argparse.ArgumentParser()\n parser.add_argument('action', choices=['encrypt', 'decrypt'])\n parser.add_argument('environment', choices=['staging', 'prod'])\n parser.add_argument('-v', \"--verbose\", help=\"increase output verbosity\",\n action=\"store_true\")\n parser.add_argument('-f', '--folder', action='store', type=str, required=True,\n help='Output folder for results.')\n parser.add_argument('-a', '--app', action='store', type=str, required=False,\n help='Application name without spaces. This name will be appended to output files')\n args = parser.parse_args()\n encode_decode(args)\n return 0",
"def main() -> int:\n parser = argparse.ArgumentParser()\n parser.add_argument('--test_cmds', required=True,\n help='File containing --debug output')\n parser.add_argument('--output', required=True,\n help='Makefile to be constructed')\n parser.add_argument(\"--discard_stdstreams\", action='store_true',\n help='Redirect stdstreams to /dev/null')\n args = parser.parse_args()\n\n transform(args.discard_stdstreams, args.test_cmds, args.output)\n return 0",
"def generate_command_line_objects(input_file_parameters, dir_stack, auto_split_workflows):\r\n workflows = []\r\n prev_number_of_ids_per_command = None\r\n prev_command_had_output_dir = True\r\n first_command = True\r\n # Bools for splitting workflow. Separate values for automatically splitting workflow and\r\n # user defined splits, as user defined splits are applied in 'default' execute_mode, and\r\n # autosplits only when workflow is parallelized\r\n splitting_workflow_automatically = False\r\n user_splitting_workflow = False\r\n no_command_has_required_output_dir = True\r\n j = 0\r\n dir_stack_index = -1\r\n for current_command_type in input_file_parameters.commands:\r\n # Infer split points of workflow\r\n # Split workflow if user has inserted the SPLIT keyword in the STAPLEfile\r\n if current_command_type == 'SPLIT':\r\n user_splitting_workflow = True\r\n continue\r\n\r\n # If previous command had no output directory (i.e. output is created\r\n # to input directory), there is no need to increment the dir_stack index\r\n if prev_command_had_output_dir:\r\n dir_stack_index += 1\r\n\r\n # Reset id number tracking if workflow is split\r\n if splitting_workflow_automatically or user_splitting_workflow:\r\n first_command = True\r\n prev_number_of_ids_per_command = None\r\n\r\n current_step_commands = []\r\n command_type, command_parameters = \\\r\n utils.parse_staplefile_command_line(current_command_type)\r\n in_dir = dir_stack[dir_stack_index]\r\n if command_type.require_output_dir:\r\n out_dir = dir_stack[dir_stack_index+1]\r\n prev_command_had_output_dir = True\r\n no_command_has_required_output_dir = False\r\n else:\r\n out_dir = in_dir\r\n prev_command_had_output_dir = False\r\n #Read files until command class finds no more valid input files\r\n while True:\r\n try:\r\n current_command = command_type(command_parameters, in_dir, out_dir)\r\n # Check if workflow should be split (if user has defined automatic splitting)\r\n if not first_command and auto_split_workflows:\r\n if len(current_command.command_ids) > prev_number_of_ids_per_command:\r\n splitting_workflow_automatically = True\r\n\r\n current_step_commands.append(current_command)\r\n logging.info('-'*80)\r\n logging.info('User command line:\\n{0}'.format(input_file_parameters.commands[dir_stack_index]))\r\n logging.info('Final command line(s):\\n{0}'.format(\r\n '\\n'.join(current_command.command_lines)))\r\n logging.info('Input directory is:\\n{0}'.format(in_dir.path))\r\n logging.info('Output directory is:\\n{0}'.format(out_dir\r\n .path))\r\n j += 1\r\n print 'Created command line number {0} for {1}...'\\\r\n .format(j, command_type.name)\r\n except STAPLERerror.NewFileExists as existing_file_name:\r\n if no_command_has_required_output_dir:\r\n raise STAPLERerror.STAPLERerror('Starting point directory '\r\n 'already contains file '\r\n 'name {0}, which {1} '\r\n 'command would overwrite. '\r\n 'Either remove {1} from '\r\n 'this workflow or remove '\r\n '{0} and similar files '\r\n 'from the starting point '\r\n 'directory. Notice that '\r\n '--remove command will '\r\n 'not delete any files '\r\n 'from the starting point '\r\n 'directory.'\r\n .format(existing_file_name,\r\n command_type.name))\r\n raise STAPLERerror.STAPLERerror('File with name {0} already '\r\n 'exists in the output '\r\n 'directory {1}. 
Remove the '\r\n 'existing workflow or use the '\r\n '--fix_run feature to create '\r\n 'a fixed run.'.format(existing_file_name, out_dir.path))\r\n except STAPLERerror.VirtualIOError:\r\n break\r\n except STAPLERerror.NotConfiguredError:\r\n raise STAPLERerror.STAPLERerror('Trying to create command '\r\n 'lines for {0}, '\r\n 'but config.txt is missing '\r\n 'configuration for this '\r\n 'command. Edit config.txt '\r\n 'appropriately or refer to '\r\n 'manual to see how '\r\n 'to do this.'.format(command_type.name))\r\n if not current_step_commands:\r\n if command_type.name == 'custom':\r\n raise STAPLERerror.STAPLERerror(\r\n 'No proper existing or predicted '\r\n 'input files were found for '\r\n 'command\\n{0}\\n in the input '\r\n 'directory:\\n{1}\\n. Please revise the command line '\r\n 'by setting desired input file types to input '\r\n 'keywords e.g. $INPUT.fastq\\nInput '\r\n 'directory is predicted to '\r\n 'contain the following files:\\n{2}'.format(command_parameters,\r\n in_dir.path,\r\n ', '.join(in_dir.file_names.keys())))\r\n else:\r\n raise STAPLERerror.STAPLERerror('No proper existing or predicted '\r\n 'input files were found for '\r\n 'command {0} in the input '\r\n 'directory:\\n{1}\\nThis command '\r\n 'takes input files only in the '\r\n 'following formats:\\n{2}\\nInput '\r\n 'directory is predicted to '\r\n 'contain the following files:\\n{3}'.format(command_type.name,\r\n in_dir.path,\r\n '\\n'.join(command_type.input_types),\r\n ', '.join(in_dir.file_names.keys())))\r\n if first_command:\r\n workflows.append([current_step_commands])\r\n first_command = False\r\n else:\r\n if not splitting_workflow_automatically and not user_splitting_workflow:\r\n workflows[-1] += [current_step_commands]\r\n else:\r\n workflows.append([current_step_commands])\r\n prev_number_of_ids_per_command = len(current_command.command_ids)\r\n splitting_workflow_automatically = False\r\n user_splitting_workflow = False\r\n\r\n return workflows, dir_stack",
"def main():\n # Define Parser object and add to Toil\n parser = argparse.ArgumentParser(description=main.__doc__, formatter_class=argparse.RawTextHelpFormatter)\n subparsers = parser.add_subparsers(dest='command')\n # Generate subparsers\n subparsers.add_parser('generate-config', help='Generates an editable config in the current working directory.')\n subparsers.add_parser('generate-manifest', help='Generates an editable manifest in the current working directory.')\n subparsers.add_parser('generate', help='Generates a config and manifest in the current working directory.')\n # Run subparser\n parser_run = subparsers.add_parser('run', help='Runs the BWA alignment pipeline')\n group = parser_run.add_mutually_exclusive_group()\n parser_run.add_argument('--config', default='config-toil-bwa.yaml', type=str,\n help='Path to the (filled in) config file, generated with \"generate-config\".')\n group.add_argument('--manifest', default='manifest-toil-bwa.tsv', type=str,\n help='Path to the (filled in) manifest file, generated with \"generate-manifest\". '\n '\\nDefault value: \"%(default)s\".')\n group.add_argument('--sample', nargs='+', action=required_length(2, 3),\n help='Space delimited sample UUID and fastq files in the format: uuid url1 [url2].')\n # Print docstring help if no arguments provided\n if len(sys.argv) == 1:\n parser.print_help()\n sys.exit(1)\n Job.Runner.addToilOptions(parser_run)\n args = parser.parse_args()\n # Parse subparsers related to generation of config and manifest\n cwd = os.getcwd()\n if args.command == 'generate-config' or args.command == 'generate':\n generate_file(os.path.join(cwd, 'config-toil-bwa.yaml'), generate_config)\n if args.command == 'generate-manifest' or args.command == 'generate':\n generate_file(os.path.join(cwd, 'manifest-toil-bwa.tsv'), generate_manifest)\n # Pipeline execution\n elif args.command == 'run':\n require(os.path.exists(args.config), '{} not found. Please run generate-config'.format(args.config))\n if not args.sample:\n args.sample = None\n require(os.path.exists(args.manifest), '{} not found and no sample provided. '\n 'Please run \"generate-manifest\"'.format(args.manifest))\n # Parse config\n parsed_config = {x.replace('-', '_'): y for x, y in yaml.load(open(args.config).read()).iteritems()}\n config = argparse.Namespace(**parsed_config)\n config.maxCores = int(args.maxCores) if args.maxCores else sys.maxint\n samples = [args.sample[0], args.sample[1:]] if args.sample else parse_manifest(args.manifest)\n # Sanity checks\n require(config.ref, 'Missing URL for reference file: {}'.format(config.ref))\n require(config.output_dir, 'No output location specified: {}'.format(config.output_dir))\n # Launch Pipeline\n Job.Runner.startToil(Job.wrapJobFn(download_reference_files, config, samples), args)",
"def run(self):\n self.compress(\n self.__config.public_key(),\n self.__config.input_dir(),\n self.__config.output_dir(),\n self.__config.suffix()\n )",
"def cmdline_main():\r\n import sys\r\n if (len(sys.argv) < 2 or len(sys.argv) > 4 or \"--help\" in sys.argv or\r\n \"-h\" in sys.argv or sys.argv[1] not in (\"-c\", \"-d\")):\r\n print(\"Usage: python -m snappy <-c/-d> [src [dst]]\")\r\n print(\" -c compress\")\r\n print(\" -d decompress\")\r\n print(\"output is stdout if dst is omitted or '-'\")\r\n print(\"input is stdin if src and dst are omitted or src is '-'.\")\r\n sys.exit(1)\r\n\r\n if len(sys.argv) >= 4 and sys.argv[3] != \"-\":\r\n dst = open(sys.argv[3], \"wb\")\r\n elif hasattr(sys.stdout, 'buffer'):\r\n dst = sys.stdout.buffer\r\n else:\r\n dst = sys.stdout\r\n\r\n if len(sys.argv) >= 3 and sys.argv[2] != \"-\":\r\n src = open(sys.argv[2], \"rb\")\r\n elif hasattr(sys.stdin, \"buffer\"):\r\n src = sys.stdin.buffer\r\n else:\r\n src = sys.stdin\r\n\r\n if sys.argv[1] == \"-c\":\r\n method = stream_compress\r\n else:\r\n method = stream_decompress\r\n\r\n method(src, dst)",
"def cli(yamlfile, **args):\n print(ShExGenerator(yamlfile, **args).serialize(**args))",
"def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-data_dir\", required=True, help=\"Directory containing original data set in requisite folder structure (small part or all data)\")\n parser.add_argument(\"-features_filename\", required=True, help=\"Features cloudpickle file that provides that pruning information\")\n parser.add_argument(\"-start_seed\", type=int, default=1284171779)\n parser.add_argument(\"-num_datasets\", type=int, default=20)\n parser.add_argument(\"-modes\", choices=[PREPROCESS, TRAIN, EVALUATE], nargs=\"+\", required=True)\n args = parser.parse_args()\n return pipeline(args)",
"def main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-w\", \"--workflow_path\", help='Path to workflow file')\n parser.add_argument(\"-g\", \"--galaxy\",\n dest=\"galaxy_url\",\n help=\"Target Galaxy instance URL/IP address (required \"\n \"if not defined in the tools list file)\",)\n parser.add_argument(\"-a\", \"--apikey\",\n dest=\"api_key\",\n help=\"Galaxy admin user API key (required if not \"\n \"defined in the tools list file)\",)\n args = parser.parse_args()\n\n gi = galaxy.GalaxyInstance(url=args.galaxy_url, key=args.api_key)\n\n with open(args.workflow_path, 'r') as wf_file:\n import_uuid = json.load(wf_file).get('uuid')\n existing_uuids = [d.get('latest_workflow_uuid') for d in gi.workflows.get_workflows()]\n if import_uuid not in existing_uuids:\n gi.workflows.import_workflow_from_local_path(args.workflow_path)",
"def main():\n if len(sys.argv) != 2:\n print(\"Error: Incorrect number of arguments. Expected 1.\")\n print(\"Usage: python compress.py <path to file to compress>\")\n print(\"Example: python compress.py zones.json\")\n exit(1)\n\n with open(sys.argv[1], \"rb\") as r, \\\n open(\"{}.br\".format(sys.argv[1]), \"wb\") as w:\n w.write(brotli.compress(r.read()))",
"def cli() -> object:\n parser = argparse.ArgumentParser(description=\"Expression Compiler\")\n parser.add_argument(\"sourcefile\", type=argparse.FileType('r'),\n help=\"Source program text\")\n parser.add_argument(\"outfile\", type=argparse.FileType('w'),\n nargs=\"?\", default=sys.stdout,\n help=\"Output file for assembly code\")\n args = parser.parse_args()\n return args",
"def main():\n parser = argparse.ArgumentParser(description='Create packaged set of modulefiles for deployment on OASIS.')\n parser.add_argument('--location', dest='location', default=None,\n help='Location directory to place files in')\n parser.add_argument('--tarfile', dest='tarfile', default=None,\n help='Name of tarfile to generate')\n args = parser.parse_args(sys.argv[1:])\n if args.location is None:\n args.location = tempfile.mkdtemp()\n elif os.path.exists(args.location):\n overwrite = raw_input(\"{0} exists, overwrite? \".format(args.location))\n if overwrite.lower().strip() != 'y':\n sys.stderr.write(\"Exiting...\")\n sys.exit(0)\n shutil.rmtree(args.location)\n os.mkdir(args.location)\n else:\n os.mkdir(args.location)\n location = checkout_repo(args.location) \n if location is None:\n sys.stderr.write(\"Can't checkout modulefiles to {0}!\\n\".format(args.location))\n package_files(location)\n if args.tarfile is None:\n args.tarfile = \"/tmp/moduleupdate.tar.gz\"\n if tar_files(location, args.tarfile) is None:\n sys.stderr.write(\"Error generating tarfile, exiting\\n\")\n sys.exit(1)\n shutil.rmtree(location)\n sys.stdout.write(\"Packaged files located at {0}\\n\".format(args.tarfile))",
"def main():\n if len(sys.argv) != 2:\n print('Usage: release.py <version>', file=sys.stderr)\n exit(1)\n version = sys.argv[1]\n with open('./manifest.json', 'r+') as f:\n manifest = json.load(f)\n manifest['version'] = version\n f.seek(0)\n json.dump(manifest, f, indent=2)\n f.truncate()\n\n os.system(f'zip cses-filter-v{version}.zip -r icons/ src/ manifest.json')",
"def main():\n parser = argparse.ArgumentParser()\n parser.add_argument('-i', '--input-ontology',\n default=config_test.config[\"msh_test_onto\"])\n parser.add_argument('-s', '--signature')\n parser.add_argument('-f', '--format-name', default=None)\n parser.add_argument('-o', '--output-file', default=\"ontology/output.owl\")\n parser.add_argument('-d', '--max-depth', default=10)\n parser.add_argument('-l', '--locality', default='top')\n\n args = parser.parse_args()\n\n g = Graph().parse(args.input_ontology, format=args.format_name)\n resource = entity_mapper.match_entity(args.signature, g)\n ontomodule = extract_module.extract_module(\n [resource], g, locality=args.locality, max_depth=args.max_depth)\n\n with open(args.output_file, \"w\") as f:\n ontomodule.serialize(f)",
"def main(args: List[Union[str, bytes]] = sys.argv,):\n\tprogram_name, *args = args\n\targs = decode_raw_args(args, str)\n\n\tgen = Generator(*args)\n\tgen.generate_data()\n\tgen.print_return_list()",
"def task_build(argv):\n pytaskmaster.generator(\"setup.py.in\", \"setup.py\", config)\n pytaskmaster.generator(\"pytaskmaster/version.py.in\", \"pytaskmaster/version.py\", config)\n shell(\"python setup.py bdist_wheel\")\n if \"--sign\" in argv:\n for file in os.listdir(\"dist\"):\n asc_file = \"dist/\" + file + \".asc\"\n if file.endswith(\".whl\") and not os.path.isfile(asc_file):\n shell(\"gpg --detach-sign -a dist/{}\".format(file))",
"def main():\n parser = argparse.ArgumentParser(description=MAIN_DESCRIPTION)\n parser.add_argument('-a', '--algorithm', help=ALGORITHM_DESCRIPTION)\n parser.add_argument('-n', '--number', type=int, help=NUMBER_DESCRIPTION)\n parser.add_argument('-o', '--order', help=ORDER_DESCRIPTION)\n parser.add_argument('-s', '--size', help=SIZE_DESCRIPTION)\n args = parser.parse_args()\n try:\n if not (args.algorithm and args.number and args.order and args.size):\n raise ValueError\n create_structure()\n try:\n data = get_data(args.number, args.order, args.size)\n except IOError:\n data = generate_in_files(args.number, args.order, args.size)\n finally:\n alg, out, time = sorting_algorithm(data, args.algorithm)\n # generate_out_files(out, args.number)\n generate_log_file(args.algorithm, args.number, args.order,\n args.size, alg.compares, alg.moves, time)\n except (TypeError, UnboundLocalError, ValueError) as e:\n parser.print_help()",
"def main(args):\n\n data = {\n 'id': '00353',\n 'expanded_folder': '00353.1/9a0f0b0d-1f0b-47c8-88ef-050bd9cdff92',\n 'version': '1',\n 'status': 'VOR',\n 'updated_date': datetime.strftime(datetime.utcnow(), \"%Y-%m-%dT%H:%M:%S\")\n }\n\n settings = settings_lib.get_settings('exp')\n identity = \"resize_%s\" % int(random.random() * 1000)\n log_file = \"worker.log\"\n logger = log.logger(log_file, settings.setLevel, identity)\n conn = boto.swf.layer1.Layer1(settings.aws_access_key_id, settings.aws_secret_access_key)\n act = activity_ArchiveArticle(settings, logger, conn=conn)\n act.do_activity(data)",
"def __cmd_builder(self):\n self.cmd = 'python -m lizard \"%s\" ' % self.get_proj_path()\n args = \"\"\n if self.get_cyclo_args():\n args = self.get_cyclo_args()\n exclude = \",\".join(str(x) for x in self.get_cyclo_exclude() if x is not None)\n if exclude:\n exclude = ','.join(' -x \"{0}\"'.format(w) for w in exclude.rstrip().split(','))\n self.cmd = self.cmd + args + \" \" + exclude + \" --csv\"\n print(self.cmd) # pragma: no mutate",
"def main(args):\n\n for dir in args.dirs:\n # prepdir = mdssprep.Directory(dir,exclude=['file_*3*','file_2??'],include=['file_*5*'],maxarchivesize=mdssprep.one_meg*200.,minsize=mdssprep.one_meg*100.)\n prepdir = mdssprep.Directory(dir)\n prepdir.archive(dryrun=False)",
"def regenerate_command_line_objects(input_file_parameters, dir_stack,\r\n auto_split_workflows):\r\n workflows = []\r\n prev_number_of_ids_per_command = None\r\n prev_command_had_output_dir = True\r\n first_command = True\r\n # Bools for splitting workflow. Separate values for automatically splitting workflow and\r\n # user defined splits, as user defined splits are applied in 'default' execute_mode, and\r\n # autosplits only when workflow is parallelized\r\n splitting_workflow_automatically = False\r\n user_splitting_workflow = False\r\n j = 0\r\n dir_stack_index = -1\r\n for current_command_type in input_file_parameters.commands:\r\n # Infer split points of workflow\r\n # Split workflow if user has inserted the SPLIT keyword in the STAPLEfile\r\n if current_command_type == 'SPLIT':\r\n user_splitting_workflow = True\r\n continue\r\n\r\n # If previous command had no output directory (i.e. output is created\r\n # to input directory), there is no need to increment the dir_stack index\r\n if prev_command_had_output_dir:\r\n dir_stack_index += 1\r\n\r\n # Reset id number tracking if workflow is split\r\n if splitting_workflow_automatically or user_splitting_workflow:\r\n first_command = True\r\n prev_number_of_ids_per_command = None\r\n\r\n current_step_commands = []\r\n command_type, command_parameters = \\\r\n utils.parse_staplefile_command_line(current_command_type)\r\n in_dir = dir_stack[dir_stack_index]\r\n if command_type.require_output_dir:\r\n out_dir = dir_stack[dir_stack_index+1]\r\n prev_command_had_output_dir = True\r\n else:\r\n out_dir = in_dir\r\n prev_command_had_output_dir = False\r\n\r\n # Read files until command class finds no more valid input files\r\n successful_commands = 0\r\n current_command = None\r\n while True:\r\n try:\r\n # The command instance is generated without exceptions if the\r\n # command execution has failed (i.e. expected output\r\n # file does not exist). Otherwise NewFileError is raised.\r\n current_command = command_type(command_parameters, in_dir, out_dir)\r\n except STAPLERerror.NewFileExists:\r\n successful_commands += 1\r\n continue\r\n except STAPLERerror.VirtualIOError:\r\n break\r\n except STAPLERerror.NotConfiguredError:\r\n raise STAPLERerror.STAPLERerror('Trying to create command '\r\n 'lines for {0}, '\r\n 'but config.txt is missing '\r\n 'configuration for this '\r\n 'command. 
Edit config.txt '\r\n 'appropriately or refer to '\r\n 'manual to see how '\r\n 'to do this.'.format(command_type.name))\r\n\r\n # If command can be created, check if the workflow should be split\r\n # automatically (when user has defined automatic splitting)\r\n if not first_command and auto_split_workflows:\r\n if len(current_command.command_ids) > prev_number_of_ids_per_command:\r\n splitting_workflow_automatically = True\r\n current_step_commands.append(current_command)\r\n logging.info('-'*80)\r\n logging.info('User command line:\\n{0}'.format(input_file_parameters.commands[dir_stack_index]))\r\n logging.info('Final command line(s):\\n{0}'.format(\r\n '\\n'.join(current_command.command_lines)))\r\n logging.info('Input directory is:\\n{0}'.format(in_dir.path))\r\n logging.info('Output directory is:\\n{0}'.format(out_dir\r\n .path))\r\n j += 1\r\n if not current_step_commands and not successful_commands:\r\n raise STAPLERerror.STAPLERerror('No proper existing or predicted '\r\n 'input files were found for '\r\n 'command {0} in the input '\r\n 'directory:\\n{1}\\nThis command '\r\n 'takes input files only in the '\r\n 'following formats:\\n{2}\\nInput '\r\n 'directory is predicted to '\r\n 'contain the following files:\\n{'\r\n '3}'.format(command_type.name,\r\n in_dir.path,\r\n '\\n'.join(command_type.input_types),\r\n ', '.join(in_dir.file_names.keys())))\r\n print '{0} command (step number {1}) was regenerated {2} ' \\\r\n 'times'.format(command_type.name, dir_stack_index+1, len(current_step_commands))\r\n if current_step_commands:\r\n if first_command:\r\n workflows.append([current_step_commands])\r\n first_command = False\r\n elif current_command is not None:\r\n if not splitting_workflow_automatically and not user_splitting_workflow:\r\n workflows[-1] += [current_step_commands]\r\n else:\r\n workflows.append([current_step_commands])\r\n\r\n if current_command is None:\r\n prev_number_of_ids_per_command = -1\r\n else:\r\n prev_number_of_ids_per_command = len(current_command.command_ids)\r\n splitting_workflow_automatically = False\r\n user_splitting_workflow = False\r\n\r\n return workflows, dir_stack",
"def main():\n parser = argparse.ArgumentParser(description=\"Wrapper for the GROMACS make_ndx module.\",\n formatter_class=lambda prog: argparse.RawTextHelpFormatter(prog, width=99999))\n parser.add_argument('-c', '--config', required=False, help=\"This file can be a YAML file, JSON file or JSON string\")\n\n # Specific args of each building block\n required_args = parser.add_argument_group('required arguments')\n required_args.add_argument('--input_structure_path', required=True)\n required_args.add_argument('--output_ndx_path', required=True)\n parser.add_argument('--input_ndx_path', required=False)\n\n args = parser.parse_args()\n config = args.config if args.config else None\n properties = settings.ConfReader(config=config).get_prop_dic()\n\n # Specific call of each building block\n make_ndx(input_structure_path=args.input_structure_path,\n output_ndx_path=args.output_ndx_path,\n input_ndx_path=args.input_ndx_path,\n properties=properties)",
"def export_workflow(args):\n if args.type == 'magnis':\n clarity_epp.export.workflow.helix_magnis(lims, args.process_id, args.output_file)\n elif args.type == 'mip':\n clarity_epp.export.workflow.helix_mip(lims, args.process_id, args.output_file)",
"def cmdLine():\n parser = argparse.ArgumentParser(description=\"Encode decode any file\"\n ,prog='codec') #usage='codec -e'\n parser.add_argument('-e','--encoder', help='Encoder number', type=int)\n parser.add_argument('-d', '--decoder', help='Decoder numer', type=int)\n parser.add_argument('-i', '--input', help='Input file name to encode/decode', type=str)\n parser.add_argument('-o', '--output', help='Output file name', type=str)\n parser.add_argument('-l', '--list', help='list of all encode/decoder'\n ,action='store_true')\n parser.add_argument('-p', '--passphrase', help='Pass phrase to encode file', type=str)\n parser.add_argument('-t', '--time', help='Validity time of encoded file in seconds', type=int, default=0)\n return parser.parse_args()",
"def read_arguments():\n\n parser = argparse.ArgumentParser(\n description='Enter arguments to run the pipeline.')\n\n # arguments for external files that might be necessary to run the program\n parser.add_argument(\n '--cost_network', type=str,\n help='file storing the state dictionary of the cost network.'\n )\n\n parser.add_argument(\n '--policy_network', type=str,\n help='File storing the state dictionary of the Policy network.'\n )\n\n parser.add_argument(\n '--state_dictionary', type=str,\n help='Environment on which to run the algo (obstacle/no obstacle)'\n )\n\n parser.add_argument(\n '--expert_trajectory_file', type=str,\n help='Path to file containing the exeprt trajectories.')\n\n # network hyper parameters\n parser.add_argument(\n '--cost_network_input', type=int, default=29,\n help='layer size of cost network. None if you have specified cost \\\n network state dict.')\n\n parser.add_argument(\n '--cost_network_hidden', nargs='+', type=int, default=[256, 256],\n help='Hidden size of cost network.None if you have specified cost \\\n network state dict.')\n\n parser.add_argument(\n '--cost_network_output', type=int, default=1,\n help='Output layer size of cost network.None if you have specified \\\n cost network state dict.')\n\n parser.add_argument(\n '--policy_network_input', type=int, default=29,\n help='Input layer size of policy network.None if you have specified \\\n policy network state dict.')\n\n parser.add_argument(\n '--policy_network_hidden', nargs='+', type=int, default=[256, 256],\n help='Hidden layer size of policy network.None if you have specified \\\n policy network state dict.')\n\n parser.add_argument(\n '--policy_network_output', type=int, default=4,\n help='Output layer size of policy network.None if you have specified \\\n policy network state dict.')\n\n # other run hyper parameters like optimizer and all???\n\n # run hyperparameters\n parser.add_argument('--irl_iterations', type=int,\n help='Number of times to iterate over the IRL part.')\n\n parser.add_argument(\n '--no_of_samples', type=int,\n help='Number of samples to create agent state visitation frequency.')\n\n parser.add_argument(\n '--rl_iterations', type=int,\n help='Number of iterations to be performed in the RL section.')\n\n # arguments for the I/O of the program\n parser.add_argument(\n '--display_board', type=str, default='False',\n help='If True, draw envirnment.')\n\n parser.add_argument(\n '--on_server', type=str, default='True',\n help='False if program is to run on server.')\n\n parser.add_argument('--store_results', type=str, default='True')\n\n parser.add_argument(\n '--plot_interval', type=int, default=10,\n help='Iterations before loss and reward curve plots are stored.')\n\n parser.add_argument(\n '--savedict_policy_interval', type=int, default=100,\n help='Iterations after which the policy network will be stored.')\n\n parser.add_argument(\n '--savedict_cost_interval', type=int, default=1,\n help='Iterations after which the cost network will be stored.')\n\n # arguments for the broader pipeLine\n parser.add_argument(\n '--rl_method', type=str,\n help='Enter the RL method to be used.')\n\n parser.add_argument(\n '--feature_space', type=str,\n help='Type of features to be used to get the state of the agent.')\n\n parser.add_argument('--irl_method', type=str,\n help='Enter the IRL method to be used.')\n\n parser.add_argument(\n '--run_type', type=str, default='train',\n help='Enter if it is a train run or a test run.(train/test).')\n\n parser.add_argument(\n '--verbose', type=str, 
default='False',\n help='Set verbose to \"True\" to get a myriad of print statements crowd\\\n your terminal. Necessary information should be provided with either\\\n of the modes.')\n\n parser.add_argument(\n '--no_of_testRuns', type=int, default=0,\n help='If --run_type set to test, then this denotes the number of test \\\n runs you want to conduct.')\n\n _args = parser.parse_args()\n\n return _args"
] | [
"0.636741",
"0.61607385",
"0.6109988",
"0.5978106",
"0.5915388",
"0.58942014",
"0.58697283",
"0.58455306",
"0.5824884",
"0.58116597",
"0.5769307",
"0.5750805",
"0.57481354",
"0.5736904",
"0.57217336",
"0.5676748",
"0.5655071",
"0.5651055",
"0.5635633",
"0.5598006",
"0.5588914",
"0.55672103",
"0.55587125",
"0.55153644",
"0.5511112",
"0.5506395",
"0.5495748",
"0.5493936",
"0.54731333",
"0.5473119"
] | 0.65829694 | 0 |
Generates commands to execute the workflow for each input file. | def generate_command_line_objects(input_file_parameters, dir_stack, auto_split_workflows):
    workflows = []
    prev_number_of_ids_per_command = None
    prev_command_had_output_dir = True
    first_command = True
    # Bools for splitting workflow. Separate values for automatically splitting workflow and
    # user defined splits, as user defined splits are applied in 'default' execute_mode, and
    # autosplits only when workflow is parallelized
    splitting_workflow_automatically = False
    user_splitting_workflow = False
    no_command_has_required_output_dir = True
    j = 0
    dir_stack_index = -1
    for current_command_type in input_file_parameters.commands:
        # Infer split points of workflow
        # Split workflow if user has inserted the SPLIT keyword in the STAPLEfile
        if current_command_type == 'SPLIT':
            user_splitting_workflow = True
            continue
        # If previous command had no output directory (i.e. output is created
        # to input directory), there is no need to increment the dir_stack index
        if prev_command_had_output_dir:
            dir_stack_index += 1
        # Reset id number tracking if workflow is split
        if splitting_workflow_automatically or user_splitting_workflow:
            first_command = True
            prev_number_of_ids_per_command = None
        current_step_commands = []
        command_type, command_parameters = \
            utils.parse_staplefile_command_line(current_command_type)
        in_dir = dir_stack[dir_stack_index]
        if command_type.require_output_dir:
            out_dir = dir_stack[dir_stack_index+1]
            prev_command_had_output_dir = True
            no_command_has_required_output_dir = False
        else:
            out_dir = in_dir
            prev_command_had_output_dir = False
        #Read files until command class finds no more valid input files
        while True:
            try:
                current_command = command_type(command_parameters, in_dir, out_dir)
                # Check if workflow should be split (if user has defined automatic splitting)
                if not first_command and auto_split_workflows:
                    if len(current_command.command_ids) > prev_number_of_ids_per_command:
                        splitting_workflow_automatically = True
                current_step_commands.append(current_command)
                logging.info('-'*80)
                logging.info('User command line:\n{0}'.format(input_file_parameters.commands[dir_stack_index]))
                logging.info('Final command line(s):\n{0}'.format(
                    '\n'.join(current_command.command_lines)))
                logging.info('Input directory is:\n{0}'.format(in_dir.path))
                logging.info('Output directory is:\n{0}'.format(out_dir
                                                                .path))
                j += 1
                print 'Created command line number {0} for {1}...'\
                    .format(j, command_type.name)
            except STAPLERerror.NewFileExists as existing_file_name:
                if no_command_has_required_output_dir:
                    raise STAPLERerror.STAPLERerror('Starting point directory '
                                                    'already contains file '
                                                    'name {0}, which {1} '
                                                    'command would overwrite. '
                                                    'Either remove {1} from '
                                                    'this workflow or remove '
                                                    '{0} and similar files '
                                                    'from the starting point '
                                                    'directory. Notice that '
                                                    '--remove command will '
                                                    'not delete any files '
                                                    'from the starting point '
                                                    'directory.'
                                                    .format(existing_file_name,
                                                            command_type.name))
                raise STAPLERerror.STAPLERerror('File with name {0} already '
                                                'exists in the output '
                                                'directory {1}. Remove the '
                                                'existing workflow or use the '
                                                '--fix_run feature to create '
                                                'a fixed run.'.format(existing_file_name, out_dir.path))
            except STAPLERerror.VirtualIOError:
                break
            except STAPLERerror.NotConfiguredError:
                raise STAPLERerror.STAPLERerror('Trying to create command '
                                                'lines for {0}, '
                                                'but config.txt is missing '
                                                'configuration for this '
                                                'command. Edit config.txt '
                                                'appropriately or refer to '
                                                'manual to see how '
                                                'to do this.'.format(command_type.name))
        if not current_step_commands:
            if command_type.name == 'custom':
                raise STAPLERerror.STAPLERerror(
                    'No proper existing or predicted '
                    'input files were found for '
                    'command\n{0}\n in the input '
                    'directory:\n{1}\n. Please revise the command line '
                    'by setting desired input file types to input '
                    'keywords e.g. $INPUT.fastq\nInput '
                    'directory is predicted to '
                    'contain the following files:\n{2}'.format(command_parameters,
                                                               in_dir.path,
                                                               ', '.join(in_dir.file_names.keys())))
            else:
                raise STAPLERerror.STAPLERerror('No proper existing or predicted '
                                                'input files were found for '
                                                'command {0} in the input '
                                                'directory:\n{1}\nThis command '
                                                'takes input files only in the '
                                                'following formats:\n{2}\nInput '
                                                'directory is predicted to '
                                                'contain the following files:\n{3}'.format(command_type.name,
                                                                                           in_dir.path,
                                                                                           '\n'.join(command_type.input_types),
                                                                                           ', '.join(in_dir.file_names.keys())))
        if first_command:
            workflows.append([current_step_commands])
            first_command = False
        else:
            if not splitting_workflow_automatically and not user_splitting_workflow:
                workflows[-1] += [current_step_commands]
            else:
                workflows.append([current_step_commands])
        prev_number_of_ids_per_command = len(current_command.command_ids)
        splitting_workflow_automatically = False
        user_splitting_workflow = False
    return workflows, dir_stack | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def execute(self):\n for line in fileinput.input():\n line = line.rstrip()\n self._process_command(line)",
"def regenerate_command_line_objects(input_file_parameters, dir_stack,\r\n auto_split_workflows):\r\n workflows = []\r\n prev_number_of_ids_per_command = None\r\n prev_command_had_output_dir = True\r\n first_command = True\r\n # Bools for splitting workflow. Separate values for automatically splitting workflow and\r\n # user defined splits, as user defined splits are applied in 'default' execute_mode, and\r\n # autosplits only when workflow is parallelized\r\n splitting_workflow_automatically = False\r\n user_splitting_workflow = False\r\n j = 0\r\n dir_stack_index = -1\r\n for current_command_type in input_file_parameters.commands:\r\n # Infer split points of workflow\r\n # Split workflow if user has inserted the SPLIT keyword in the STAPLEfile\r\n if current_command_type == 'SPLIT':\r\n user_splitting_workflow = True\r\n continue\r\n\r\n # If previous command had no output directory (i.e. output is created\r\n # to input directory), there is no need to increment the dir_stack index\r\n if prev_command_had_output_dir:\r\n dir_stack_index += 1\r\n\r\n # Reset id number tracking if workflow is split\r\n if splitting_workflow_automatically or user_splitting_workflow:\r\n first_command = True\r\n prev_number_of_ids_per_command = None\r\n\r\n current_step_commands = []\r\n command_type, command_parameters = \\\r\n utils.parse_staplefile_command_line(current_command_type)\r\n in_dir = dir_stack[dir_stack_index]\r\n if command_type.require_output_dir:\r\n out_dir = dir_stack[dir_stack_index+1]\r\n prev_command_had_output_dir = True\r\n else:\r\n out_dir = in_dir\r\n prev_command_had_output_dir = False\r\n\r\n # Read files until command class finds no more valid input files\r\n successful_commands = 0\r\n current_command = None\r\n while True:\r\n try:\r\n # The command instance is generated without exceptions if the\r\n # command execution has failed (i.e. expected output\r\n # file does not exist). Otherwise NewFileError is raised.\r\n current_command = command_type(command_parameters, in_dir, out_dir)\r\n except STAPLERerror.NewFileExists:\r\n successful_commands += 1\r\n continue\r\n except STAPLERerror.VirtualIOError:\r\n break\r\n except STAPLERerror.NotConfiguredError:\r\n raise STAPLERerror.STAPLERerror('Trying to create command '\r\n 'lines for {0}, '\r\n 'but config.txt is missing '\r\n 'configuration for this '\r\n 'command. 
Edit config.txt '\r\n 'appropriately or refer to '\r\n 'manual to see how '\r\n 'to do this.'.format(command_type.name))\r\n\r\n # If command can be created, check if the workflow should be split\r\n # automatically (when user has defined automatic splitting)\r\n if not first_command and auto_split_workflows:\r\n if len(current_command.command_ids) > prev_number_of_ids_per_command:\r\n splitting_workflow_automatically = True\r\n current_step_commands.append(current_command)\r\n logging.info('-'*80)\r\n logging.info('User command line:\\n{0}'.format(input_file_parameters.commands[dir_stack_index]))\r\n logging.info('Final command line(s):\\n{0}'.format(\r\n '\\n'.join(current_command.command_lines)))\r\n logging.info('Input directory is:\\n{0}'.format(in_dir.path))\r\n logging.info('Output directory is:\\n{0}'.format(out_dir\r\n .path))\r\n j += 1\r\n if not current_step_commands and not successful_commands:\r\n raise STAPLERerror.STAPLERerror('No proper existing or predicted '\r\n 'input files were found for '\r\n 'command {0} in the input '\r\n 'directory:\\n{1}\\nThis command '\r\n 'takes input files only in the '\r\n 'following formats:\\n{2}\\nInput '\r\n 'directory is predicted to '\r\n 'contain the following files:\\n{'\r\n '3}'.format(command_type.name,\r\n in_dir.path,\r\n '\\n'.join(command_type.input_types),\r\n ', '.join(in_dir.file_names.keys())))\r\n print '{0} command (step number {1}) was regenerated {2} ' \\\r\n 'times'.format(command_type.name, dir_stack_index+1, len(current_step_commands))\r\n if current_step_commands:\r\n if first_command:\r\n workflows.append([current_step_commands])\r\n first_command = False\r\n elif current_command is not None:\r\n if not splitting_workflow_automatically and not user_splitting_workflow:\r\n workflows[-1] += [current_step_commands]\r\n else:\r\n workflows.append([current_step_commands])\r\n\r\n if current_command is None:\r\n prev_number_of_ids_per_command = -1\r\n else:\r\n prev_number_of_ids_per_command = len(current_command.command_ids)\r\n splitting_workflow_automatically = False\r\n user_splitting_workflow = False\r\n\r\n return workflows, dir_stack",
"def process_input_files(inputs):\n for ifile in inputs:\n with open(ifile) as fin:\n exec(compile(fin.read(), ifile, 'exec'))",
"def task_generate_tasks():\n \n yield {\n 'basename': 'generate_tasks',\n 'name': None,\n # 'doc': 'docs for X',\n 'watch': ['trains/'],\n 'task_dep': ['create_folders'],\n }\n \n for root, dirs, files in os.walk('trains/',topdown=False):\n for f in files:\n #print(f)\n yield template_train_model(os.path.join(root,f))",
"def process_commands(self, commands: List[str]):",
"def write_default(workflows, output_dir):\r\n\r\n # Calculate the total number of commands\r\n number_of_commands = 0\r\n for workflow in workflows:\r\n number_of_commands += sum(map(len, workflow))\r\n\r\n # Create command line strings\r\n i = 0\r\n out_lines = ['echo Started executing shell script at:', 'date']\r\n for workflow in workflows:\r\n for workflow_step in workflow:\r\n for cmd in workflow_step:\r\n i += 1\r\n cmd_list = cmd.command_lines\r\n cmd_list = map(clean_command_lines, cmd_list)\r\n out_lines.append('echo Executing command {0}/{1}:'\r\n .format(i, number_of_commands))\r\n for c in cmd_list:\r\n c = c.replace('>', '\\\\>')\r\n c = c.replace('|', '\\\\|')\r\n out_lines.append('echo ' + c)\r\n out_lines.append('date')\r\n\r\n #Load modules\r\n if cmd.load_module:\r\n for module in cmd.load_module:\r\n out_lines.append(module)\r\n\r\n #The command\r\n out_lines += cmd_list\r\n\r\n #Unload modules\r\n if cmd.unload_module:\r\n for module in cmd.unload_module:\r\n out_lines.append(module)\r\n out_lines.append('echo Finished at:')\r\n out_lines.append('date')\r\n\r\n #Open and write command lines\r\n fl_name = '{0}_output_{1}.sh'.format(NAME, START_TIME)\r\n output_file_path = os.path.join(output_dir, fl_name)\r\n try:\r\n out_fl = open(output_file_path, 'w')\r\n except:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}'.format(os.path.join(output_dir,\r\n fl_name)))\r\n out_fl.write('#!/usr/bin/env bash\\n')\r\n out_fl.write('\\n'.join(out_lines))\r\n out_fl.close()\r\n return [output_file_path]",
"def airflow_commands():\n pass",
"def main(input_params):\n\n store = kgenlib.BaseStore()\n\n input_files = input_params[\"files\"]\n output_file = input_params.get(\"output_file\")\n\n for file in input_files:\n store.add(kgenlib.BaseStore.from_yaml_file(file))\n\n mutations = input_params.get(\"mutations\", {})\n store.process_mutations(mutations)\n return store.dump(output_filename=output_file)",
"def run(self):\n import inspect\n command_map = {\n 'find-clusters': eos.find_clusters,\n 'predict-observables': eos.predict_observables,\n 'sample-mcmc': eos.sample_mcmc,\n 'sample-pmc': eos.sample_pmc,\n }\n\n for idx, step in enumerate(self._steps):\n if type(step) is not dict:\n raise ValueError(\"Step #{} is not a key/value map.\")\n\n if 'command' not in step:\n raise ValueError(\"Step #{} contains no command.\")\n\n command = step['command']\n func = command_map[command]\n params = step['parameters'] if 'parameters' in step else {}\n params = { params_map[(command, k)]: v for k, v in params.items() }\n paramstr = ','.join(['{k}={v}'.format(k=k,v=v) for k, v in params])\n\n func_sig = inspect.signature(func)\n func_required_args = {}\n for n, p in func_sig.parameters.items():\n if p.default() != p.empty():\n continue\n func_required_args += { n }\n for n in func_required_args:\n if n in params.keys():\n continue\n eos.error('Mandatory argument \\'{}\\' not provided'.format(n))\n return\n\n eos.info('Beginning step #{i}: {cmd}({params})'.format(i=i,cmd=cmd, params=paramstr))\n func(**params)\n eos.info('Step #{i} complete'.format(i=i))",
"def file_input_run(self, filename):\r\n file = open(filename)\r\n file_content = file.read()\r\n content_list = file_content.split(\"\\n\")\r\n for command in content_list:\r\n if command == \"exit\":\r\n break\r\n try:\r\n self.input_mang.process_input(command)\r\n except Exception as e:\r\n print(e)",
"def run(self, commands: list[str]):\n ...",
"def do_workflow(self, arg=None):\n\n def add_steps_to_workflow(curr_flow):\n while True:\n cmd_call = simple_input('Please choose a command to add to the workflow.', cmds, True)\n if cmd_call not in ['DONE', 'EXIT']:\n if self.is_output_cmd(cmd_call):\n curr_flow.add_output(cmd_call)\n else:\n curr_flow.add_step(cmd_call)\n cmds.pop(cmds.index(cmd_call))\n\n _conf = simple_input('Do you want to configure this command?', ['Y','N'], True) if self.is_configureable(cmd) else None\n if _conf == 'Y':\n curr_flow.configure_step(cmd_call)\n\n elif cmd_call == 'DONE':\n break\n else:\n return\n return curr_flow.has_steps()\n\n def confirm_workflow(curr_flow):\n checks = [('START', 'Start workflow?'), ('ADD', 'Do you want to add more steps?'),\n ('RESTART', 'Do you want to start over?')]\n curr_flow.draw_steps()\n for check in checks:\n _continue = simple_input(check[1], ['Y', 'N', 'EXIT'])\n if _continue == 'Y':\n return check[0]\n if _continue == 'EXIT':\n return 'EXIT'\n return 'INVALID'\n\n print('Preparing Workflow Wizard...')\n options = sorted(self.cmds + self.output_cmds)\n from smores.workflow import Workflow\n workflow = Workflow(self)\n target, load_type = self.validate_args('', 'file')\n if target:\n _l = True if target in self.inputs['files'].keys() else False\n workflow.add_target(target, load_type, _l)\n print('Please choose the commands you would like to add to the workflow.'\n '\\nCommands will be executed in the order in which they are added.'\n '\\n\\nPlease note that some commands have dependencies that must be satisfied. An overview of '\n 'command dependencies is available on the main SMOREs wiki on Github')\n print('\\nAvailable Commands for WorkFlow')\n cmds = []\n for i, _o in enumerate(options):\n print('{1}'.format(i, _o))\n cmds.append(_o)\n cmds.append('DONE')\n steps_added = add_steps_to_workflow(workflow)\n while steps_added:\n _run = confirm_workflow(workflow)\n if _run == 'START':\n break\n elif _run == 'ADD':\n _ = add_steps_to_workflow(workflow)\n elif _run == 'RESTART':\n self.do_workflow('')\n else:\n return\n workflow.run()\n print('Workflow has completed.')\n return\n\n else:\n print('Workflows currently have to be setup without the file already being loaded.')\n return",
"def run(self, input_files, input_metadata, output_files):\n try:\n # Set and check execution directory. If not exists the directory will be created.\n execution_path = os.path.abspath(self.configuration.get('execution', '.'))\n execution_parent_dir = os.path.dirname(execution_path)\n if not os.path.isdir(execution_parent_dir):\n os.makedirs(execution_parent_dir)\n\n # Update working directory to execution path\n os.chdir(execution_path)\n logger.debug(\"Execution path: {}\".format(execution_path))\n\n # Set file names for output files (with random name if not predefined)\n for key in output_files.keys():\n if output_files[key] is not None:\n pop_output_path = os.path.abspath(output_files[key])\n self.populable_outputs[key] = pop_output_path\n output_files[key] = pop_output_path\n else:\n errstr = \"The output_file[{}] can not be located. Please specify its expected path.\".format(key)\n logger.error(errstr)\n raise Exception(errstr)\n\n logger.debug(\"Init execution of the Machine Learning Model generation\")\n # Prepare file paths\n for key in input_files.keys():\n if key == 'radiomic_features':\n dataset = input_files[key]\n elif key == 'ML_technique':\n ml = input_files[key]\n else:\n logger.debug('Unrecognized input file key {}'.format(key))\n continue\n\n\n\n output_metadata = {}\n for key in output_files.keys():\n \n logger.info('VRE_ML: Iterating over Key {}'.format(key))\n\n \n if os.path.isfile(output_files[key]):\n meta = Metadata()\n meta.file_path = output_files[key] # Set file_path for output files\n \n logger.info('VRE_ML: Update metadata with key {} and value {}'.format(key, meta.file_path))\n\n meta.data_type = 'tool_statistics'\n meta.file_type = 'PDF'\n\n # Set sources for output files\n meta.sources = [output_files[key]+'.pdf']\n # Generate model\n generate_model.run(dataset=dataset,output_files[key]+'.pdf')\n\n # Append new element in output metadata\n logger.info('VRE_ML: Update metadata with key {} and value {}'.format(key, meta.file_path))\n output_metadata.update({key: meta})\n\n else:\n logger.warning(\"Output {} not found. Path {} not exists\".format(key, output_files[key]))\n\n logger.debug(\"Output metadata created\")\n\n return output_files, output_metadata\n\n except Exception:\n errstr = \"VRE ML RUNNER pipeline failed. See logs\"\n logger.fatal(errstr)\n raise Exception(errstr)",
"def generate_commands(\n sourcedir,\n source_filelist,\n outdir,\n replacement_file,\n replacement_dict,\n ignore_ext,\n include_only_ext,\n use_symlink\n):\n cmd_params = []\n for root, directories, files in os.walk(sourcedir):\n print(f\"\\nRoot directory is {root}.\")\n new_outdir = os.path.join(outdir, replace_string(os.path.relpath(root, start=sourcedir), replacement_dict))\n for filename in files:\n outfilename = replace_string(filename, replacement_dict)\n infilepath = os.path.join(root, filename)\n outfilepath = os.path.join(new_outdir, outfilename)\n\n # Decide whether to ignore current file.\n has_included_extension = any([filename.endswith(ext) for ext in include_only_ext])\n included_in_filelist = source_filelist is None or os.path.realpath(infilepath) in source_filelist\n if not has_included_extension or not included_in_filelist:\n print(f\"Ignoring: {filename}\")\n continue\n # Only create the new output directory if it has any files to process that ends up in it.\n if not os.path.isdir(new_outdir):\n os.mkdir(new_outdir)\n\n # Depending on filetype, we run different commands on them.\n # This is where we start accumulating commandline arguments for each file.\n # We will run them in a scheduler once we have all the commands as a list.\n if filename.endswith(\".bam\"):\n cmd = f\"replace_string --infilepath {infilepath} --outfilepath {outfilepath} \" \\\n f\"--replacement_file {replacement_file} --num_thread 1\"\n cmd_params.append((cmd, None, None))\n elif any([filename.endswith(ending) for ending in ignore_ext]):\n if use_symlink:\n cmd = f\"ln -s {infilepath} {outfilepath}\"\n else:\n cmd = f\"cp {infilepath} {outfilepath}\"\n cmd_params.append((cmd, None, None))\n else:\n cmd = f\"replace_string --infilepath {infilepath} --outfilepath {outfilepath} \" \\\n f\"--replacement_file {replacement_file} --num_thread 1\"\n cmd_params.append((cmd, None, None))\n return cmd_params",
"def steps(self):\n\n if not os.path.exists(self.build_path):\n raise exceptions.ProjectNotBuildError()\n\n steps = []\n for filename in os.listdir(self.build_path):\n match = re.match(r'(\\d{4})_(.*)\\.json', filename)\n if not match:\n continue\n\n with open(os.path.join(self.build_path, filename), 'r') as f:\n template = json.loads(f.read())\n\n template_type = 'custom' if '_type' in template else 'cloudformation'\n steps.append((int(match.groups()[0]), match.groups()[1], filename, template_type))\n steps = sorted(steps, key=lambda x: x[0])\n\n return steps",
"def generate_files(self):\n\t\tapply_stemmer, xml_file, query_file, expected_file = self.read_config_file()\n\t\tself.generate_query_file(query_file, xml_file, apply_stemmer)\n\t\tself.generate_expected_file(expected_file, xml_file)\n\t\tlogging.info('FINALIZADO: MÓDULO PROCESSADOR DE CONSULTAS')",
"def _insertAllSteps(self):\n \n # Get pointer to input micrographs \n self.particlePickingRun = self.xmippParticlePicking.get()\n \n copyId = self._insertFunctionStep('copyInputFilesStep')\n # Get micrographs to pick\n #self.inputMicrographs.set(self.getInputMicrographs())\n \n deps = []\n for mic in self.getInputMicrographs():\n stepId = self._insertFunctionStep('autopickMicrographStep', mic.getFileName(), prerequisites=[copyId])\n deps.append(stepId)\n \n self._insertFunctionStep('_createOutput',self._getExtraPath(), prerequisites=deps)",
"def run(self):\n for command in CUSTOM_COMMANDS:\n self.run_custom_command(command)",
"def generate_commands(self):\n if self.final_directory:\n dest_base = os.path.join(self.working_dir, self.final_directory)\n else:\n dest_base = self.working_dir\n dirnames = set({dest_base})\n mv_commands = []\n\n # commands to move files\n keep_files = [os.path.relpath(x, self.common_path) for x in self.keep_files]\n for file in keep_files:\n if self.is_dir_structure_flat:\n dest = os.path.join(dest_base, os.path.basename(file))\n else:\n dest = os.path.join(dest_base, self.common_path, file)\n dirnames.add(os.path.dirname(dest))\n mv_commands.append(f\"mv -f {file} {dest}\")\n\n # commands to move directories\n keep_directories = [\n os.path.relpath(x, self.common_path) for x in self.keep_directories\n ]\n for directory in keep_directories:\n dest = dest_base\n if self.is_dir_structure_flat:\n source = os.path.join(directory, \"*\")\n else:\n source = directory\n if self.common_path:\n dest = os.path.join(dest_base, self.common_path)\n mv_commands.append(f\"mv -f {source} {dest}\")\n\n mkdir_commands = []\n for directory in dirnames:\n mkdir_commands.append(f\"mkdir -p {directory}\")\n\n self._commands = sorted(mkdir_commands) + mv_commands",
"def generate_all_files():\n for (name, fn) in lang_module.targets.items():\n path = of_g.options.install_dir + '/' + name\n os.system(\"mkdir -p %s\" % os.path.dirname(path))\n with open(path, \"w\") as outfile:\n fn(outfile, os.path.basename(name))\n print(\"Wrote contents for \" + name)",
"def seq2science_parser(workflows_dir=\"./seq2science/workflows/\"):\n # setup the parser\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-v\", \"--version\", action=\"version\", version=f\"seq2science: v{seq2science.__version__}\")\n subparsers = parser.add_subparsers(dest=\"command\")\n subparsers.required = True\n init = subparsers.add_parser(\n \"init\",\n help=\"Initialise a workflow with an example config and samples file.\",\n description=\"Each workflow requires a configuration and samples file to run. \"\n 'Running \"seq2science init {workflow}\" initialises a default '\n \"configuration and samples file for the specific workflow.\",\n )\n global run\n run = subparsers.add_parser(\n \"run\",\n help=\"Run a complete workflow.\",\n description=\"Run a complete workflow. This requires that a config and samples file \"\n \"are either present in the current directory, or passed as an argument.\",\n )\n explain = subparsers.add_parser(\n \"explain\",\n help=\"Write a materials & methods section.\",\n description=\"Explains what has/will be done for the workflow. This prints a string which can serve\"\n \" as a skeleton for your material & methods section.\",\n )\n clean = subparsers.add_parser( # noqa: F841\n \"clean\",\n help=\"Remove all cached sample files and conda environments.\",\n description=\"At the start of each workflow run, seq2science starts with installing environments for each \"\n \"rule. It also stores the GEO soft files of public samples in its cache. These environments can get\"\n \" large and it might be best to remove them when you are done with an analysis. \\n\"\n \"seq2science clean will clean up these files for you.\",\n )\n docs = subparsers.add_parser( # noqa: F841\n \"docs\",\n description=\"The docs command tries to open your browser and open the docs' webpage, \"\n \"if that didn't work it prints the url.\",\n help=\"Take me to the docs!\",\n )\n\n # init, run and explain can use all workflows\n for subparser in [init, run, explain]:\n subparser.add_argument(\n \"workflow\", metavar=\"WORKFLOW\", choices=[dir.replace(\"_\", \"-\") for dir in os.listdir(workflows_dir)]\n )\n\n # init arguments\n init.add_argument(\n \"--dir\",\n default=\".\",\n metavar=\"PATH\",\n help=\"The path to the directory where to initialise the config and samples files.\",\n )\n\n init.add_argument(\n \"-f\",\n \"--force\",\n default=False,\n help=\"Overwrite existing samples.tsv and config.yaml silently.\",\n action=\"store_true\",\n )\n\n global core_arg\n core_arg = run.add_argument(\n \"-j\",\n \"--cores\",\n metavar=\"N\",\n type=int,\n # required=True, # --dryruns and --profile can overwrite None\n help=\"Use at most N cores in parallel. Must be at least 2. 
When \"\n \"executing on a cluster, this number controls the maximum number\"\n \"of parallel jobs.\",\n )\n run.add_argument(\n \"-n\", \"--dryrun\", help=\"Do not execute anything, and display what would be done.\", action=\"store_true\"\n )\n run.add_argument(\"-r\", \"--reason\", help=\"Print the reason for each executed rule.\", action=\"store_true\")\n run.add_argument(\n \"--skip-rerun\", help=\"Skip the check if samples or configuration has been changed.\", action=\"store_true\"\n )\n run.add_argument(\"-k\", \"--keep-going\", help=\"Go on with independent jobs if a job fails.\", action=\"store_true\")\n run.add_argument(\n \"--rerun-incomplete\", help=\"Re-run all jobs the output of which is recognized as incomplete.\", action=\"store_true\"\n )\n run.add_argument(\"--unlock\", help=\"Remove a lock on the working directory.\", action=\"store_true\")\n run.add_argument(\"--cleanup-metadata\", help=\"Just cleanup metadata of given list of output files (default None).\", default=None, nargs=\"+\")\n explain.add_argument(\"--hyperref\", help=\"Print urls as html hyperref\", action=\"store_true\")\n # run/explain arguments\n for subparser in [run, explain]:\n subparser.add_argument(\n \"--snakemakeOptions\",\n nargs=\"+\",\n action=_StoreDictKeyPair,\n metavar=\"KEY=VAL\",\n help=\"Extra arguments to pass along to snakemake. An example could be seq2science run \"\n \"alignment --cores 12 --snakemakeOptions resources={mem_gb:100} local_cores=3. \"\n \"Here we pass local_cores as KEY=VALUE and additional resources can even be passed along in a dictionary. \"\n \"Take a look at the snakemake API for a complete list of all possible options: \"\n \"https://snakemake-api.readthedocs.io/en/latest/api_reference/snakemake.html\",\n )\n global profile_arg\n profile_arg = subparser.add_argument(\n \"-p\",\n \"--profile\",\n metavar=\"PROFILE NAME\",\n help=\"Use a seq2science profile. Profiles can be taken from: https://github.com/s2s-profiles\",\n )\n subparser.add_argument(\n \"-c\",\n \"--configfile\",\n default=\"./config.yaml\",\n metavar=\"FILE\",\n help=\"The path to the config file.\",\n )\n subparser.add_argument(\n \"--debug\",\n action=\"store_true\",\n help=\"\"\"For developers \"only\": prints helpful error messages to debug issues.\"\"\",\n )\n\n # enable tab completion\n # exclusion only works on the main parser unfortunately, but it's better than nothing,\n # plus it might be supported later?\n argcomplete.autocomplete(parser, exclude=[\"-c\", \"-p\", \"-k\" \"-r\" \"-n\", \"-j\", \"-h\", \"-v\"])\n\n return parser",
"def get_input_files(self, action):\n assert action == \"run\", \"Unsupported action\"\n return self.path_tpl.format(infix=\"\", ext=\".bam\")",
"def __prepare_input_files_locally(self, job_wrapper):\n prepare_input_files_cmds = getattr(job_wrapper, 'prepare_input_files_cmds', None)\n if prepare_input_files_cmds is not None:\n for cmd in prepare_input_files_cmds: # run the commands to stage the input files\n if 0 != os.system(cmd):\n raise Exception('Error running file staging command: %s' % cmd)\n job_wrapper.prepare_input_files_cmds = None # prevent them from being used in-line",
"def get_input_files(self, action):\n\n def input_function(wildcards):\n \"\"\"Helper rapper function\"\"\"\n return expand(\n self.base_path_in.format(wildcards=wildcards),\n postproc=[self._get_postproc_token()],\n ext=self.extensions,\n )\n\n assert action == \"run\", \"Unsupported action\"\n return input_function",
"def create_inputs_recipe():\n module_name, _ = os.path.splitext(os.path.basename(__file__))\n path = os.path.join(CREATED_INPUTS_PATH_FOR_TESTS, module_name)\n os.makedirs(path, exist_ok=True)\n os.chdir(path)\n os.makedirs(\"inputs/\", exist_ok=True)\n print('Current working directory:\\n {:s}'.format(os.getcwd()))\n\n for filename, _ in input_pars:\n print('Downloading files...')\n basename = filename.split(\"_\")[0] + \".fits\"\n sci_path = download_from_archive(basename)\n sci_ad = astrodata.open(sci_path)\n data_label = sci_ad.data_label()\n\n print('Reducing pre-processed data:')\n logutils.config(file_name='log_{}.txt'.format(data_label))\n p = GNIRSLongslit([sci_ad])\n p.prepare(bad_wcs=\"fix\")\n p.addDQ()\n p.addVAR(read_noise=True)\n p.ADUToElectrons()\n p.addVAR(poisson_noise=True)\n # p.flatCorrect()\n p.makeIRAFCompatible()\n\n os.chdir(\"inputs/\")\n processed_ad = p.writeOutputs().pop()\n os.chdir(\"../\")\n print('Wrote pre-processed file to:\\n'\n ' {:s}'.format(processed_ad.filename))",
"def gen_tasks(self):\n self.kw = {\n 'image_srcset_sizes': self.site.config['IMAGE_SRCSET_SIZES'],\n 'image_srcset_format': self.site.config['IMAGE_SRCSET_FORMAT'],\n 'extra_image_extensions': self.site.config['EXTRA_IMAGE_EXTENSIONS'],\n 'max_image_size': self.site.config['MAX_IMAGE_SIZE'],\n 'image_folders': self.site.config['IMAGE_FOLDERS'],\n 'output_folder': self.site.config['OUTPUT_FOLDER'],\n 'filters': self.site.config['FILTERS'],\n 'preserve_exif_data': self.site.config['PRESERVE_EXIF_DATA'],\n 'exif_whitelist': self.site.config['EXIF_WHITELIST'],\n 'preserve_icc_profiles': self.site.config['PRESERVE_ICC_PROFILES'],\n }\n\n self.image_ext_list = self.image_ext_list_builtin\n self.image_ext_list.extend(self.site.config.get('EXTRA_IMAGE_EXTENSIONS', []))\n\n yield self.group_task()\n for src in self.kw['image_folders']:\n dst = self.kw['output_folder']\n filters = self.kw['filters']\n real_dst = os.path.join(dst, self.kw['image_folders'][src])\n for task in self.process_tree(src, real_dst):\n task['basename'] = self.name\n task['uptodate'] = [utils.config_changed(self.kw)]\n yield utils.apply_filters(task, filters)",
"def execute(self):\n if self._cli_arguments.get('<samplename>') == 'cfn':\n generate_sample_cfn_module(self.env_root)\n elif self._cli_arguments.get('<samplename>') == 'static-angular':\n generate_sample_static_angular(self.env_root)\n elif self._cli_arguments.get('<samplename>') == 'static-react':\n generate_sample_static_react(self.env_root)\n elif self._cli_arguments.get('<samplename>') == 'sls-py':\n generate_sample_sls_module(self.env_root, 'sls-py')\n elif self._cli_arguments.get('<samplename>') == 'sls-tsc':\n generate_sample_sls_module(self.env_root, 'sls-tsc')\n elif self._cli_arguments.get('<samplename>') == 'stacker':\n generate_sample_stacker_module(self.env_root)\n elif self._cli_arguments.get('<samplename>') == 'tf':\n generate_sample_tf_module(self.env_root)\n elif self._cli_arguments.get('<samplename>') == 'k8s-cfn-repo':\n generate_sample_k8s_cfn_repo(self.env_root)\n elif self._cli_arguments.get('<samplename>') == 'k8s-tf-repo':\n generate_sample_k8s_tf_repo(self.env_root)\n elif self._cli_arguments.get('<samplename>') == 'cdk-tsc':\n generate_sample_cdk_tsc_module(self.env_root)\n elif self._cli_arguments.get('<samplename>') == 'cdk-py':\n generate_sample_cdk_py_module(self.env_root)\n elif self._cli_arguments.get('<samplename>') == 'cdk-csharp':\n generate_sample_cdk_cs_module(self.env_root)\n else:\n LOGGER.info(\"Available samples to generate:\")\n for i in ['cfn', 'static-angular', 'static-react', 'sls-tsc',\n 'sls-py', 'tf', 'k8s-cfn-repo', 'k8s-tf-repo',\n 'stacker', 'cdk-tsc', 'cdk-py', 'cdk-csharp']:\n print(i)",
"def commands():",
"def task_process(args):\n if args.mode == 'change model':\n for i in range(config.center_len):\n h, w = config.center_list[i][0], config.center_list[i][1]\n os.system('rm -rf ctpn_change_{}x{}.onnx'.format(h, w))\n for i in range(config.center_len):\n h, w = config.center_list[i][0], config.center_list[i][1]\n os.system('{} change_model.py --input_path={}/ctpn_{}x{}.onnx --output_path={}/ctpn_change_{}x{}.onnx' \\\n .format(args.interpreter, args.src_dir, h, w,args.res_dir, h, w)) \n if args.mode == 'preprocess':\n for i in range(config.center_len):\n os.system('mkdir -p {}_{}x{}'.format(args.res_dir, config.center_list[i][0], config.center_list[i][1]))\n os.system('{} ctpn_preprocess.py --src_dir={} --save_path={}' \\\n .format(args.interpreter, args.src_dir, args.res_dir))\n if args.mode == 'ais_infer':\n fps_all = 0\n os.system('mkdir -p {}/inf_output'.format(args.res_dir))\n for i in range(config.center_len):\n h, w = config.center_list[i][0], config.center_list[i][1]\n\n os.system('{} --model={} --input={}_{}x{} --dymHW {},{} --device {} --batchsize={} --output={}/inf_output' \\\n .format(args.interpreter, args.om_path, args.src_dir ,h , w, h, w,args.device, args.batch_size, args.res_dir))\n\n sumary_path = glob.glob('{}/inf_output/*ary.json'.format(args.res_dir))[0]\n with open(sumary_path, 'r') as f:\n output = json.load(f)\n throughput = output['throughput'] \n fps_all = fps_all + throughput * config.center_count[i]\n os.system('rm -f {}'.format(sumary_path))\n os.system('mv {}/inf_output/*/*.bin {}'.format(args.res_dir, args.res_dir))\n os.system('rm {}/inf_output -rf'.format(args.res_dir))\n fps_all = fps_all / config.imgs_len\n print(\"====performance data====\")\n print('CTPN bs{} models fps:{}'.format(args.batch_size, fps_all))",
"def _get_job_commands(self,\r\n input_fp,\r\n output_dir,\r\n params,\r\n job_prefix,\r\n working_dir,\r\n command_prefix='/bin/bash; ',\r\n command_suffix='; exit'):\r\n commands = []\r\n result_filepaths = []\r\n\r\n sids = parse_biom_table(open(input_fp, 'U')).SampleIds\r\n\r\n if params['full_tree']:\r\n full_tree_str = '-f'\r\n else:\r\n full_tree_str = ''\r\n\r\n if params['tree_path']:\r\n tree_str = '-t %s' % params['tree_path']\r\n else:\r\n tree_str = ''\r\n\r\n metrics = params['metrics']\r\n\r\n # this is a little bit of an abuse of _merge_to_n_commands, so may\r\n # be worth generalizing that method - this determines the correct\r\n # number of samples to process in each command\r\n sample_id_groups = self._merge_to_n_commands(sids,\r\n params['jobs_to_start'],\r\n delimiter=',',\r\n command_prefix='',\r\n command_suffix='')\r\n\r\n for i, sample_id_group in enumerate(sample_id_groups):\r\n working_dir_i = join(working_dir, str(i))\r\n create_dir(working_dir_i)\r\n output_dir_i = join(output_dir, str(i))\r\n create_dir(output_dir_i)\r\n result_filepaths.append(output_dir_i)\r\n input_dir, input_fn = split(input_fp)\r\n input_basename, input_ext = splitext(input_fn)\r\n sample_id_desc = sample_id_group.replace(',', '_')\r\n output_fns = ['%s_%s.txt' % (metric, input_basename)\r\n for metric in metrics.split(',')]\r\n rename_command, current_result_filepaths = self._get_rename_command(\r\n output_fns, working_dir_i, output_dir_i)\r\n\r\n result_filepaths += current_result_filepaths\r\n\r\n bdiv_command = '%s -i %s -o %s %s -m %s %s -r %s' %\\\r\n (self._script_name,\r\n input_fp,\r\n working_dir_i,\r\n tree_str,\r\n params['metrics'],\r\n full_tree_str,\r\n sample_id_group)\r\n\r\n shell_script_fp = '%s/%s%d.sh' % (working_dir_i, job_prefix, i)\r\n shell_script_commands = [bdiv_command] + rename_command.split(';')\r\n self._commands_to_shell_script(shell_script_commands,\r\n shell_script_fp)\r\n commands.append('bash %s' % shell_script_fp)\r\n\r\n commands = self._merge_to_n_commands(commands,\r\n params['jobs_to_start'],\r\n command_prefix=command_prefix,\r\n command_suffix=command_suffix)\r\n\r\n return commands, result_filepaths"
] | [
"0.65493697",
"0.61751366",
"0.61644983",
"0.6146389",
"0.58796287",
"0.58742994",
"0.5830146",
"0.5827008",
"0.5818108",
"0.5804637",
"0.57874596",
"0.5779958",
"0.5779178",
"0.57518756",
"0.571343",
"0.56952333",
"0.5652398",
"0.56454384",
"0.56206775",
"0.5620343",
"0.55770165",
"0.5544376",
"0.55398387",
"0.5532961",
"0.5518515",
"0.54145736",
"0.5396328",
"0.53955793",
"0.53946835",
"0.5385353"
] | 0.6298623 | 1 |
Reports which commands have not been successfully run. Commands found in the staplefile are compared with files found in the directory stack to identify which commands have failed. | def validate_run_results(input_file_parameters, dir_stack):
prev_command_had_output_dir = True
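    # dir_stack_index starts at -1 so the first command advances it to 0, the first directory in the stack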
dir_stack_index = -1
command_index = 0
for current_command in input_file_parameters.commands:
# Skip over SPLIT commands
if current_command == 'SPLIT':
continue
command_index += 1
if prev_command_had_output_dir:
dir_stack_index += 1
# Keep track of number of commands created in the current workflow step
number_of_successful_commands = 0
# Infer command type, parameters, input and output directories
command_type, command_parameters = \
utils.parse_staplefile_command_line(current_command)
in_dir = dir_stack[dir_stack_index]
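        # Commands that do not require an output directory operate in place,
        # so the directory stack position is not advanced for them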
if command_type.require_output_dir:
out_dir = dir_stack[dir_stack_index+1]
prev_command_had_output_dir = True
else:
out_dir = in_dir
prev_command_had_output_dir = False
# Read files until command class finds no more valid input files
number_of_potential_commands = 0
while True:
try:
                # The command instance is created without exceptions if the
                # command execution has failed (i.e. the expected output
                # file does not exist). Otherwise NewFileExists is raised.
current_command = command_type(command_parameters, in_dir, out_dir)
except STAPLERerror.NewFileExists:
number_of_successful_commands += 1
number_of_potential_commands += 1
continue
except STAPLERerror.VirtualIOError:
break
number_of_potential_commands += 1
# Print validation results
if not number_of_successful_commands:
print '{0} command (step number {1}) has not been run.' \
.format(command_type.name, command_index)
continue
if number_of_successful_commands == number_of_potential_commands:
            print '{0} command (step number {1}) has been run successfully.' \
.format(command_type.name, command_index)
else:
print '{0} command (step number {1}) workflows have failed {2}/{3} times' \
.format(command_type.name, command_index,
number_of_potential_commands - number_of_successful_commands,
number_of_potential_commands) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_run_logs(input_file_parameters, dir_stack):\r\n # Check resource manager produced .out and .err files for assumed error\r\n # messages.\r\n print 'Checking runtime log files for error messages...'\r\n file_names = os.listdir(input_file_parameters.output_dir)\r\n\r\n newest_fix_index = 0\r\n files_to_check = []\r\n for file_name in sorted(file_names):\r\n if file_name.endswith('.out') or file_name.endswith('.err'):\r\n if file_name.startswith('FIX_'):\r\n current_fix_index = file_name.split('_')[1]\r\n if int(current_fix_index) > newest_fix_index:\r\n newest_fix_index = current_fix_index\r\n files_to_check = [file_name]\r\n else:\r\n files_to_check.append(file_name)\r\n if newest_fix_index == 0:\r\n files_to_check.append(file_name)\r\n\r\n if newest_fix_index > 0:\r\n print 'Workflow has been fixed {0} times. Checking only the {1} .out ' \\\r\n 'and .err files of the newest run.'.format(newest_fix_index,\r\n len(files_to_check))\r\n\r\n i = 0\r\n number_of_warnings = 0\r\n warning_strings = ['invalid', 'exception', 'warning']\r\n error_strings = ['error', 'segmentation fault', 'canceled', '(err):']\r\n skip_strings = ['adapters with at most',\r\n 'no. of allowed errors',\r\n 'error counts']\r\n for file_name in files_to_check:\r\n handle = open(os.path.join(input_file_parameters.output_dir,\r\n file_name))\r\n\r\n i += 1\r\n finish_string_exists = False\r\n warning_messages = []\r\n error_messages = []\r\n j = 0\r\n for line in handle:\r\n j += 1\r\n line = line.lower()\r\n if any(s in line for s in skip_strings):\r\n continue\r\n if any(w in line for w in warning_strings):\r\n warning_messages.append(j)\r\n number_of_warnings += 1\r\n if any(e in line for e in error_strings):\r\n error_messages.append(j)\r\n number_of_warnings += 1\r\n if 'finished at:' in line:\r\n finish_string_exists = True\r\n if os.path.splitext(file_name)[1] == '.out':\r\n if not finish_string_exists:\r\n error_messages.append('This thread has not been finished:\\n{0}'\r\n .format(os.path.join(input_file_parameters.output_dir,\r\n file_name)))\r\n\r\n if warning_messages or error_messages:\r\n print '\\n\\nThe following file contains possible warning/error messages:'\r\n print os.path.join(input_file_parameters.output_dir, file_name)\r\n if len(warning_messages) != 0:\r\n print '\\nWarning messages on lines:'\r\n print ', '.join(map(str, warning_messages))\r\n if len(error_messages) != 0:\r\n print '\\nError messages on lines:'\r\n print ', '.join(map(str, error_messages))\r\n\r\n print '\\n\\n{0} .out and .err files checked ({1} processes)'.format(i, i/2)\r\n print 'Potential problems detected: {0}'.format(number_of_warnings)",
"def log_dir_stacks_contents(dir_stacks):\r\n for directory in dir_stacks:\r\n logging.info('-'*80)\r\n logging.info('Predicted directory contents of:\\n{0}'\r\n .format(directory.path))\r\n files = directory.file_names\r\n files = sorted(files)\r\n logging.info('Number of files: {0}'.format(len(files)))\r\n logging.info('Files:')\r\n logging.info('\\t'.join(files))",
"def sense(self):\n\n partition_folder = self.getPartitionFolder()\n log_folder = os.path.join(partition_folder, 'var/log')\n log_name = 'slapgrid-%s-error.log' % self.getConfig('partition-id')\n slapgrid_error_log_file = os.path.join(partition_folder, '.%s' % log_name)\n link_file = os.path.join(log_folder, log_name)\n monitor_url = self.getConfig('monitor-url')\n message = ''\n if os.path.exists(slapgrid_error_log_file) and \\\n os.stat(slapgrid_error_log_file).st_size:\n message = 'Buildout failed to process %s.' % self.getConfig('partition-id')\n if monitor_url:\n message += '\\nSee %s/log/%s for more information.' % (monitor_url, log_name)\n if not os.path.exists(link_file):\n os.symlink(slapgrid_error_log_file, link_file)\n else:\n if os.path.exists(link_file):\n os.unlink(link_file)\n\n if message:\n self.logger.error(message)\n else:\n self.logger.info(\"buildout is OK\")",
"def run_commands(commands):\n msg = \"\"\n for cmd, send_stdout in commands: \n msg += \"<hr/>\\n\"\n log.info(\"Attempting to run command {}\".format(cmd))\n try:\n p = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n p.wait()\n except Exception:\n m = (\"<h2 style=\\\"color:red\\\">Failed to run <code>{}</code></h2>\\n\"\n .format(escape(cmd)))\n msg += m\n log.exception(html_to_text(m).strip())\n else:\n if p.returncode == 0:\n m = (\"<h2>Successfully ran <code>{}</code></h2>\\n\"\n .format(escape(cmd)))\n msg += m\n log.info(html_to_text(m).strip())\n else:\n m = (\"<h2 style=\\\"color:red\\\">Failed to run <code>{}</code>\"\n \"</h2>\\n\".format(escape(cmd)))\n msg += m\n log.warn(html_to_text(m).strip())\n\n stderr = p.stderr.read()\n stdout = p.stdout.read() \n \n if (send_stdout or stderr) and stdout:\n msg += (\"<h3>stdout</h3>\\n <pre>{}</pre>\\n\"\n .format(escape(stdout)))\n \n if stderr:\n msg += \"<h3 style=\\\"color:red\\\">stderr</h3>\\n\"\n msg += (\"<pre style=\\\"color:red\\\">{}</pre>\\n\"\n .format(escape(stderr)))\n\n return msg",
"def validate_services(self, commands):\n for k, v in commands.iteritems():\n for cmd in v:\n output, code = k.run(cmd)\n if code != 0:\n return \"command `{}` returned {}\".format(cmd, str(code))\n return None",
"def handle_command(self, commands):\n print config.CMD_PROMPT + \" calling commands...\"\n \n success = False\n\n print config.CMD_PROMPT + \" Beginning Command Chain\"\n for command in commands:\n success = self.recursive_execute(command)\n # Is there going to be complex results checking and handling code?\n\n print config.CMD_PROMPT + \" Command Chain Completed\"\n return success",
"def check_systtests_pickle_files(self):\n # Make sure that there have been no more new scan points run since this\n # last processing. To do this, get the number of output directories\n # Compare this to the number in the pickle files.\n self.num_systematics = {}\n for basename in nsort(os.listdir(self.logdir)):\n if 'pckl' in basename:\n continue\n basename_content = nsort(\n os.listdir(os.path.join(self.logdir, basename))\n )\n # This means it is a directory containing something useful\n if 'config_summary.json' in basename_content:\n bits = basename.split('toy_')[-1].split('_')\n toyname = None\n add_bit = True\n for bit in bits:\n if bit == '' or bit == 'inj':\n add_bit = False\n if add_bit:\n if toyname is None:\n toyname = bit\n else:\n toyname += '_%s'%bit\n if '_full_syst_baseline' in toyname:\n toyname = toyname.split('_full_syst_baseline')[0]\n toyname = 'toy_%s_asimov'%toyname\n if toyname not in self.num_systematics.keys():\n self.num_systematics[toyname] = 0\n if 'wrong' in basename:\n # Only want to include each systematic once, but\n # they will have two directions.\n if 'pve' in basename:\n self.num_systematics[toyname] += 1\n else:\n self.num_systematics[toyname] += 1\n data_sets = from_file(os.path.join(self.logdir,\n 'data_sets.pckl'))\n if sorted(data_sets.keys()) != sorted(self.num_systematics.keys()):\n logging.info(\n 'Found files I assume to be from a previous run of'\n ' this processing script containing these truths: %s. '\n 'However, based on the directories in the overall '\n 'output directory there should be these truths: %s, so '\n 'they will be regenerated.'%(\n sorted(data_sets.keys()),\n sorted(self.num_systematics.keys())\n )\n )\n pickle_there = True\n for toyname in sorted(self.num_systematics.keys()):\n if len(data_sets[toyname].keys()) != self.num_systematics[toyname]:\n pickle_there = False\n if pickle_there:\n logging.info(\n 'Found files I assume to be from a previous run of'\n ' this processing script containing %i sytematics. If '\n 'this seems incorrect please delete the files: '\n 'data_sets.pckl, all_params.pckl and labels.pckl '\n 'from the logdir you have provided.'%(\n self.num_systematics[self.num_systematics.keys()[0]])\n )\n else:\n logging.info(\n 'Found files I assume to be from a previous run of'\n ' this processing script containing %i systematics. '\n 'However, based on the number of directories in the overall '\n 'output directory there should be %i systematics in '\n 'these pickle files, so they will be regenerated.'%(\n len(data_sets[data_sets.keys()[0]].keys()),\n self.num_systematics[self.num_systematics.keys()[0]]\n )\n )\n pickle_there = False\n\n return pickle_there",
"def _check_for_errors(self, status):\r\n\r\n # Case when test suite name is misspelled or file doesn't exist\r\n if status == 252:\r\n sys.stderr.write('Test suite \"{}\" was not found in path {}\\n'.format(self.name, self.path))\r\n print 'Return code is {}'.format(status)",
"def error_check(command):\r\n\r\n # TODO\r",
"def errors(job_name, jenkins_username, jenkins_token):\n global server\n# job_name = 'enterprise_pe-acceptance-tests_integration-system_pe_full-upgrade_weekend_2016.4.x' # 'enterprise_pe-orchestrator_intn-van-sys-pez-multi_2016.4.x-2016.4.x' # 'enterprise_pe-modules-vanagon-suite_intn-van-sys-pez-multi_daily-pe-modules-2016.4.x'\n server = Jenkins('https://cinext-jenkinsmaster-enterprise-prod-1.delivery.puppetlabs.net', username=jenkins_username, password=jenkins_token)\n info = server.get_job_info(job_name)\n builds = [server.get_build_info(job_name, build['number']) for build in info['builds']]\n failed_build_numbers = [b for b in builds if b['result'] == 'FAILURE']\n last_job_errors = None\n\n counts = defaultdict(int)\n similar = set()\n for build in failed_build_numbers:\n output = server.get_build_console_output(job_name, build['number'])\n finder = get_strategy(output)\n errors = finder(output)\n print \"Errors: {}\".format(errors)\n if last_job_errors:\n seq = difflib.SequenceMatcher(a=last_job_errors, b=errors)\n if seq.ratio() == 1.0:\n counts['exact'] += 1\n if seq.ratio() >= 0.7 and seq.ratio() < 1.0:\n counts['similar'] += 1\n similar.append(errors)\n else:\n last_job_errors = errors\n\n if last_job_errors:\n click.echo('Last job errors were:')\n click.echo('\\t{}'.format('\\n\\t'.join(last_job_errors)))\n\n if last_job_errors and 'exact' in counts:\n click.echo('There were {} jobs that failed with errors exactly the same as the last failed job:'.format(counts['exact']))\n click.echo('\\t{}'.format('\\n\\t'.join(last_job_errors)))\n\n if last_job_errors and 'similar' in counts:\n click.echo('There were {} jobs that failed with experienced similar errors as the last failed job:'.format(counts['exact']))\n click.echo('\\t{}'.format('\\n\\t'.join(last_job_errors)))\n for s in similar:\n click.echo('Additional Failed Job:')\n click.echo('\\t{}'.format('\\n\\t'.join(s)))",
"def all(\n command,\n):\n # If we get to this point all tests listed in 'pre' have passed\n # unless we have run the task with the --warn flag\n if not command.config.run.warn:\n print(\n \"\"\"\nAll Checks Passed Successfully\n==========================================\n\"\"\"\n )",
"def test_launch_failures(self):\n host = NodeSet(choice(self.server_managers[0].hosts)) # nosec\n self.log.info(\"Creating launch.py failure trigger files on %s\", host)\n failure_trigger = \"00_trigger-launch-failure_00\"\n failure_trigger_dir = os.path.join(self.base_test_dir, failure_trigger)\n failure_trigger_files = [\n os.path.join(self.base_test_dir, \"{}_local.yaml\".format(failure_trigger)),\n os.path.join(os.sep, \"etc\", \"daos\", \"daos_{}.yml\".format(failure_trigger)),\n os.path.join(self.base_test_dir, \"{}.log\".format(failure_trigger)),\n os.path.join(failure_trigger_dir, \"{}.log\".format(failure_trigger)),\n os.path.join(os.sep, \"tmp\", \"daos_dump_{}.txt\".format(failure_trigger)),\n os.path.join(self.tmp, \"valgrind_{}\".format(failure_trigger)),\n ]\n\n self.log.debug(\"Creating %s\", failure_trigger_dir)\n commands = [\n \"sudo -n mkdir -p {}\".format(failure_trigger_dir),\n \"sudo -n {}\".format(get_chown_command(options='-R', file=failure_trigger_dir)),\n ]\n\n local_trigger_file = failure_trigger_files.pop(0)\n self.log.debug(\"Creating %s\", local_trigger_file)\n try:\n with open(local_trigger_file, \"w\", encoding=\"utf-8\") as local_trigger:\n local_trigger.write(\"THIS IS JUST A TEST\\n\")\n except IOError as error:\n self.fail(\"Error writing {}: {}\".format(local_trigger_file, str(error)))\n\n for command in commands:\n if not run_remote(self.log, host, command, timeout=20).passed:\n self.fail(\"Error creating directory {}\".format(failure_trigger_dir))\n\n for failure_trigger_file in failure_trigger_files:\n self.log.debug(\"Creating %s\", failure_trigger_file)\n sudo = \"\" if failure_trigger_file.startswith(self.tmp) else \"sudo -n \"\n commands = [\n \"{}touch {}\".format(sudo, failure_trigger_file),\n \"{}{}\".format(sudo, get_chown_command(options='-R', file=failure_trigger_file)),\n \"echo 'THIS IS JUST A TEST' > {}\".format(failure_trigger_file),\n ]\n for command in commands:\n if not run_remote(self.log, host, command, timeout=20).passed:\n self.fail(\"Error creating file {}\".format(failure_trigger_file))",
"def error_check(output_list):\n #NOTE: Add error messages as you come across them\n #keys = error messages we are looking for\n #values = what we log if the key is found\n known_errors = {\n \"File Not Found\":\"File was not found\",\n \"No such host is known\":\"Failed to connect to Client\"\n }\n #Verify the command ran without errors\n for _line in output_list:\n for key, value in known_errors.items():\n if key in _line:\n log.error(value)\n return False\n return True",
"def test_failed():\n build()\n sh(\"%s %s --last-failed\" % (PYTHON, RUNNER_PY))",
"def try_mitigate_issues_if_any(self, command, code, out):\n if \"Error\" in out or \"Errno\" in out:\n issue_mitigated = self.check_known_issues_and_attempt_fix(out)\n if issue_mitigated:\n self.composite_logger.log_debug('\\nPost mitigation, invoking package manager again using: ' + command)\n code_after_fix_attempt, out_after_fix_attempt = self.env_layer.run_command_output(command, False, False)\n return self.try_mitigate_issues_if_any(command, code_after_fix_attempt, out_after_fix_attempt)\n return code, out",
"def test_nonexistent_report(self):\n command_line = [\"report\", \"notreport\"]\n for prefix in [[], [\"--propagate\"]]:\n self.check_system_exit(prefix + command_line, _PARSE_ERROR)",
"def feedback_no_result(self, classname, method):\n logging.debug(\"feedback_no_result(%s, %s)\", classname, method)\n\n logging.info(\"Was not able to find the commit to blame for testcase %s %s\", classname, method)\n\n with open(self.html_file, \"a\") as result_file:\n result_file.write(\"<br/><b>%s - %s is broken</b> but no braking commit found\\n\" % (classname, method))\n with open(self.txt_file, \"a\") as result_file:\n result_file.write(\"%s - %s is broken but no braking commit found\\n\" % (classname, method))\n with open(self.summary_file, \"a\") as result_file:\n result_file.write(\"<br/>Pass on %s#%s\\n\" % (classname.split(\".\")[-1], method))",
"def _checkCommandStatus(self, lastCommand=False):\n p = self.spawnProc\n p.sendline('echo $?')\n regex = re.compile('^[0-9]+',re.M)\n p.expect(regex, 2)\n msg = '_checkCommandStatus : Execution of command FAILED'\n \tif lastCommand:\n \t msg = '_checkCommandStatus :Execution of command : \"%s\" FAILED' %lastCommand\n if p.after != '0' and p.after != '99':\n raise AssertionError(msg)",
"def execute():\r\n arcpy.AddMessage(\"START BCA Processing\")\r\n arcpy.env.workspace = config.temp_data_gdb\r\n arcpy.env.overwriteOutput = True\r\n sys.path.append(config.notif_system_script_folder)\r\n\r\n # Other Variables\r\n arcpy.AddMessage(\"Import toolbox\")\r\n arcpy.ImportToolbox(config.notif_toolbox)\r\n REGEX_FOR_INVALID_CHARS = re.compile(r'[^0-9a-zA-Z]+')\r\n todayDate = datetime.datetime.now().strftime(\"%Y%m%d\")\r\n logFile = file(\r\n config.report_processing_log + \"\\\\\" + todayDate + \"_NotificationSystemLog\" + \".txt\", \"a\")\r\n\r\n\r\n # get all unzipped files uploaded to shared folder\r\n configfiles = [os.path.join(dirpath, f)\r\n for dirpath, dirnames, files in os.walk(config.SharedFolder)\r\n for f in files if f.endswith('.csv') or f.endswith('.xls') or f.endswith('.xlsx') or f.endswith('.XLS')]\r\n\r\n correct_config_files = [f for f in configfiles if \"\\BCAWeeklyPermitReport\\\\\" in f]\r\n\r\n # PREPARE workspace\r\n arcpy.AddMessage(\"Preparing workspace...\")\r\n for BCAreport in correct_config_files:\r\n\r\n input_file_name = BCAreport.split(\"\\\\\")[-1]\r\n\r\n MukimConstruct = arcpy.SearchCursor(config.MukimConstructSource)\r\n PermitDateExists = False\r\n\r\n for row in MukimConstruct:\r\n aux = input_file_name[:8]\r\n if \"CORRECTED\" not in BCAreport.upper():\r\n filedate = datetime.datetime.strptime(aux, \"%Y%m%d\")\r\n else:\r\n clean_filename = input_file_name.split(\".\")[0]\r\n filedate = datetime.datetime.strptime(clean_filename[-8:], \"%Y%m%d\")\r\n if filedate == row.PERMIT_DATE and \"CORRECTED\" not in BCAreport.upper():\r\n PermitDateExists = True\r\n break\r\n if PermitDateExists and \"CORRECTED\" not in BCAreport.upper():\r\n PermitDateExistsLog = file(\r\n config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[0] +\r\n \" file's Permit Date already exists\" + \".log\",\r\n \"a\")\r\n PermitDateExistsLog.write(\r\n \"Permit Date for the file \" + input_file_name + \" already exists in Mukim Construct at \" + str(\r\n datetime.datetime.now()))\r\n logFile.writelines(\r\n \"Permit Date for the file \" + input_file_name + \" already exists in Mukim Construct at \" + str(\r\n datetime.datetime.now()) + \"\\n\")\r\n\r\n else:\r\n\r\n # 00. Creation of geodatabases that will serve as workspaces\r\n logFile.writelines(\"00 Creation of temp gdb starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n if arcpy.Exists(config.TempDataGDB):\r\n arcpy.Delete_management(config.TempDataGDB)\r\n arcpy.CreateFileGDB_management(config.Notification, \"Temp_data.gdb\")\r\n else:\r\n arcpy.CreateFileGDB_management(config.Notification, \"Temp_data.gdb\")\r\n\r\n if arcpy.Exists(config.SDEDataGDB):\r\n arcpy.Delete_management(config.SDEDataGDB)\r\n arcpy.CreateFileGDB_management(config.Notification, \"Source.gdb\")\r\n else:\r\n arcpy.CreateFileGDB_management(config.Notification, \"Source.gdb\")\r\n\r\n if arcpy.Exists(config.CurrentMukimConstructDataGDB):\r\n arcpy.Delete_management(config.CurrentMukimConstructDataGDB)\r\n arcpy.CreateFileGDB_management(config.Notification, \"Final_data.gdb\")\r\n else:\r\n arcpy.CreateFileGDB_management(config.Notification, \"Final_data.gdb\")\r\n\r\n logFile.writelines(\"00 Creation of temp gdb ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 01. 
Import the base data\r\n logFile.writelines(\"01 Import of base data starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.FeatureClassToFeatureClass_conversion(config.MukimConstructSource, config.CurrentMukimConstructDataGDB,\r\n \"MUKIM_CONSTRUCT\", \"\", \"\", \"\")\r\n arcpy.FeatureClassToFeatureClass_conversion(config.MukimConstructByProjSource, config.CurrentMukimConstructDataGDB,\r\n \"MUKIM_CONSTRUCT_BYPROJ\", \"\", \"\", \"\")\r\n arcpy.FeatureClassToFeatureClass_conversion(config.DepotSource, config.SDEDataGDB, \"DepotBoundary\", \"\", \"\", \"\")\r\n arcpy.FeatureClassToFeatureClass_conversion(config.CatchmentSource, config.SDEDataGDB, \"CatchmentBoundary\", \"\", \"\", \"\")\r\n arcpy.FeatureClassToFeatureClass_conversion(config.LandlotSource, config.TempDataGDB, \"Land_lot\", \"\", \"\", \"\")\r\n # Calculate the lot key without letter\r\n arcpy.AddField_management(config.LandLot, \"Lotkey_wo_letter\", \"TEXT\", \"\", \"\", \"10\", \"\", \"NULLABLE\", \"NON_REQUIRED\",\r\n \"\")\r\n arcpy.CalculateField_management(config.LandLot, \"Lotkey_wo_letter\", \"!lot_key![:10]\", \"PYTHON\", \"\")\r\n\r\n logFile.writelines(\"01 Import of base data ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n\r\n # START THE LOOP TO PROCESS ALL THE FILES\r\n clcounter = 0\r\n\r\n if len(correct_config_files) == 0:\r\n logFile.writelines(\"No BCA report to process at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n arcpy.AddMessage(\"Processing files...\")\r\n for BCAreport in configfiles:\r\n\r\n clcounter += 1\r\n arcpy.AddMessage(BCAreport)\r\n input_file_name = BCAreport.split(\"\\\\\")[-1]\r\n MukimConstruct = arcpy.SearchCursor(config.MukimConstructSource)\r\n PermitDateExists = False\r\n\r\n # CHEKC FILE DATE EXISTS\r\n for row in MukimConstruct:\r\n aux = input_file_name[:8]\r\n if \"CORRECTED\" not in BCAreport.upper():\r\n filedate = datetime.datetime.strptime(aux, \"%Y%m%d\")\r\n else:\r\n clean_filename = input_file_name.split(\".\")[0]\r\n filedate = datetime.datetime.strptime(clean_filename[-8:], \"%Y%m%d\")\r\n if filedate == row.PERMIT_DATE and \"CORRECTED\" not in input_file_name.upper():\r\n PermitDateExists = True\r\n break\r\n\r\n HEADERVALID = True\r\n with xlrd.open_workbook(BCAreport) as wb:\r\n sh = wb.sheet_by_index(0)\r\n for r in range(sh.nrows):\r\n colcount = 0\r\n if sh.row_values(r)[colcount] == 'Error_Message':\r\n HEADERVALID = True\r\n elif sh.row_values(r)[colcount] == 'Project Ref No' or sh.row_values(r)[colcount] == 'Project_Ref_No':\r\n HEADERVALID = True\r\n else:\r\n PermitDateExistsLog = file(config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[\r\n 0] + \" file's header format is not acceptable for processing\" + \".log\", \"a\")\r\n PermitDateExistsLog.write(\r\n \"The header format for the file \" + input_file_name + \" is not acceptable for processing at \" + str(\r\n datetime.datetime.now()))\r\n logFile.writelines(\r\n \"The header format for the file \" + input_file_name + \" is not acceptable for processing at \" + str(\r\n datetime.datetime.now()) + \"\\n\")\r\n HEADERVALID = False\r\n break\r\n\r\n if not PermitDateExists and HEADERVALID:\r\n logFile.writelines(\"Starts processing \" + BCAreport + \" at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # Status update to run/not run the SiteInspection Update\r\n Log_SiteInspectionUpdate = file(config.SiteInspectionUpdate, \"w\")\r\n Log_SiteInspectionUpdate.writelines(\"NO\")\r\n Log_SiteInspectionUpdate.close()\r\n\r\n # 02. 
Import the BCA report to a geodatabase table\r\n logFile.writelines(\"02 Import of table to gdb starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n if arcpy.Exists(config.TempDataGDB + \"\\\\ConvertedBCAreport_02\"):\r\n arcpy.Delete_management(config.TempDataGDB + \"\\\\ConvertedBCAreport_02\")\r\n arcpy.CreateTable_management(config.TempDataGDB, \"ConvertedBCAreport_02\", config.TemplateConvertedBCAreport)\r\n else:\r\n arcpy.CreateTable_management(config.TempDataGDB, \"ConvertedBCAreport_02\", config.TemplateConvertedBCAreport)\r\n if arcpy.Exists(BCAreport[:-5] + '_err' + '.csv'):\r\n # rename old error report\r\n os.remove(BCAreport[:-5] + '_err' + '.csv')\r\n else:\r\n result = \"Error file does not exist\"\r\n if BCAreport.endswith('.xls') or BCAreport.endswith('.xlsx') or BCAreport.endswith('.XLS'):\r\n rows_out = arcpy.InsertCursor(config.BCAReportGDBTable)\r\n fldlist = arcpy.ListFields(config.BCAReportGDBTable)\r\n fldlist.pop(0)\r\n with xlrd.open_workbook(BCAreport) as wb:\r\n sh = wb.sheet_by_index(0)\r\n for r in range(sh.nrows):\r\n colcount = 0\r\n if sh.row_values(r)[colcount] != 'Error_Message':\r\n colcount = 0\r\n else:\r\n colcount = 1\r\n break\r\n for r in range(sh.nrows):\r\n colcounter = colcount\r\n if r > 0:\r\n new_row_out = rows_out.newRow()\r\n for efld in fldlist:\r\n if efld.name <> 'OBJECTID' and efld.name <> 'ConcatFields':\r\n new_row_out.setValue(efld.name, sh.row_values(r)[colcounter])\r\n colcounter += 1\r\n\r\n logFile.writelines(\"Inserting: \" + str(new_row_out) + \"\\n\")\r\n rows_out.insertRow(new_row_out)\r\n del rows_out, new_row_out\r\n\r\n elif BCAreport.endswith('.csv'):\r\n\r\n BCAreportread = csv.DictReader(open(BCAreport, 'rb'), delimiter=',', quotechar='\"')\r\n rows_out = arcpy.InsertCursor(config.BCAReportGDBTable)\r\n for attribute in BCAreportread:\r\n new_row_out = rows_out.newRow()\r\n new_row_out.Project_Ref_No = attribute['Project_Ref_No']\r\n new_row_out.Project_Title = attribute['Project_Title']\r\n new_row_out.House_Blk_No = attribute['House_Blk_No']\r\n new_row_out.Road_Name = attribute['Road_Name']\r\n new_row_out.Level_No = attribute['Level_No']\r\n new_row_out.Unit_No = attribute['Unit_No']\r\n new_row_out.Building_Name = attribute['Building_Name']\r\n new_row_out.Postal_Code = attribute['Postal_Code']\r\n new_row_out.Project_Mukim_nos = attribute['Project_Mukim_nos']\r\n new_row_out.Project_Lot_nos = attribute['Project_Lot_nos']\r\n new_row_out.Permit_Type_of_Work = attribute['Permit_Type_of_Work']\r\n new_row_out.Type_of_Work = attribute['Type_of_Work']\r\n new_row_out.Owner_s_name = attribute['Owners_name']\r\n new_row_out.Owner_s_firm_name = attribute['Owners_firm_name']\r\n new_row_out.Owner_s_address = attribute['Owners_address']\r\n new_row_out.Owner_s_Tel_No = attribute['Owners_Tel_No']\r\n new_row_out.Owner_s_Email_address = attribute['Owners_Email_address']\r\n new_row_out.Builder_s_name = attribute['Builders_name']\r\n new_row_out.Builder_s_firm_name = attribute['Builders_firm_name']\r\n new_row_out.Builder_s_address = attribute['Builders_address']\r\n new_row_out.Builder_s_Tel_No = attribute['Builders_Tel_No']\r\n new_row_out.Builder_s_email_address = attribute['Builders_email_address']\r\n new_row_out.PE_s_name = attribute['PEs_name']\r\n new_row_out.PE_s_firm_name = attribute['PEs_firm_name']\r\n new_row_out.PE_s_address = attribute['PEs_address']\r\n new_row_out.PE_s_Tel_No = attribute['PEs_Tel_No']\r\n new_row_out.PE_s_Email_address = attribute['PEs_Email_address']\r\n 
new_row_out.Architect_s_name = attribute['Architects_name']\r\n new_row_out.Architect_s_firm_name = attribute['Architects_firm_name']\r\n new_row_out.Architect_s_address = attribute['Architects_address']\r\n new_row_out.Architect_s_Tel_No = attribute['Architects_Tel_No']\r\n new_row_out.Architect_s_Email_address = attribute['Architects_Email_address']\r\n new_row_out.Project_Cost = attribute['Project_Cost']\r\n new_row_out.Project_Duration = attribute['Project_Duration']\r\n new_row_out.Approval_Date_DD_MM_YYYY_ = attribute['Approval_Date']\r\n rows_out.insertRow(new_row_out)\r\n if new_row_out:\r\n del new_row_out\r\n if rows_out:\r\n del rows_out\r\n\r\n except:\r\n log_error(\"Error in 02 Import of table to gdb: \", logFile)\r\n logFile.writelines(\"02 Import of table to gdb ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 03. Remove spaces in key fields for the concatenation\r\n logFile.writelines(\"03 Removing of spaces starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n rowsSpace = arcpy.UpdateCursor(config.BCAReportGDBTable)\r\n\r\n for row in rowsSpace:\r\n ProjRef = row.Project_Ref_No.strip()\r\n ProjMukim = row.Project_Mukim_nos.strip()\r\n ProjLot = row.Project_Lot_nos.strip()\r\n BuilderN = row.Builder_s_name.strip()\r\n row.Project_Ref_No = ProjRef\r\n row.Project_Mukim_nos = ProjMukim\r\n row.Project_Lot_nos = ProjLot\r\n row.Builder_s_name = BuilderN\r\n rowsSpace.updateRow(row)\r\n if row:\r\n del row\r\n if rowsSpace:\r\n del rowsSpace\r\n except:\r\n log_error(\"Error in 03 Removing of spaces: \", logFile)\r\n logFile.writelines(\"03 Removing of spaces ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 04. Concatenate Project_Ref_No, Project_Mukim_nos, Project_Lot_nos, Builder_s_name\r\n logFile.writelines(\"04 Concatenate the three fields starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n rows = arcpy.UpdateCursor(config.BCAReportGDBTable)\r\n for row in rows:\r\n expression = str(row.Project_Ref_No) + \"-\" + str(row.Project_Mukim_nos) + \"-\" + str(\r\n row.Project_Lot_nos) + \"-\" + str(row.Builder_s_name)\r\n row.ConcatFields = expression\r\n rows.updateRow(row)\r\n if row:\r\n del row\r\n if rows:\r\n del rows\r\n\r\n except:\r\n log_error(\"Error in 04 Concatenate the three fields: \", logFile)\r\n logFile.writelines(\"04 Concatenate the three fields ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 05. Create temporary tables for Unique and Duplicate records\r\n logFile.writelines(\"05 Create temporary tables starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n if arcpy.Exists(config.TempDataGDB + \"\\\\Uniquerows\"):\r\n arcpy.Delete_management(config.TempDataGDB + \"\\\\Uniquerows\")\r\n arcpy.CreateTable_management(config.TempDataGDB, \"Uniquerows\", config.TemplateConcat, \"\")\r\n else:\r\n arcpy.CreateTable_management(config.TempDataGDB, \"Uniquerows\", config.TemplateConcat, \"\")\r\n\r\n if arcpy.Exists(config.TempDataGDB + \"\\\\Duplicaterows\"):\r\n arcpy.Delete_management(config.TempDataGDB + \"\\\\Duplicaterows\")\r\n arcpy.CreateTable_management(config.TempDataGDB, \"Duplicaterows\", config.TemplateConcat, \"\")\r\n else:\r\n arcpy.CreateTable_management(config.TempDataGDB, \"Duplicaterows\", config.TemplateConcat, \"\")\r\n except:\r\n log_error(\"Error in 05 Create temporary tables: \", logFile)\r\n logFile.writelines(\"05 Create temporary tables ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 06. 
Separate unique and duplicate records\r\n logFile.writelines(\"06 Separate unique and duplicate rows starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n print \"Start step 06\"\r\n rows_inCB02 = arcpy.UpdateCursor(config.BCAReportGDBTable)\r\n rows_outUnique = arcpy.InsertCursor(config.UniqueRecords)\r\n # print rows_outUnique\r\n rows_outDuplicate = arcpy.InsertCursor(config.DuplicateRecords)\r\n\r\n rows_unique = []\r\n rows_duplicates = []\r\n for row in rows_inCB02:\r\n if row.ConcatFields not in rows_unique:\r\n rows_unique = rows_unique + [row.ConcatFields]\r\n else:\r\n rows_duplicates = rows_duplicates + [row.ConcatFields]\r\n\r\n print \"Start step 06 1\"\r\n for item in rows_unique:\r\n print \"clcounter: \" + str(clcounter)\r\n print \"item: \" + str(item)\r\n newrow = rows_outUnique.newRow()\r\n newrow.Concat = item\r\n # print newrow\r\n rows_outUnique.insertRow(newrow)\r\n\r\n print \"Start step 06 2\"\r\n for item in rows_duplicates:\r\n print \"clcounter: \" + str(clcounter)\r\n print \"item: \" + str(item)\r\n newrow = rows_outDuplicate.newRow()\r\n newrow.Concat = item\r\n rows_outDuplicate.insertRow(newrow)\r\n\r\n print \"Start step 06 3\"\r\n\r\n if rows_inCB02:\r\n del rows_inCB02\r\n if rows_outUnique:\r\n del rows_outUnique\r\n if rows_outDuplicate:\r\n del rows_outDuplicate\r\n if row:\r\n del row\r\n except:\r\n log_error(\"Error in 06 Separate unique and duplicate rows: \", logFile)\r\n logFile.writelines(\"06 Separate unique and duplicate rows ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 07. Get the rest of the fields for Uniquerows table\r\n logFile.writelines(\r\n \"07 Get the rest of the fields for unique rows starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.env.workspace = config.TempDataGDB\r\n arcpy.AddMessage(\"Starting toolbox JoinUniqueRestofFields\")\r\n\r\n try:\r\n arcpy.JoinUniqueRestofFields()\r\n except:\r\n log_error(\"Error in 07 Get the rest of the fields for unique rows: \", logFile)\r\n logFile.writelines(\r\n \"07 Get the rest of the fields for unique rows ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 08. Get the rest of the fields for Duplicaterows table\r\n logFile.writelines(\r\n \"08 Get the rest of the fields for duplicate rows starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.AddMessage(\"START toolbox JoinDuplicateRestofFields\")\r\n try:\r\n arcpy.JoinDuplicateRestofFields()\r\n\r\n except:\r\n log_error(\"Error in 08 Get the rest of the fields for duplicate rows: \", logFile)\r\n\r\n logFile.writelines(\r\n \"08 Get the rest of the fields for duplicate rows ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 09. 
Log duplicate records\r\n logFile.writelines(\"09 Log duplicate records starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.AddMessage(\"Logging duplicate records\")\r\n try:\r\n # Initialize the error log\r\n wbk = xlwt.Workbook()\r\n sheet = wbk.add_sheet('Book 1')\r\n row_count = 0\r\n col_count = 0\r\n header = ['Error_Message', 'Project_Ref_No', 'Project_Title', 'House_Blk_No', 'Road_Name', 'Level_No',\r\n 'Unit_No', 'Building_Name', 'Postal_Code', 'Project_Mukim_nos', 'Project_Lot_nos',\r\n 'Permit_Type_of_Work', 'Type_of_Work', 'Owners_name', 'Owners_firm_name', 'Owners_address',\r\n 'Owners_Tel_No', 'Owners_Email_address', 'Builders_name', 'Builders_firm_name',\r\n 'Builders_address', 'Builders_Tel_No', 'Builders_email_address', 'PEs_name', 'PEs_firm_name',\r\n 'PEs_address', 'PEs_Tel_No', 'PEs_Email_address', 'Architects_name', 'Architects_firm_name',\r\n 'Architects_address', 'Architects_Tel_No', 'Architects_Email_address', 'Project_Cost',\r\n 'Project_Duration', 'Approval_Date']\r\n for fieldname in header:\r\n sheet.write(row_count, col_count, fieldname)\r\n col_count += 1\r\n wbk.save(config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \"_err\" + \".xls\")\r\n\r\n # Log duplicate records\r\n rows = arcpy.SearchCursor(config.DuplicateRows)\r\n\r\n row_count = 1\r\n col_count = 0\r\n row = None\r\n for row in rows:\r\n message = ['Duplicate record in the BCA report', row.Project_Ref_No, row.Project_Title,\r\n row.House_Blk_No, row.Road_Name, row.Level_No, row.Unit_No, row.Building_Name,\r\n row.Postal_Code, row.Project_Mukim_nos, row.Project_Lot_nos, row.Permit_Type_of_Work,\r\n row.Type_of_Work, row.Owner_s_name, row.Owner_s_firm_name, row.Owner_s_address,\r\n row.Owner_s_Tel_No, row.Owner_s_Email_address, row.Builder_s_name,\r\n row.Builder_s_firm_name, row.Builder_s_address, row.Builder_s_Tel_No,\r\n row.Builder_s_email_address, row.PE_s_name, row.PE_s_firm_name, row.PE_s_address,\r\n row.PE_s_Tel_No, row.PE_s_Email_address, row.Architect_s_name, row.Architect_s_firm_name,\r\n row.Architect_s_address, row.Architect_s_Tel_No, row.Architect_s_Email_address,\r\n row.Project_Cost, row.Project_Duration, row.Approval_Date_DD_MM_YYYY_]\r\n col_count = 0\r\n for element in message:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n wbk.save(config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \"_err\" + \".xls\")\r\n if row:\r\n del row\r\n if rows:\r\n del rows\r\n except:\r\n log_error(\"Error in 09 Log duplicate records: \", logFile)\r\n\r\n logFile.writelines(\"09 Log duplicate records ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 10. 
Split rows based on Mukim numbers\r\n logFile.writelines(\"10 Splitting of rows based on mukim starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n if arcpy.Exists(config.SplittedMukimRows):\r\n arcpy.Delete_management(config.SplittedMukimRows)\r\n arcpy.CreateTable_management(config.TempDataGDB, \"Splitted_rows_mukim_03\", config.TemplateBCAReport, \"\")\r\n else:\r\n arcpy.CreateTable_management(config.TempDataGDB, \"Splitted_rows_mukim_03\", config.TemplateBCAReport, \"\")\r\n\r\n if arcpy.Exists(config.SplittedProjLotRows):\r\n arcpy.Delete_management(config.SplittedProjLotRows)\r\n arcpy.CreateTable_management(config.TempDataGDB, \"Splitted_rows_projlot_04\", config.TemplateBCAReport, \"\")\r\n else:\r\n arcpy.CreateTable_management(config.TempDataGDB, \"Splitted_rows_projlot_04\", config.TemplateBCAReport, \"\")\r\n\r\n rows_in = arcpy.SearchCursor(config.UniqueRows)\r\n rows_out = arcpy.InsertCursor(config.SplittedMukimRows)\r\n\r\n for row in rows_in:\r\n list_mukim_nos = row.Project_Mukim_nos.split(\",\")\r\n for proj_mukim_nos_id in list_mukim_nos:\r\n new_row_out = rows_out.newRow()\r\n new_row_out.Project_Mukim_nos = proj_mukim_nos_id\r\n new_row_out.PROJECTMUKIM_RAW = row.Project_Mukim_nos\r\n new_row_out.Project_Ref_No = row.Project_Ref_No\r\n new_row_out.Project_Title = row.Project_Title\r\n new_row_out.House_Blk_No = row.House_Blk_No\r\n new_row_out.Road_Name = row.Road_Name\r\n new_row_out.Level_No = row.Level_No\r\n new_row_out.Unit_No = row.Unit_No\r\n new_row_out.Building_Name = row.Building_Name\r\n new_row_out.Postal_Code = row.Postal_Code\r\n new_row_out.Project_Lot_nos = row.Project_Lot_nos\r\n new_row_out.Permit_Type_of_Work = row.Permit_Type_of_Work\r\n new_row_out.Type_of_Work = row.Type_of_Work\r\n new_row_out.Owner_s_name = row.Owner_s_name\r\n new_row_out.Owner_s_firm_name = row.Owner_s_firm_name\r\n new_row_out.Owner_s_address = row.Owner_s_address\r\n new_row_out.Owner_s_Tel_No = row.Owner_s_Tel_No\r\n new_row_out.Owner_s_Email_address = row.Owner_s_Email_address\r\n new_row_out.Builder_s_name = row.Builder_s_name\r\n new_row_out.Builder_s_firm_name = row.Builder_s_firm_name\r\n new_row_out.Builder_s_address = row.Builder_s_address\r\n new_row_out.Builder_s_Tel_No = row.Builder_s_Tel_No\r\n new_row_out.Builder_s_email_address = row.Builder_s_email_address\r\n new_row_out.PE_s_name = row.PE_s_name\r\n new_row_out.PE_s_firm_name = row.PE_s_firm_name\r\n new_row_out.PE_s_address = row.PE_s_address\r\n new_row_out.PE_s_Tel_No = row.PE_s_Tel_No\r\n new_row_out.PE_s_Email_address = row.PE_s_Email_address\r\n new_row_out.Architect_s_name = row.Architect_s_name\r\n new_row_out.Architect_s_firm_name = row.Architect_s_firm_name\r\n new_row_out.Architect_s_address = row.Architect_s_address\r\n new_row_out.Architect_s_Tel_No = row.Architect_s_Tel_No\r\n new_row_out.Architect_s_Email_address = row.Architect_s_Email_address\r\n new_row_out.Project_Cost = row.Project_Cost\r\n new_row_out.Project_Duration = row.Project_Duration\r\n new_row_out.Approval_Date_DD_MM_YYYY_ = row.Approval_Date_DD_MM_YYYY_\r\n rows_out.insertRow(new_row_out)\r\n if row:\r\n del row\r\n if new_row_out:\r\n del new_row_out\r\n if rows_in:\r\n del rows_in\r\n if rows_out:\r\n del rows_out\r\n except:\r\n log_error(\"Error in 10 Splitting of rows based on mukim: \", logFile)\r\n\r\n logFile.writelines(\"10 Splitting of rows based on mukim ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 11.Split rows based on Project lot numbers\r\n arcpy.AddMessage(\"Splitting rows 
based on project lots\")\r\n\r\n logFile.writelines(\r\n \"11 Splitting of rows based on project lot starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n rows_in03 = arcpy.SearchCursor(config.SplittedMukimRows)\r\n rows_out04 = arcpy.InsertCursor(config.SplittedProjLotRows)\r\n\r\n for row in rows_in03:\r\n list_proj_lot_nos = row.Project_Lot_nos.split(\",\")\r\n print list_proj_lot_nos\r\n for proj_lot_nos_id in list_proj_lot_nos:\r\n print proj_lot_nos_id\r\n new_row_out = rows_out04.newRow()\r\n new_row_out.Project_Lot_nos = proj_lot_nos_id\r\n new_row_out.PROJECTMUKIM_RAW = row.PROJECTMUKIM_RAW\r\n new_row_out.PROJECTLOT_RAW = row.Project_Lot_nos\r\n new_row_out.Project_Ref_No = row.Project_Ref_No\r\n new_row_out.Project_Title = row.Project_Title\r\n new_row_out.House_Blk_No = row.House_Blk_No\r\n new_row_out.Road_Name = row.Road_Name\r\n new_row_out.Level_No = row.Level_No\r\n new_row_out.Unit_No = row.Unit_No\r\n new_row_out.Building_Name = row.Building_Name\r\n new_row_out.Postal_Code = row.Postal_Code\r\n new_row_out.Project_Mukim_nos = row.Project_Mukim_nos\r\n new_row_out.Permit_Type_of_Work = row.Permit_Type_of_Work\r\n new_row_out.Type_of_Work = row.Type_of_Work\r\n new_row_out.Owner_s_name = row.Owner_s_name\r\n new_row_out.Owner_s_firm_name = row.Owner_s_firm_name\r\n new_row_out.Owner_s_address = row.Owner_s_address\r\n new_row_out.Owner_s_Tel_No = row.Owner_s_Tel_No\r\n new_row_out.Owner_s_Email_address = row.Owner_s_Email_address\r\n new_row_out.Builder_s_name = row.Builder_s_name\r\n new_row_out.Builder_s_firm_name = row.Builder_s_firm_name\r\n new_row_out.Builder_s_address = row.Builder_s_address\r\n new_row_out.Builder_s_Tel_No = row.Builder_s_Tel_No\r\n new_row_out.Builder_s_email_address = row.Builder_s_email_address\r\n new_row_out.PE_s_name = row.PE_s_name\r\n new_row_out.PE_s_firm_name = row.PE_s_firm_name\r\n new_row_out.PE_s_address = row.PE_s_address\r\n new_row_out.PE_s_Tel_No = row.PE_s_Tel_No\r\n new_row_out.PE_s_Email_address = row.PE_s_Email_address\r\n new_row_out.Architect_s_name = row.Architect_s_name\r\n new_row_out.Architect_s_firm_name = row.Architect_s_firm_name\r\n new_row_out.Architect_s_address = row.Architect_s_address\r\n new_row_out.Architect_s_Tel_No = row.Architect_s_Tel_No\r\n new_row_out.Architect_s_Email_address = row.Architect_s_Email_address\r\n new_row_out.Project_Cost = row.Project_Cost\r\n new_row_out.Project_Duration = row.Project_Duration\r\n new_row_out.Approval_Date_DD_MM_YYYY_ = row.Approval_Date_DD_MM_YYYY_\r\n rows_out04.insertRow(new_row_out)\r\n\r\n if row:\r\n del row\r\n if new_row_out:\r\n del new_row_out\r\n if rows_in03:\r\n del rows_in03\r\n if rows_out04:\r\n del rows_out04\r\n # print int(arcpy.GetCount_management(SplittedProjLotRows).getOutput(0))\r\n except:\r\n log_error(\"Error in 11 Splitting of rows based on project lot: \", logFile)\r\n logFile.writelines(\r\n \"11 Splitting of rows based on project lot ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 12. 
Remove spaces in Mukim and Project lot values\r\n logFile.writelines(\r\n \"12 Removing of spaces in mukim and project lot starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.AddMessage(\"Cleaning project lots\")\r\n try:\r\n\r\n rowsSpaces = arcpy.UpdateCursor(config.SplittedProjLotRows)\r\n\r\n for row in rowsSpaces:\r\n lot_no_spaces = row.Project_Lot_nos.strip()\r\n mukim_no_spaces = row.Project_Mukim_nos.strip()\r\n row.Project_Lot_nos = lot_no_spaces\r\n row.Project_Mukim_nos = mukim_no_spaces\r\n rowsSpaces.updateRow(row)\r\n if row:\r\n del row\r\n if rowsSpaces:\r\n del rowsSpaces\r\n except:\r\n log_error(\"Error in 12 Removing of spaces in mukim and project lot: \", logFile)\r\n logFile.writelines(\r\n \"12 Removing of spaces in mukim and project lot ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 13. Log empty Mukimlot or date fields\r\n logFile.writelines(\r\n \"13 Log empty mukim and project lot nos starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n rowsEmpty = arcpy.UpdateCursor(config.SplittedProjLotRows)\r\n\r\n for row in rowsEmpty:\r\n message = ['Missing Project lot or Mukim numbers', row.Project_Ref_No, row.Project_Title,\r\n row.House_Blk_No, row.Road_Name, row.Level_No, row.Unit_No, row.Building_Name,\r\n row.Postal_Code, row.Project_Mukim_nos, row.Project_Lot_nos, row.Permit_Type_of_Work,\r\n row.Type_of_Work, row.Owner_s_name, row.Owner_s_firm_name, row.Owner_s_address,\r\n row.Owner_s_Tel_No, row.Owner_s_Email_address, row.Builder_s_name,\r\n row.Builder_s_firm_name, row.Builder_s_address, row.Builder_s_Tel_No,\r\n row.Builder_s_email_address, row.PE_s_name, row.PE_s_firm_name, row.PE_s_address,\r\n row.PE_s_Tel_No, row.PE_s_Email_address, row.Architect_s_name, row.Architect_s_firm_name,\r\n row.Architect_s_address, row.Architect_s_Tel_No, row.Architect_s_Email_address,\r\n row.Project_Cost, row.Project_Duration, row.Approval_Date_DD_MM_YYYY_]\r\n message2 = ['Missing Project duration or Approval date', row.Project_Ref_No, row.Project_Title,\r\n row.House_Blk_No, row.Road_Name, row.Level_No, row.Unit_No, row.Building_Name,\r\n row.Postal_Code, row.Project_Mukim_nos, row.Project_Lot_nos, row.Permit_Type_of_Work,\r\n row.Type_of_Work, row.Owner_s_name, row.Owner_s_firm_name, row.Owner_s_address,\r\n row.Owner_s_Tel_No, row.Owner_s_Email_address, row.Builder_s_name,\r\n row.Builder_s_firm_name, row.Builder_s_address, row.Builder_s_Tel_No,\r\n row.Builder_s_email_address, row.PE_s_name, row.PE_s_firm_name, row.PE_s_address,\r\n row.PE_s_Tel_No, row.PE_s_Email_address, row.Architect_s_name,\r\n row.Architect_s_firm_name, row.Architect_s_address, row.Architect_s_Tel_No,\r\n row.Architect_s_Email_address, row.Project_Cost, row.Project_Duration,\r\n row.Approval_Date_DD_MM_YYYY_]\r\n if row.Project_Mukim_nos is None or (len(row.Project_Mukim_nos) < 4):\r\n col_count = 0\r\n for element in message:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n rowsEmpty.deleteRow(row)\r\n elif row.Project_Lot_nos is None or (len(row.Project_Lot_nos) == 0):\r\n col_count = 0\r\n for element in message:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n rowsEmpty.deleteRow(row)\r\n if row.Project_Duration is None or (len(row.Project_Duration) < 1):\r\n col_count = 0\r\n for element in message2:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n rowsEmpty.deleteRow(row)\r\n\r\n elif row.Approval_Date_DD_MM_YYYY_ is None or 
(len(row.Approval_Date_DD_MM_YYYY_) < 1):\r\n col_count = 0\r\n for element in message2:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n rowsEmpty.deleteRow(row)\r\n wbk.save(config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \"_err\" + \".xls\")\r\n if row:\r\n del row\r\n if rowsEmpty:\r\n del rowsEmpty\r\n except:\r\n log_error(\"Error in 13 Log for empty mukim and project lot nos: \", logFile)\r\n logFile.writelines(\"13 Log empty mukim and project lot nos ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 14. Error log for those with bad values\r\n arcpy.AddMessage(\"14 Logging bad values\")\r\n logFile.writelines(\"14 Log if bad values exist starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n rowsBadValues = arcpy.UpdateCursor(config.SplittedProjLotRows)\r\n\r\n for row in rowsBadValues:\r\n message = ['Mukim or Project lot numbers have bad values', row.Project_Ref_No, row.Project_Title,\r\n row.House_Blk_No, row.Road_Name, row.Level_No, row.Unit_No, row.Building_Name,\r\n row.Postal_Code, row.Project_Mukim_nos, row.Project_Lot_nos, row.Permit_Type_of_Work,\r\n row.Type_of_Work, row.Owner_s_name, row.Owner_s_firm_name, row.Owner_s_address,\r\n row.Owner_s_Tel_No, row.Owner_s_Email_address, row.Builder_s_name,\r\n row.Builder_s_firm_name, row.Builder_s_address, row.Builder_s_Tel_No,\r\n row.Builder_s_email_address, row.PE_s_name, row.PE_s_firm_name, row.PE_s_address,\r\n row.PE_s_Tel_No, row.PE_s_Email_address, row.Architect_s_name, row.Architect_s_firm_name,\r\n row.Architect_s_address, row.Architect_s_Tel_No, row.Architect_s_Email_address,\r\n row.Project_Cost, row.Project_Duration, row.Approval_Date_DD_MM_YYYY_]\r\n if len(REGEX_FOR_INVALID_CHARS.findall(row.Project_Lot_nos)) > 0:\r\n col_count = 0\r\n for element in message:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n rowsBadValues.deleteRow(row)\r\n elif len(REGEX_FOR_INVALID_CHARS.findall(row.Project_Mukim_nos)) > 0:\r\n col_count = 0\r\n for element in message:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n rowsBadValues.deleteRow(row)\r\n elif len(uptodigit(row.Project_Lot_nos)) > 0:\r\n col_count = 0\r\n for element in message:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n rowsBadValues.deleteRow(row)\r\n wbk.save(config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \"_err\" + \".xls\")\r\n\r\n if row:\r\n del row\r\n if rowsBadValues:\r\n del rowsBadValues\r\n except:\r\n log_error(\"Error in 14 Log if bad values exist: \", logFile)\r\n logFile.writelines(\"14 Log if bad values exist ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 15. Add zeros for Project Lot numbers\r\n logFile.writelines(\"15 Add zeros starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n rowsZeros = arcpy.UpdateCursor(config.SplittedProjLotRows)\r\n letters = string.ascii_letters\r\n for row in rowsZeros:\r\n letter_count = len(filter(functools.partial(operator.contains, letters), row.Project_Lot_nos))\r\n filled_string = row.Project_Lot_nos.zfill(5 + letter_count)\r\n row.Project_Lot_nos = filled_string\r\n rowsZeros.updateRow(row)\r\n if row:\r\n del row\r\n if rowsZeros:\r\n del rowsZeros\r\n except:\r\n log_error(\"Error in 15 Add zeros: \", logFile)\r\n logFile.writelines(\"15 Add zeros ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 16. 
Add and populate fields Mukim_Lot_No, Mukimlot_wo_letter, and Permit_date\r\n logFile.writelines(\"16 Add and populate fields starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n rowsPop = arcpy.UpdateCursor(config.SplittedProjLotRows)\r\n for row in rowsPop:\r\n expression = str(row.Project_Mukim_nos) + \"-\" + str(row.Project_Lot_nos)\r\n row.Mukim_Lot_No = expression\r\n date = filedate.strftime(\"%Y%m%d\")\r\n year = int(date[:4])\r\n month = int(date[4:6])\r\n day = int(date[6:8])\r\n permit_date = datetime.datetime(year, month, day)\r\n row.Permit_date = permit_date\r\n rowsPop.updateRow(row)\r\n if row:\r\n del row\r\n if rowsPop:\r\n del rowsPop\r\n # Calculate Mukimlot_wo_letter\r\n arcpy.CalculateField_management(config.SplittedProjLotRows, \"Mukimlot_wo_letter\", \"!Mukim_Lot_No![:10]\",\r\n \"PYTHON_9.3\", \"\")\r\n\r\n except:\r\n log_error(\"Error in 16 Add and populate fields: \", logFile)\r\n logFile.writelines(\"16 Add and populate fields ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 17.Match mukim lot and land lot\r\n logFile.writelines(\"17 Match mukim lot with landlot starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n arcpy.MatchMukimLandLot()\r\n except:\r\n log_error(\"Error in 17 Match mukim lot with landlot: \", logFile)\r\n logFile.writelines(\"17 Match mukim lot with landlot ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 18.Get unmatched mukim lot with land lot\r\n logFile.writelines(\"18 Get unmatched mukim lot starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.AddMessage(\"18 Get unmatched mukim lot\")\r\n try:\r\n arcpy.GetUnmatchedMukimLot()\r\n\r\n except:\r\n log_error(\"Error in 18 Get unmatched mukim lot: \", logFile)\r\n\r\n logFile.writelines(\"18 Get unmatched mukim lot ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 19. 
Log errors for unmatched mukim lots\r\n logFile.writelines(\"19 Log unmatched mukim lot starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n rowsUnmatched = arcpy.SearchCursor(config.UnmatchedMukimLot)\r\n row = None\r\n\r\n for row in rowsUnmatched:\r\n message = ['Unmatched mukim lot with the land lot', row.Project_Ref_No, row.Project_Title,\r\n row.House_Blk_No, row.Road_Name, row.Level_No, row.Unit_No, row.Building_Name,\r\n row.Postal_Code, row.Project_Mukim_nos, row.Project_Lot_nos, row.Permit_Type_of_Work,\r\n row.Type_of_Work, row.Owner_s_name, row.Owner_s_firm_name, row.Owner_s_address,\r\n row.Owner_s_Tel_No, row.Owner_s_Email_address, row.Builder_s_name,\r\n row.Builder_s_firm_name, row.Builder_s_address, row.Builder_s_Tel_No,\r\n row.Builder_s_email_address, row.PE_s_name, row.PE_s_firm_name, row.PE_s_address,\r\n row.PE_s_Tel_No, row.PE_s_Email_address, row.Architect_s_name, row.Architect_s_firm_name,\r\n row.Architect_s_address, row.Architect_s_Tel_No, row.Architect_s_Email_address,\r\n row.Project_Cost, row.Project_Duration, row.Approval_Date_DD_MM_YYYY_]\r\n col_count = 0\r\n for element in message:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n wbk.save(config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \"_err\" + \".xls\")\r\n if row:\r\n del row\r\n if rowsUnmatched:\r\n del rowsUnmatched\r\n\r\n with xlrd.open_workbook(config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \"_err\" + \".xls\") as wb:\r\n sh = wb.sheet_by_index(0)\r\n if sh.nrows == 1:\r\n os.remove(config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \"_err\" + \".xls\")\r\n\r\n except arcpy.ExecuteError:\r\n log_error(\"Error in 19 Log unmatched mukim lot: \", logFile)\r\n logFile.writelines(\"19 Log unmatched mukim lot ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 20. Prepare the table for MukimConstruct matching (add required fields)\r\n logFile.writelines(\"20 Add fields to be used for matching starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n if arcpy.Exists(config.MUKIMCONSTRUCTImport):\r\n arcpy.Delete_management(config.MUKIMCONSTRUCTImport)\r\n arcpy.FeatureClassToFeatureClass_conversion(config.MukimConstructSource, config.TempDataGDB,\r\n \"MUKIM_CONSTRUCT_Import\")\r\n else:\r\n arcpy.FeatureClassToFeatureClass_conversion(config.MukimConstructSource, config.TempDataGDB,\r\n \"MUKIM_CONSTRUCT_Import\")\r\n\r\n arcpy.AddField_management(config.MatchedMukimLot, \"Concat_4fields\", \"Text\", \"\", \"\", \"\")\r\n arcpy.AddField_management(config.MUKIMCONSTRUCTImport, \"Concat_4fields\", \"Text\", \"\", \"\", \"\")\r\n arcpy.AddField_management(config.MatchedMukimLot, \"PROJ_DURATION_MTHS2\", \"Double\", \"\", \"\", \"\")\r\n except:\r\n log_error(\"Error in 20 Add fields to be used for matching: \", logFile)\r\n logFile.writelines(\"20 Add fields to be used for matching ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 21. 
Calculate Project Duration as months\r\n logFile.writelines(\"21 Calculate PROJ_DURATION as months starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n rowsProjDur = arcpy.UpdateCursor(config.MatchedMukimLot)\r\n\r\n for row in rowsProjDur:\r\n durationstr = row.PROJ_DURATION_MTHS\r\n if \"Month\" in row.PROJ_DURATION_MTHS:\r\n durationintmth = int(durationstr.split(' ')[0])\r\n row.PROJ_DURATION_MTHS2 = durationintmth\r\n elif \"Year\" in row.PROJ_DURATION_MTHS:\r\n durationintyr = int(durationstr.split(' ')[0]) * 12\r\n row.PROJ_DURATION_MTHS2 = durationintyr\r\n rowsProjDur.updateRow(row)\r\n if rowsProjDur:\r\n del rowsProjDur\r\n if row:\r\n del row\r\n\r\n arcpy.DeleteField_management(config.MatchedMukimLot, \"PROJ_DURATION_MTHS\")\r\n arcpy.AddField_management(config.MatchedMukimLot, \"PROJ_DURATION_MTHS\", \"Double\")\r\n arcpy.CalculateField_management(config.MatchedMukimLot, \"PROJ_DURATION_MTHS\", \"[PROJ_DURATION_MTHS2]\")\r\n except:\r\n log_error(\"Error in 21 Calculate PROJ_DURATION as months: \", logFile)\r\n logFile.writelines(\"21 Calculate PROJ_DURATION as months ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 22. Concatenate 4 fields to be used in checking if mukimlot already exists in MUKIMCONSTRUCT\r\n logFile.writelines(\"22 Concatenate 4 fields starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n rowsConcat1 = arcpy.UpdateCursor(config.MUKIMCONSTRUCTImport)\r\n\r\n for row in rowsConcat1:\r\n expression = str(row.PROJ_REF_NO) + \"-\" + str(row.BUILDER_NAME) + \"-\" + str(\r\n row.LOT_KEY) + \"-\" + str(row.PERMIT_DATE)\r\n row.Concat_4fields = expression\r\n rowsConcat1.updateRow(row)\r\n if row:\r\n del row\r\n if rowsConcat1:\r\n del rowsConcat1\r\n\r\n rowsConcat2 = arcpy.UpdateCursor(config.MatchedMukimLot)\r\n\r\n for row in rowsConcat2:\r\n expression = str(row.PROJ_REF_NO) + \"-\" + str(row.BUILDER_NAME) + \"-\" + str(\r\n row.LOT_KEY) + \"-\" + str(row.PERMIT_DATE)\r\n row.Concat_4fields = expression\r\n rowsConcat2.updateRow(row)\r\n if row:\r\n del row\r\n if rowsConcat2:\r\n del rowsConcat2\r\n except:\r\n log_error(\"Error in 22 Concatenate 4 fields: \", logFile)\r\n logFile.writelines(\"22 Concatenate 4 fields ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 23.Match mukim lot with mukim construct\r\n logFile.writelines(\"23 Match mukimlot with mukim construct at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.env.workspace = config.TempDataGDB # \"G:\\\\Project\\\\GERIUPGRADE\\\\GPTools\\\\NotificationSysTools\\\\BCAReportProcessing\\\\Temp_data.gdb\"\r\n try:\r\n arcpy.MatchedMukimlotMukimConstruct()\r\n except:\r\n log_error(\"Error in 23 Match mukimlot with mukim construct: \", logFile)\r\n logFile.writelines(\"23 Match mukimlot with mukim construct ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 24.Copy raw values to project lot and project mukim columns and delete the 2 fields\r\n logFile.writelines(\"24 Recalculate projlot and projmukim based on original values starts at \" + str(\r\n datetime.datetime.now()) + \"\\n\")\r\n try:\r\n rowsRaw = arcpy.UpdateCursor(config.MatchedMukimLot)\r\n\r\n for row in rowsRaw:\r\n row.PROJ_MUKIM_NOS = row.PROJECTMUKIM_RAW\r\n row.PROJ_LOT_NOS = row.PROJECTLOT_RAW\r\n rowsRaw.updateRow(row)\r\n if row:\r\n del row\r\n if rowsRaw:\r\n del rowsRaw\r\n except:\r\n log_error(\"Error in 24 Recalculate projlot and projmukim based on original values:\", logFile)\r\n logFile.writelines(\"24 Recalculate projlot and projmukim based on 
original values ends at \" + str(\r\n datetime.datetime.now()) + \"\\n\")\r\n\r\n # 25. Export Cleaned BCA Permit report for CWD\r\n logFile.writelines(\r\n \"25 Export of Cleaned BCA Permit report starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n # Initialize the file\r\n CleanedBCAPermitReport = xlwt.Workbook()\r\n book = CleanedBCAPermitReport.add_sheet('Book 1')\r\n countrow = 0\r\n countcol = 0\r\n fields = ['Project Ref No', 'Project Title', 'House Blk No', 'Road Name', 'Level No', 'Unit No',\r\n 'Building Name', 'Postal Code', 'Project Mukim nos', 'Project Lot nos', 'Permit Type of Work',\r\n 'Type of Work', \"Owner's name\", \"Owner's firm name\", \"Owner's address\", \"Owner's Tel No\",\r\n \"Owner's Email address\", \"Builder's name\", \"Builder's firm name\", \"Builder's address\",\r\n \"Builder's Tel No\", \"Builder's email address\", \"PE's name\", \"PE's firm name\", \"PE's address\",\r\n \"PE's Tel No\", \"PE's Email address\", \"Architect's name\", \"Architect's firm name\",\r\n \"Architect's address\", \"Architect's Tel No\", \"Architect's Email address\", 'Project Cost',\r\n 'Project Duration', 'Approval Date(DD/MM/YYYY)']\r\n for fieldname in fields:\r\n book.write(countrow, countcol, fieldname)\r\n countcol += 1\r\n CleanedBCAPermitReport.save(config.CleanedBCAPermitFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \".xls\")\r\n\r\n # Copy the data to Excel File\r\n data = arcpy.SearchCursor(config.MatchedMukimLot)\r\n\r\n countrow = 1\r\n countcol = 0\r\n for row in data:\r\n message = [row.PROJ_REF_NO, row.PROJ_TITLE, row.HOUSE_BLK_NO, row.ROAD_NAME, row.LEVEL_NO,\r\n row.UNIT_NO, row.BUILDING_NAME, row.POSTAL_CODE, row.PROJ_MUKIM_NOS, row.PROJ_LOT_NOS,\r\n row.PERMIT_WORK_TYPE, row.WORK_TYPE, row.OWNER_NAME, row.OWNER_FIRM_NAME, row.OWNER_ADDR,\r\n row.OWNER_TEL, row.OWNER_EMAIL, row.BUILDER_NAME, row.BUILDER_FIRM_NAME,\r\n row.BUILDER_ADDR, row.BUILDER_TEL, row.BUILDER_EMAIL, row.PE_NAME, row.PE_FIRM_NAME,\r\n row.PE_ADDR, row.PE_TEL, row.PE_EMAIL, row.ARCHITECT_NAME, row.ARCHITECT_FIRM_NAME,\r\n row.ARCHITECT_ADDR, row.ARCHITECT_TEL, row.ARCHITECT_EMAIL, row.PROJ_COST,\r\n row.PROJ_DURATION_MTHS, row.PROJ_APPROVAL_DATE]\r\n countcol = 0\r\n for element in message:\r\n book.write(countrow, countcol, element)\r\n countcol += 1\r\n countrow += 1\r\n CleanedBCAPermitReport.save(config.CleanedBCAPermitFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \".xls\")\r\n if row:\r\n del row\r\n if data:\r\n del data\r\n except:\r\n log_error(\"Error in 25 Export of Cleaned BCA Permit Report: Error in 26 Catchment calculation: \", logFile)\r\n logFile.writelines(\"25 Export of Cleaned BCA Permit Report ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 26. Catchment calculation\r\n arcpy.env.workspace = config.TempDataGDB\r\n logFile.writelines(\"26 Catchment calculation starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n arcpy.CatchmentCalculation()\r\n except:\r\n log_error(\"Error in 26 Catchment calculation: \", logFile)\r\n logFile.writelines(\"26 Catchment calculation ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 27. Depot calculation\r\n logFile.writelines(\"27 Depot calculation starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n arcpy.DepotCalculation()\r\n except:\r\n log_error(\"Error in 27 Depot calculation: \", logFile)\r\n logFile.writelines(\"27 Depot calculation ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 28. 
Re-add date fields and populate\r\n logFile.writelines(\"28 Re-add date fields and populate starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n arcpy.AddField_management(config.MUKIMCONSTRUCT_Temp, \"PERMIT_DATE\", \"Date\")\r\n arcpy.AddField_management(config.MUKIMCONSTRUCT_Temp, \"PROJ_APPROVAL_DATE2\", \"Date\")\r\n arcpy.AddField_management(config.MUKIMCONSTRUCT_Temp, \"PROJ_END_DATE\", \"Date\")\r\n\r\n rows = arcpy.UpdateCursor(config.MUKIMCONSTRUCT_Temp)\r\n\r\n for row in rows:\r\n date = filedate.strftime(\"%Y%m%d\")\r\n year = int(date[:4])\r\n month = int(date[4:6])\r\n day = int(date[6:8])\r\n permit_date = datetime.datetime(year, month, day)\r\n row.PERMIT_DATE = permit_date\r\n row.PROJ_APPROVAL_DATE2 = datetime.datetime.strptime(row.PROJ_APPROVAL_DATE, '%d/%m/%Y')\r\n rows.updateRow(row)\r\n if row:\r\n del row\r\n if rows:\r\n del rows\r\n except:\r\n log_error(\"Error in 28 Re-add fields and populate: \", logFile)\r\n logFile.writelines(\"28 Re-add fields and populate ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 29. Calculate the end date field\r\n logFile.writelines(\"29 Calculate the end date field starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n\r\n rowsEndDate = arcpy.UpdateCursor(config.MUKIMCONSTRUCT_Temp)\r\n\r\n for row in rowsEndDate:\r\n sourcedate = row.PROJ_APPROVAL_DATE2\r\n # sourcedate = datetime.datetime.strptime(row.PROJ_APPROVAL_DATE2 , '%d/%m/%Y')\r\n months = int(row.PROJ_DURATION_MTHS)\r\n d = add_months(sourcedate, months)\r\n row.PROJ_END_DATE = d\r\n rowsEndDate.updateRow(row)\r\n if row:\r\n del row\r\n if rowsEndDate:\r\n del rowsEndDate\r\n except:\r\n log_error(\"Error in 29 Calculate the end date field: \", logFile)\r\n logFile.writelines(\"29 Calculate the end date field ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 30. Calculate Project Total Area\r\n logFile.writelines(\"30 Project total area calculation starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n arcpy.ProjectTotalArea()\r\n except:\r\n log_error(\"Error in 30 Project total area calculation: \", logFile)\r\n logFile.writelines(\"30 Project total area calculation ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 31. Calculate the BCA_CORRECTED_BY\r\n logFile.writelines(\"31 Calculate the BCA_CORRECTED_BY starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n rows_BCA_CB = arcpy.UpdateCursor(config.MUKIMCONSTRUCT_Temp)\r\n\r\n for row in rows_BCA_CB:\r\n if \"\\WSN\\\\\" in BCAreport:\r\n row.BCA_CORRECTED_BY = \"WSN\"\r\n elif \"\\WRN\\\\\" in BCAreport:\r\n row.BCA_CORRECTED_BY = \"WRN\"\r\n elif \"\\CWD\\\\\" in BCAreport:\r\n row.BCA_CORRECTED_BY = \"CWD\"\r\n rows_BCA_CB.updateRow(row)\r\n if row:\r\n del row\r\n if rows_BCA_CB:\r\n del rows_BCA_CB\r\n except:\r\n log_error(\"Error in 31 Calculate the BCA_CORRECTED_BY: \", logFile)\r\n\r\n # 32. 
Remove spaces in PROJ_REF_NO\r\n logFile.writelines(\r\n \"32 Removing of spaces in mukim and project lot starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n rowsSpaces = arcpy.UpdateCursor(config.MUKIMCONSTRUCT_Temp)\r\n\r\n for row in rowsSpaces:\r\n lot_no_spaces = row.PROJ_REF_NO.strip()\r\n row.PROJ_REF_NO = lot_no_spaces\r\n rowsSpaces.updateRow(row)\r\n if row:\r\n del row\r\n if rowsSpaces:\r\n del rowsSpaces\r\n except:\r\n log_error(\"Error in 32 Removing of spaces in mukim and project lot: \", logFile)\r\n logFile.writelines(\r\n \"32 Removing of spaces in mukim and project lot ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 33. Process the Mukim Construct by Project\r\n logFile.writelines(\r\n \"33 Process the Mukim Construct by Project starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.env.overwriteOutput = True\r\n try:\r\n MUKIM_CONSTRUCT_BYPROJ_IMPORT = config.TempDataGDB + \"\\\\MUKIM_CONSTRUCT_BYPROJ_IMPORT\"\r\n MUKIMCONBYPROJ_SORT = config.TempDataGDB + \"\\\\MUKIMCONBYPROJ_SORT\"\r\n MUKIM_CONSTRUCT_BYPROJ_DISS = config.TempDataGDB + \"\\\\MUKIM_CONSTRUCT_BYPROJ_DISS\"\r\n MUKIM_CONSTRUCT_BYPROJ_DISS__2_ = config.TempDataGDB + \"\\\\MUKIM_CONSTRUCT_BYPROJ_DISS\"\r\n\r\n if arcpy.Exists(MUKIM_CONSTRUCT_BYPROJ_IMPORT):\r\n arcpy.Delete_management(MUKIM_CONSTRUCT_BYPROJ_IMPORT)\r\n if arcpy.Exists(MUKIMCONBYPROJ_SORT):\r\n arcpy.Delete_management(MUKIMCONBYPROJ_SORT)\r\n if arcpy.Exists(MUKIM_CONSTRUCT_BYPROJ_DISS):\r\n arcpy.Delete_management(MUKIM_CONSTRUCT_BYPROJ_DISS)\r\n\r\n arcpy.MUKIMCONBYPROJ()\r\n # arcpy.MUKIMCONSTRUCTBYPROJProcess2()\r\n\r\n arcpy.Sort_management(MUKIM_CONSTRUCT_BYPROJ_IMPORT, MUKIMCONBYPROJ_SORT, \"PROJ_END_DATE DESCENDING\",\r\n \"UR\")\r\n arcpy.Dissolve_management(MUKIMCONBYPROJ_SORT, MUKIM_CONSTRUCT_BYPROJ_DISS, \"PROJ_REF_NO\",\r\n \"LOT_KEY FIRST;PROJ_REF_NO FIRST;PROJ_TITLE FIRST;HOUSE_BLK_NO FIRST;ROAD_NAME FIRST;POSTAL_CODE FIRST;LEVEL_NO FIRST;UNIT_NO FIRST;BUILDING_NAME FIRST;PROJ_MUKIM_NOS FIRST;PROJ_LOT_NOS FIRST;PERMIT_WORK_TYPE FIRST;WORK_TYPE FIRST;OWNER_NAME FIRST;OWNER_FIRM_NAME FIRST;OWNER_ADDR FIRST;OWNER_TEL FIRST;OWNER_EMAIL FIRST;BUILDER_NAME FIRST;BUILDER_FIRM_NAME FIRST;BUILDER_ADDR FIRST;BUILDER_TEL FIRST;BUILDER_EMAIL FIRST;PE_NAME FIRST;PE_FIRM_NAME FIRST;PE_ADDR FIRST;PE_TEL FIRST;PE_EMAIL FIRST;ARCHITECT_NAME FIRST;ARCHITECT_FIRM_NAME FIRST;ARCHITECT_ADDR FIRST;ARCHITECT_TEL FIRST;ARCHITECT_EMAIL FIRST;PROJ_TOT_AREA FIRST;PROJ_PARENT_CWDCATCHMENT FIRST;PROJ_PARENT_WSNDEPOT FIRST;PROJ_PARENT_WRPCATCHMENT FIRST;BCA_CORRECTED_BY FIRST;PROJ_DURATION_MTHS FIRST;PROJ_COST FIRST\",\r\n \"MULTI_PART\", \"DISSOLVE_LINES\")\r\n arcpy.JoinField_management(MUKIM_CONSTRUCT_BYPROJ_DISS, \"FIRST_PROJ_REF_NO\", MUKIMCONBYPROJ_SORT,\r\n \"PROJ_REF_NO\", \"PROJ_APPROVAL_DATE;PROJ_END_DATE;PERMIT_DATE\")\r\n arcpy.CalculateField_management(MUKIM_CONSTRUCT_BYPROJ_DISS__2_, \"FIRST_PROJ_TOT_AREA\",\r\n \"[Shape_Area]/10000\", \"VB\", \"\")\r\n\r\n except:\r\n log_error(\"Error in 33 Process the Mukim Construct by Project: \", logFile)\r\n logFile.writelines(\r\n \"33 Process the Mukim Construct by Project ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.AddMessage(\"33 END process MUKIM CONSTRUCT\")\r\n\r\n # 34. 
Filter on-going projects\r\n\r\n logFile.writelines(\"34 Filter on-going projects starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n # TempDataGDB = \"G:\\\\Project\\\\GERIUPGRADE\\\\GPTools\\\\NotificationSysTools\\\\BCAReportProcessing\\\\Temp_data.gdb\"\r\n MUKIM_CONSTRUCT_BYPROJ_DISS = config.TempDataGDB + \"\\\\MUKIM_CONSTRUCT_BYPROJ_DISS\"\r\n rowsIn = arcpy.UpdateCursor(MUKIM_CONSTRUCT_BYPROJ_DISS)\r\n\r\n row = None\r\n for row in rowsIn:\r\n strdays = str(row.PROJ_END_DATE.date() - datetime.date.today())\r\n splitDays = strdays.split()\r\n if splitDays[0] == '0:00:00':\r\n result = \"On-going project (but will end today)\"\r\n else:\r\n if int(splitDays[0]) < 0:\r\n rowsIn.deleteRow(row)\r\n else:\r\n result = \"On-going project\"\r\n if rowsIn:\r\n del rowsIn\r\n if row:\r\n del row\r\n\r\n except:\r\n log_error(\"Error in 34 Filter on-going projects: \", logFile)\r\n logFile.writelines(\"34 Filter on-going projects ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 35. Append the new data to MUKIM_CONSTRUCT\r\n logFile.writelines(\r\n \"35 Append the new data to MUKIM_CONSTRUCT starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n arcpy.AppendNewData()\r\n except:\r\n log_error(\"Error in 35 Append the new data to MUKIM_CONSTRUCT: \", logFile)\r\n logFile.writelines(\r\n \"35 Append the new data to MUKIM_CONSTRUCT ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # Clean the memory and the schema lock\r\n arcpy.RefreshCatalog(config.Notification)\r\n arcpy.Compact_management(config.TempDataGDB)\r\n gc.collect()\r\n\r\n # Status update to run/not run the SiteInspection Update\r\n Log_SiteInspectionUpdate = file(config.SiteInspectionUpdate, \"w\")\r\n Log_SiteInspectionUpdate.writelines(\"YES\")\r\n Log_SiteInspectionUpdate.close()\r\n\r\n arcpy.AddMessage(\"END BCA Processing\")\r\n arcpy.AddMessage(\"Passing file date to other functions: \" + repr(filedate))\r\n\r\n # Generate Report\r\n import ReportGeneration_Adhoc_WithProjects as gen_report\r\n gen_report.run(filedate)\r\n #\r\n # # Send email to departments\r\n # import EmailGenerationCompletion_adhoc as send_dept_notification\r\n # if \"CORRECTED\" in BCAreport.upper():\r\n # send_dept_notification.run(filedate, corrected=True)\r\n # else:\r\n # send_dept_notification.run(filedate)\r\n\r\n # Generate advisory letters\r\n import LetterGeneration as letter_gen\r\n letter_gen.run(filedate)\r\n #\r\n # # Send letters to project team\r\n # import EmailGeneration as send_advisory_email\r\n # send_advisory_email.run(filedate)\r\n\r\n\r\n # 36. Move the BCAReport in the backup folder\r\n for BCAreport in correct_config_files:\r\n\r\n input_file_name = BCAreport.split(\"\\\\\")[-1]\r\n bk_file_path = os.path.join(config.BCAreportBackupFolder, input_file_name)\r\n\r\n # if the same file name exists in the backup folder, rename the new file with timestamp and move\r\n if os.path.exists(bk_file_path):\r\n\r\n new_filename = datetime.datetime.now().strftime(\"%Y%m%d-%H%M\") + input_file_name\r\n new_filepath = os.path.join(config.BCAreportBackupFolder, new_filename)\r\n shutil.copy(BCAreport, new_filepath)\r\n os.remove(BCAreport)\r\n\r\n # if the filename does not exist in the backup folder, move the file to backup\r\n else:\r\n shutil.move(BCAreport, config.BCAreportBackupFolder)\r\n\r\n logFile.writelines(\"Moved the BCA report to the backup folder at \" + str(datetime.datetime.now()) + \"\\n\")\r\n logFile.close()",
"def cmd_error_check(self, cmd_out):\n for err in self.err_strings:\n if re.search('\\\\b%s\\\\b' % (err), cmd_out, re.I):\n _log.info(cmd_out)\n _log.info(\n \"Cmd execution failed! with this Return Error: \\n%s\" % (\n cmd_out))\n return 0",
"def test_failedCommandProvidesOutput(self):\n bookTeX = FilePath(self.mktemp() + \".tex\")\n builder = BookBuilder()\n inputState = bookTeX.parent().children()\n exc = self.assertRaises(\n CommandFailed,\n builder.buildPDF,\n bookTeX, self.howtoDir, FilePath(self.mktemp()))\n self.assertTrue(exc.output)\n newOutputState = set(bookTeX.parent().children()) - set(inputState)\n self.assertEqual(len(newOutputState), 1)\n workPath = newOutputState.pop()\n self.assertTrue(\n workPath.isdir(),\n \"Expected work path %r was not a directory.\" % (workPath.path,))",
"def test_check_exit_status(self):\n run_dir_success = 'data/nanopore_data/run4/done_demuxing/20200104_1412_MN19414_AAU644_68125dc2'\n success_run = MinIONqc(run_dir_success, None, None)\n self.assertTrue(success_run.check_exit_status('data/nanopore_data/run4/done_demuxing/20200104_1412_MN19414_AAU644_68125dc2/.exitcode_for_nanoseq'))\n run_dir_fail = 'data/nanopore_data/run8/demux_failed/20200108_1412_MN19414_AAU648_68125dc2'\n fail_run = MinIONqc(run_dir_fail, None, None)\n self.assertFalse(fail_run.check_exit_status('data/nanopore_data/run8/demux_failed/20200108_1412_MN19414_AAU648_68125dc2/.exitcode_for_nanoseq'))",
"def run_command_check(self):\n pass",
"def check_commands(self):\n pass",
"def run_test_second():\n os.system(\n \"sed -n '/(Failed)$/p' test_op_log.txt | awk '{print $3}' >& rerun_op.txt\"\n )\n rerun_list = get_op_list('rerun_op.txt')\n if len(rerun_list):\n print(\n \"-------there are \"\n + str(len(rerun_list))\n + \" op(s) need to rerun!!!-------\"\n )\n for failed_op in rerun_list:\n os.system(\"ctest -R \\\"(\" + failed_op + \")\\\" \")\n else:\n print(\"-------all op passed successfully!!!-------\")",
"def extract_failed_tests_info():\n global g_failed_testnames\n global g_failed_test_paths\n\n if os.path.isfile(g_temp_filename):\n console_file = open(g_temp_filename,'r') # open temp file that stored jenkins job console output\n try:\n for each_line in console_file: # go through each line of console output to extract build ID, data/time ...\n each_line.strip()\n print(each_line)\n if (\"Test Result\" in each_line) and (\"failure\" in each_line): # the next few lines will contain failed tests\n temp = each_line.split(\"testReport\")\n if (\"Test Result\" in temp[1]) and (\"failure\" in temp[1]): # grab number of failed tests\n try:\n tempCount = int(temp[1].split(\"</a>\")[1].split(\" \")[0].split(\"(\")[1])\n\n if isinstance(tempCount, int) and tempCount > 0: # temp[1], temp[2],... should contain failed tests\n for findex in range(2,len(temp)):\n tempMess = temp[findex].split(\">\")\n g_failed_test_paths.append(tempMess[0].strip('\"'))\n ftestname = tempMess[1].strip(\"</a\")\n nameLen = len(ftestname)\n true_testname = ftestname[8:nameLen] if 'r_suite.' in ftestname else ftestname\n g_failed_testnames.append(true_testname)\n break # done. Only one spot contains failed test info.\n except:\n break # file probably does not have failures captured.\n finally:\n console_file.close()",
"def test_stratis_bad_subcommand(self):\n for command_line in [\n [\"notasub\"],\n [\"daemon\", \"notasub\"],\n [\"pool\", \"notasub\"],\n [\"blockdev\", \"notasub\"],\n [\"filesystem\", \"notasub\"],\n ]:\n for prefix in [[], [\"--propagate\"]]:\n self.check_system_exit(prefix + command_line, _PARSE_ERROR)",
"def testError(self):\n cmds = \"\"\"chown 0 missingFile\npwd\nexit\n\"\"\"\n\n def _cbCheckResult(res):\n self.assertNotIn(self.testDir.asBytesMode().path, res)\n\n d = self._getBatchOutput(cmds)\n d.addCallback(_cbCheckResult)\n return d",
"def slurm_check(path='.'):\n for file in shrunner.get_files(path, '.out'):\n with open(file, 'r') as f:\n for line in f.read().splitlines():\n if 'issue' in line.lower():\n print(file)\n print(line)\n print('-' * 40)",
"def checkForCommand(quickLogger, commandList):\n\n for command in commandList:\n\n cmd = \"which -s \" + command + \" > \" + os.devnull + \" 2>&1\"\n retcode = os.system(cmd)\n \n if(retcode):\n quickLogger.critical(\"unix command \"+command+\" not found.\")\n raise CommandNotFound"
] | [
"0.6216581",
"0.5714732",
"0.56718695",
"0.56527007",
"0.5601208",
"0.55626947",
"0.5551729",
"0.5527082",
"0.5517359",
"0.54875124",
"0.5445772",
"0.5432267",
"0.54280514",
"0.54172146",
"0.5366206",
"0.5357607",
"0.5353905",
"0.5338872",
"0.5335011",
"0.5333496",
"0.53188115",
"0.53051627",
"0.52903223",
"0.5280883",
"0.52702796",
"0.52629435",
"0.5249522",
"0.5249344",
"0.52456796",
"0.52426404"
] | 0.67288995 | 0 |
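The lot-number padding rule in step 15 of the record above (a zfill whose target width is widened by the count of letters in the lot string) is easy to misread inside the arcpy cursor loop. The sketch below restates the same rule in plain Python; the helper name and the sample values are illustrative only and are not part of the original script.

import string

def pad_lot_number(lot_no):
    # Pad the numeric part to 5 digits; widen the zfill target by one for
    # every letter so that e.g. '123A' becomes '00123A' (step 15 above).
    letter_count = sum(1 for ch in lot_no if ch in string.ascii_letters)
    return lot_no.zfill(5 + letter_count)

# Hypothetical sample values, not taken from any real report:
for sample in ['123', '123A', '7XY']:
    print(sample + ' -> ' + pad_lot_number(sample))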
Writes the output in simple shell script format. The default format is a shell script file containing the command lines. | def write_default(workflows, output_dir):
# Calculate the total number of commands
number_of_commands = 0
for workflow in workflows:
number_of_commands += sum(map(len, workflow))
# Create command line strings
i = 0
out_lines = ['echo Started executing shell script at:', 'date']
for workflow in workflows:
for workflow_step in workflow:
for cmd in workflow_step:
i += 1
cmd_list = cmd.command_lines
cmd_list = map(clean_command_lines, cmd_list)
out_lines.append('echo Executing command {0}/{1}:'
.format(i, number_of_commands))
for c in cmd_list:
c = c.replace('>', '\\>')
c = c.replace('|', '\\|')
out_lines.append('echo ' + c)
out_lines.append('date')
#Load modules
if cmd.load_module:
for module in cmd.load_module:
out_lines.append(module)
#The command
out_lines += cmd_list
#Unload modules
if cmd.unload_module:
for module in cmd.unload_module:
out_lines.append(module)
out_lines.append('echo Finished at:')
out_lines.append('date')
#Open and write command lines
fl_name = '{0}_output_{1}.sh'.format(NAME, START_TIME)
output_file_path = os.path.join(output_dir, fl_name)
try:
out_fl = open(output_file_path, 'w')
except:
raise STAPLERerror.STAPLERerror('Unable to create output file:'
'\n{0}'.format(os.path.join(output_dir,
fl_name)))
out_fl.write('#!/usr/bin/env bash\n')
out_fl.write('\n'.join(out_lines))
out_fl.close()
return [output_file_path] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_subshell_file_contents(cmd, skip_module_loading, skip_module_unloading):\r\n\r\n\r\n out_lines = []\r\n\r\n # Invoke commands to produce their output command string(s)\r\n cmd_list = cmd.command_lines\r\n cmd_list = map(clean_command_lines, cmd_list)\r\n\r\n # Write current command to stdout\r\n out_lines.append('echo ' + '-'*80)\r\n out_lines.append('echo Executing the following command:')\r\n for c in cmd_list:\r\n out_lines.append('echo \"' + c + '\"')\r\n out_lines.append('date')\r\n\r\n # Write current command to errout\r\n out_lines.append('echo ' + '-'*80 + ' >&2')\r\n out_lines.append('echo Executing the following command: >&2')\r\n for c in cmd_list:\r\n out_lines.append('echo \"' + c + '\" >&2')\r\n out_lines.append('date >&2')\r\n\r\n # Write module load commands required for current command to\r\n # the output shell script\r\n if not skip_module_loading:\r\n if cmd.load_module:\r\n for module in cmd.load_module:\r\n out_lines.append(module)\r\n\r\n # Write command lines to the output shell script\r\n out_lines += cmd_list\r\n out_lines += ['#']*5\r\n\r\n # Write module unload commands required for current command\r\n # to the output shell script\r\n if not skip_module_unloading:\r\n if cmd.unload_module:\r\n for module in cmd.unload_module:\r\n out_lines.append(module)\r\n\r\n #Write to stdout\r\n out_lines.append('echo Finished at:')\r\n out_lines.append('date')\r\n #Write to errout\r\n out_lines.append('echo Finished at: >&2')\r\n out_lines.append('date >&2')\r\n\r\n return out_lines",
"def write_shell_script(dir: str, name: str, content: List[str]) -> str:\n\n script_path = os.path.join(dir, name)\n with open(script_path, \"w\") as f:\n f.write(\"#! /bin/bash\\n\")\n for line in content:\n f.write(f\"{line}\\n\")\n f.write(\"\\n\")\n\n os.chmod(script_path, 0o755)\n return script_path",
"def sysfileout():\n\n if sys.platform.startswith('linux') or sys.platform.startswith('darwin'):\n fileout=' >> '+stdoutpath()\n else:\n fileout=''\n\n return fileout",
"def write_shell_scripts(airfoils, qsh_template, nsetup, ntype, out_dir):\n for nairfoil, sim_setup in airfoils.iteritems():\n for aoa in sim_setup['aoas']:\n # Create simulation name\n sim_name = create_sim_name(nairfoil, ntype, nsetup, aoa)\n # Create fluent journal file\n with open(qsh_template, 'r') as f:\n qtxt = f.read()\n # Start to replace parameters inside the journal\n qtxt = qtxt.replace('SIMNAME', sim_name)\n qtxt = qtxt.replace('in.jou', sim_name + '.jou')\n qtxt = qtxt.replace('fluent.out', sim_name + '.out')\n # Write new shell script to out_dir\n qout = sim_name + '.qsh'\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n with open(os.path.join(out_dir, qout), 'w') as f:\n f.write(qtxt)\n return True",
"def run_script(input_file, script_name, interpreter='python'):\r\n from paver.easy import sh\r\n from paver.path import path\r\n rundir = path(input_file).dirname()\r\n output_text = sh('cd %(rundir)s && %(interpreter)s %(script_name)s 2>&1' % vars(), capture=True)\r\n response = '\\n::\\n\\n\\t$ %(interpreter)s %(script_name)s\\n\\t' % vars()\r\n response += '\\n\\t'.join(output_text.splitlines())\r\n while not response.endswith('\\n\\n'):\r\n response += '\\n'\r\n return response",
"def shell():\n parser = argparse.ArgumentParser(\n \n description='pyrpipe diagnostic utility\\nGenerate shell script.',\n \n usage='''pyrpipe_diagnostic report [<args>] <logfile>\n \n ''') \n parser.add_argument('-o', help='out file \\ndefault: same as input logfile',action=\"store\")\n parser.add_argument('-c',help='Dump command options [(a)ll,fa(i)l,(p)ass]\\ndefault: a',default='a',action=\"store\")\n parser.add_argument('-v',help='verbose',action=\"store_true\")\n parser.add_argument('-f',help='Filter by programs. Provide a comma-separated list e.g., prefetch,STAR,bowtie2 \\ndefault None')\n parser.add_argument('logfile', help='The log file generated by pyrpipe',action=\"store\")\n args = parser.parse_args(sys.argv[2:])\n \n logFile=args.logfile \n #parse args\n vFlag=args.v\n if vFlag:\n print(\"Generating report\")\n outFile=\"\"\n if args.o is None:\n outFile=pu.get_file_basename(logFile)\n else:\n outFile=args.o\n outFile+='.sh'\n \n filters=[]\n if args.f is not None:\n filters= args.f.split(',')\n \n reports.generateBashScript(logFile,outFile,filters,args.c)",
"def make_output(args, stdout=sys.stdout):\n if args.outfile:\n return FileOutput(args.outfile)\n else:\n return StreamOutput(stdout)",
"def standard_output(self) -> global___Statement.StandardOutput:",
"def createbash(self,executable,**keywords):\n\t\timport os\n\t\timport stat\n\n\t\toutputname = os.path.join(\"Results\",self.outputfile.replace(\".root\",\"_${SGE_TASK_ID}.root\"))\n\t\t# Extract the input files\n\t\tinputfiles = \"\"\n\t\tfor f in self.inputfiles:\n\t\t\tinputfiles += f+\",\"\n\t\tinputfiles = inputfiles[:-1]\n\n\t\tlines = \"#!/bin/bash\\n\"\n\t\tlines += \"\\n# Script created automatically by skimfiles.py utility\\n\"\n\t\tlines += \"\\nmkdir -p Results\\n\"\n\t\tlines += \"export PATH=$PATH:\"+os.path.join(self.basedir,\"bin\")+\":\"+os.path.join(self.pkgpath,\"bin\")+\"\\n\"\n\t\tlines += \"export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:\"+self.libsdir+\"\\n\"\n\t\tlines += \"\\n\"\n\t\tlines += \"EVENTFILE=\"+self.eventsfile+\"\\n\"\n\t\tlines += \"EVENTS=$(cat $EVENTFILE | head -n $SGE_TASK_ID | tail -n 1)\\n\"\n\t\tlines += executable+\" \"+self.cutid+\" -i \"+inputfiles+\" -c \"+self.cutfile+\\\n\t\t\t\t\" -e $EVENTS -o \"+outputname+\"\\n\"\n\t\n\t\tfilename = self.nameID+\".sh\"\n\t\tf = open(filename,\"w\")\n\t\tf.writelines(lines)\n\t\tf.close()\n\t\tos.chmod(filename,stat.S_IRWXU+stat.S_IRGRP+stat.S_IXGRP+stat.S_IXOTH)\n\t\t\n\t\treturn filename",
"def write_output(self):",
"def write_bash_script(command_line, write_folder, script_name=\"run_script.sh\"):\n script_path = join_folder(write_folder, script_name) # script location\n with open(script_path, 'w') as script:\n script.write(\"#! /bin/bash\\n\")\n script.write(\"# The script is designed to run the following command:\\n\")\n script.write(command_line)\n # run permissions for the script:\n os.fchmod(script_path, S_IEXEC)\n return script_path",
"def main(args):\n # Results: print to console and also write to output file\n pass",
"def dumpf(self, gzip=False):\n if 0 != len(self.sources):\n os.mkdir(self.name)\n filename = os.path.join(self.name, 'bootstrap.sh')\n f = codecs.open(filename, 'w', encoding='utf-8')\n elif gzip:\n filename = '{0}.sh.gz'.format(self.name)\n f = gziplib.open(filename, 'w')\n else:\n filename = '{0}.sh'.format(self.name)\n f = codecs.open(filename, 'w', encoding='utf-8')\n f.write(self.comment)\n f.write('cd \"$(dirname \"$0\")\"\\n')\n for filename2, content in sorted(self.sources.iteritems()):\n f2 = open(os.path.join(self.name, filename2), 'w')\n f2.write(content)\n f2.close()\n for out in self.out:\n f.write(out)\n f.close()\n if gzip and 0 != len(self.sources):\n filename = 'sh-{0}.tar.gz'.format(self.name)\n tarball = tarfile.open(filename, 'w:gz')\n tarball.add(self.name)\n tarball.close()\n return filename\n return filename",
"def run_script(input_file, run_dir, script_name, interpreter='python'):\n from paver.runtime import sh\n from paver.path import path\n docdir = path(input_file).dirname()\n output_text = sh('cd %(docdir)s/%(run_dir)s;%(interpreter)s %(script_name)s 2>&1' % vars(),\n capture=True)\n response = '\\n::\\n\\n\\t$ %(interpreter)s %(script_name)s\\n\\t' % vars()\n response += '\\n\\t'.join(output_text.splitlines())\n while not response.endswith('\\n\\n'):\n response += '\\n'\n return response",
"def writeSystMergeScript(self, out_path, script_path, **options):\n # writing script\n script = \"#!/bin/bash \\n\"\n if self.pp.cmsswpath != '':\n script += cmssw_head.format(scram_arch = os.environ['SCRAM_ARCH'], \n cmssw_base = self.pp.cmsswpath)\n # export additional variables \n for name in options:\n script += \"export {}='{}'\\n\".format(name, options[name])\n # first copy the backup file in case jobs have to be resubmitted\n\n script += \"\\ncp ${BACKUP} ${INFILE}\\n\"\n script += \"\\npython {}\".format(script_path)\n\n # writing script to file and chmodding\n with open(out_path, \"w\") as f:\n f.write(script)\n st = os.stat(out_path)\n os.chmod(out_path, st.st_mode | stat.S_IEXEC)\n\n if os.path.exists(out_path):\n return out_path\n else:\n return None",
"def write_inno_script (self, fd):\n print(\"; WARNING: This script has been created by py2exe. Changes to this script\", file=fd)\n print(\"; will be overwritten the next time py2exe is run!\", file=fd)\n print(\"[Setup]\", file=fd)\n print(\"AppName=%s\" % self.name, file=fd)\n print(\"AppVerName=%s %s\" % (self.name, self.version), file=fd)\n print(\"ChangesEnvironment=true\", file=fd)\n print(r\"DefaultDirName={pf}\\%s\" % self.name, file=fd)\n print(\"DefaultGroupName=%s\" % self.name, file=fd)\n print(\"OutputBaseFilename=%s\" % self.distfilebase, file=fd)\n print(\"OutputDir=..\", file=fd)\n print(\"SetupIconFile=%s\" % self.icon, file=fd)\n print(file=fd)\n print(\"[Tasks]\", file=fd)\n print(\"Name: modifypath; Description: Add application directory to %PATH%\", file=fd)\n print(file=fd)\n # List of source files\n files = self.windows_exe_files + \\\n self.console_exe_files + \\\n self.service_exe_files + \\\n self.comserver_files + \\\n self.lib_files\n print('[Files]', file=fd)\n for path in files:\n print(r'Source: \"%s\"; DestDir: \"{app}\\%s\"; Flags: ignoreversion' % (path, os.path.dirname(path)), file=fd)\n # Set icon filename\n print('[Icons]', file=fd)\n for path in self.windows_exe_files:\n print(r'Name: \"{group}\\%s\"; Filename: \"{app}\\%s\"' %\n (self.name, path), file=fd)\n for path in self.console_exe_files:\n name = os.path.basename(path).capitalize()\n print(r'Name: \"{group}\\%s help\"; Filename: \"cmd.exe\"; Parameters: \"/K %s --help\"' % (name, path), file=fd)\n print(r'Name: \"{group}\\Uninstall %s\"; Filename: \"{uninstallexe}\"' % self.name, file=fd)\n print(file=fd)\n # Uninstall optional log files\n print('[UninstallDelete]', file=fd)\n for path in (self.console_exe_files + self.windows_exe_files):\n exename = os.path.basename(path)\n print(r'Type: files; Name: \"{pf}\\%s\\%s.log\"' % (self.lname, exename), file=fd)\n print(file=fd)\n # Add app dir to PATH\n print(\"[Code]\", file=fd)\n print(\"\"\"\\\nconst\n ModPathName = 'modifypath';\n ModPathType = 'user';\n\nfunction ModPathDir(): TArrayOfString;\nbegin\n setArrayLength(Result, 1)\n Result[0] := ExpandConstant('{app}');\nend;\n#include \"modpath.iss\"\n\"\"\", file=fd)\n shutil.copy(r\"scripts\\modpath.iss\", \"dist\")",
"def Write(self):\n if self._project_definition.name in self._PROJECTS_WITH_PYTHON3_AS_DEFAULT:\n shebang = '#!/usr/bin/env python3'\n else:\n shebang = '#!/usr/bin/env python'\n\n template_mappings = {\n 'project_name': self._project_definition.name,\n 'shebang': shebang,\n }\n\n if self._project_definition.name == 'plaso':\n template_file = 'check_dependencies-with_url.py'\n else:\n template_file = 'check_dependencies.py'\n\n template_file = os.path.join(\n self._l2tdevtools_path, self._TEMPLATE_DIRECTORY, template_file)\n file_content = self._GenerateFromTemplate(template_file, template_mappings)\n\n with io.open(self.PATH, 'w', encoding='utf-8') as file_object:\n file_object.write(file_content)",
"def _generate_local_shell_script(arg_list, shell_env, working_dir, deploy_environment, **kwargs):\n script_file = tempfile.NamedTemporaryFile(delete=False)\n logger.debug(\"script file name: \" + script_file.name)\n\n #TODO: Make this less hard-coded\n commander_bin_path = os.path.join(LOCAL_DD_BINS_PATH, 'dml_commander_startup')\n worker_bin_path = os.path.join(LOCAL_DD_BINS_PATH, 'dml_worker_startup')\n\n for k, v in shell_env.items():\n script_file.write(\"export %s=%s\\n\" % (str(k), str(v)))\n\n script_file.write(\"env\\n\")\n script_file.write(\"if [ $MY_RANK -eq 0 ]; then\\n\")\n\n script_file.write(\" %s \" % commander_bin_path)\n for arg in arg_list[0]:\n if len(arg) > 7 and arg[0:7] == \"--args=\":\n script_file.write(arg[0:7] + '\"' + arg[7:] + '\" ')\n else:\n script_file.write(arg + \" \")\n script_file.write(\"1> %s/commander.log.stdout \" % working_dir)\n script_file.write(\"2> %s/commander.log.stderr \" % working_dir)\n script_file.write(\"&\\n\")\n script_file.write(\"fi\\n\")\n\n script_file.write(\"%s \" % worker_bin_path)\n for arg in arg_list[1]:\n script_file.write(arg + \" \")\n script_file.write(\"1> %s/worker_${MY_RANK}.log.stdout \" % working_dir)\n script_file.write(\"2> %s/worker.${MY_RANK}.log.stderr\" % working_dir)\n script_file.write(\"\\n\")\n script_file.close()\n return script_file.name",
"def output(text):\n sys.stdout.write(text)",
"def write_package_scripts(self, output_dir):\n manifest_sh = os.path.join(output_dir, 'manifest.pkgs.sh')\n installed_sh = os.path.join(output_dir, 'installed.pkgs.sh')\n\n minimal_sh = os.path.join(output_dir, 'minimal.pkgs.sh')\n also_installed_sh = os.path.join(output_dir, 'also_installed.pkgs.sh')\n uninstalled_sh = os.path.join(output_dir, 'uninstalled.pkgs.sh')\n\n with open(manifest_sh, 'w') as f:\n for pkgname in self.manifest:\n print(\"manifest: %s\" % pkgname)\n f.write(\"apt-get install %s\" % pkgname)\n f.write(\"\\n\")\n with open(installed_sh, 'w') as f:\n for pkgname in self.manifest:\n print(\"installed: %s\" % pkgname)\n f.write(\"apt-get install %s\" % pkgname)\n f.write(\"\\n\")\n\n with open(minimal_sh, 'w') as f:\n for pkgname in self.minimal:\n print(\"min: %s\" % pkgname)\n f.write(\"apt-get install %s\" % pkgname)\n f.write(\"\\n\")\n with open(also_installed_sh, 'w') as f:\n for pkgname in self.also_installed:\n print(\"als: %s\" % pkgname)\n f.write(\"apt-get install %s\" % pkgname)\n f.write(\"\\n\")\n with open(uninstalled_sh, 'w') as f:\n for pkgname in self.uninstalled:\n print(\"uni: %s\" % pkgname)\n f.write(\"apt-get remove %s\" % pkgname)\n f.write(\"\\n\")",
"def help_shell(self):\n help_str = \"\"\"Execute a command as if at the OS prompt.\n\n Usage: shell cmd\"\"\"\n self.stdout.write(\"{}\\n\".format(help_str))",
"def write_qsub_script(self, filename, echo=False):\n\n buf = ['#!/usr/bin/env qsub', '# Written using SGE module']\n\n for option, value in self.args.__dict__.items():\n if value is True:\n value = ''\n\n if option not in ['command', 'command_args', 'xterm_args']:\n if isinstance(value, list):\n val = ' '.join(value)\n else:\n val = str(value)\n\n buf.append(' '.join(['#', '-' + option, val]))\n\n args = getattr(self.args, 'command_args', [])\n args = getattr(self.args, 'xterm_args', args)\n\n buf.append(' '.join([self.args.command] + args))\n\n if echo:\n print('\\n'.join(buf))\n\n f = open(filename, 'w')\n f.write('\\n'.join(buf))\n f.close()",
"def writeOutput(self, output):",
"def makeBashFile(directory, bpm, csv, egt, output):\n ## write bash file\n print(\"Making Bash File ... \\n\\n\")\n bash = open(directory + '/run1.sh', \"w\")\n bash.write(\"direct=\\'\" + directory + \"\\'\\n\")\n bash.write(\"bpm=\\'\" + bpm + \"\\'\\n\")\n bash.write(\"egt=\\'\" + egt + \"\\'\\n\")\n bash.write(\"csv=\\'\" + csv + \"\\'\\n\")\n bash.write(\"output=\\'\" + output + \"\\'\\n\\n\")\n bash.close()\n\n ## mash bash files\n filenames = [directory + '/run1.sh', 'pipeline/main.sh']\n with open(directory + '/final.sh', 'w') as outfile:\n for fname in filenames:\n with open(fname) as infile:\n outfile.write(infile.read())\n print(\"Finished making Bash File... \\n\\n\")",
"def write_scripts(self, out, ref, file1, file2):\n for config in self.configurations:\n program_folder = os.path.join(out, self.out)\n config.write_Strelka_script(program_folder, self.path2exe, ref, file1, file2, self.template_config)\n return None",
"def scriptGen(self,tmpd='/tmp/jose',libRev='last',submode='qsub',\n redirect=1,PBSoptions=''):\n jobname=self.name\n outdir=self.outd\n qsubdir=scratchdir+'/qsub/'+todayDate() #subdirectory to deposit the script\n if not os.path.exists(qsubdir): pastry('/bin/mkdir -p '+qsubdir)\n script=qsubdir+'/'+jobname+'.sh' #full script file name\n\n if len(jobname) > 15:\n sys.stderr.write('Error: job name '+jobname+' cannot exceed 15 characters')\n return ''\n if not os.path.exists(outdir): os.system('/bin/mkdir -p '+outdir)\n buf=''\n ulimit=int(float(mem_limit)*1024) #maximum resident memory size (Kb) to prevent swapping\n wd=tmpd+'/${PBS_JOBID}'\n #wd=tmpd+'/'+ re.compile('\\W').sub('',self.name) +'_$$' #working directory\n logname=jobname+'.log'\n local_log=wd+'/'+logname\n remote_log=outdir+'/'+logname\n buf= '#!/bin/bash\\n\\n'\n buf+= PBSoptions+'\\n\\n'\n buf+= '#bash function to update library\\n'\n buf+= self.updateNodeLib(libRev)+'\\n\\n'\n buf+= '#bash function to import temporary libs\\n'\n buf+= self.shared_temporal_libraries()+'\\n\\n'\n buf+= '#bash function to clean exit\\n'\n buf+= self.cleanup_exit(submode=submode)+'\\n\\n'\n buf+= 'echo \"'+script+'\"\\n' #write script name withing script body\n buf+= 'hostname\\n' #node where job will be run\n buf+= 'echo $PBS_JOBID\\n'\n buf+= 'ulimit -m '+`ulimit`+' #maximum memory\\n'\n buf+= 'source ~/.bash_profile >/dev/null #environment variables\\n'\n buf+= 'wd='+wd+' #working directory\\n'\n buf+= '/bin/mkdir -p $wd\\n'\n buf+= 'export LOCAL_LOG=\"'+local_log+'\"\\n'\n buf+= '/bin/touch $LOCAL_LOG\\n'\n if submode=='sub' and redirect:\n buf+='exec &> $LOCAL_LOG #redirect STODOUT, STDERR to LOCAL_LOG\\n' \n buf+= 'export REMOTE_LOG=\"'+remote_log+'\"\\n'\n\n but+= '#clean up old log file\\n'\n buf+= 'if [ -f $REMOTE_LOG ]; then\\n' \n buf+= ' /bin/rm -f $REMOTE_LOG\\n'\n buf+= 'fi\\n\\n'\n\n buf+= 'trap \"cleanup_exit 1\" TERM #in case of killing job\\n\\n'\n\n buf+= '#update node code library && import libraries\\n'\n buf+= 'if !('\n buf+= 'updateNodeLib && ' \n buf+= 'shared_temporal_libraries _PREPARE_'\n buf+= ');then\\n'\n buf+= ' cleanup_exit 1\\n'\n buf+= 'fi\\n\\n'\n \n buf+= '/bin/cp '+' '.join(self.inpl)+' $wd #bring input files\\n' \n buf+= 'cd $wd\\n\\n'\n buf+= '#Test command success\\n'\n buf+= 'exs=0 #variable holding script exit status\\n'\n buf+= 'if !('\n buf+= self.exe\n buf+= ');then\\n'\n buf+= ' exs=1\\n'\n buf+= 'fi\\n\\n'\n buf+= '#move even partial results (exs=1)\\n'\n buf+= '/bin/mv '+' '.join(self.outl)+' '+outdir+'\\n'\n buf+= 'cleanup_exit $exs'\n\n open(script,'w').write(buf)\n pastry('chmod u+x '+script)\n\n return script",
"def run_shell_script(file_name: str) -> None:\n cmd = [\"./\" + file_name]\n p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n\n for line in iter(p.stdout.readline, b\"\"):\n print(\">>> \" + line.decode().rstrip())",
"def main():\n parser = argparse.ArgumentParser(description=\"Script for generating an index template out of a document\")\n parser.add_argument(\"INDEX_NAME\", help=\"Name of index\")\n parser.add_argument(\"--output_file\", help=\"File to write schema to\")\n args = parser.parse_args()\n\n output = generate_template(args.INDEX_NAME)\n if args.output_file:\n with open(args.output_file, \"w\") as file:\n json.dump(output, file, ensure_ascii=False, indent=4, sort_keys=True)\n else:\n print(json.dumps(output, ensure_ascii=False, indent=4, sort_keys=True))",
"def outputFromPythonScript(script, *args):\n with open(devnull, \"rb\") as nullInput, open(devnull, \"wb\") as nullError:\n process = Popen(\n [executable, script.path] + list(args),\n stdout=PIPE,\n stderr=nullError,\n stdin=nullInput,\n )\n stdout = process.communicate()[0]\n return stdout",
"def create_sh_script(\n unblur_path, input_image, output_dir,\n input_dir, input_suffix, options\n ):\n strSh = ''\n\n # To make sure it is a bash script\n strSh += '#!/bin/bash\\n\\n'\n\n # Export number of threads\n strSh += 'export OMP_NUM_THREADS={:d}\\n'.format(options.nr_threads)\n\n # The script will abort with non-zero exit values\n strSh += '# The script will abort with non-zero exit values\\n'\n strSh += 'set -e\\n'\n\n # Create a file list of all files\n strSh += '# Create a file list of all files\\n'\n strSh += 'fileList=$(ls {:s})\\n'.format(\n input_image\n )\n\n # Create folders\n strSh += '# Create folders\\n'\n strSh += 'mkdir -p {:s}/Doseuncorrected\\n'.format(output_dir)\n\n strSh += 'mkdir -p {:s}/Shift\\n'.format(output_dir)\n\n strSh += 'mkdir -p {:s}/Temp\\n'.format(output_dir)\n\n if options.filter_sum:\n strSh += 'mkdir -p {:s}/Filtered\\n'.format(output_dir)\n\n if options.dose_filter:\n strSh += 'mkdir -p {:s}/Dosecorrected\\n'.format(output_dir)\n\n if options.expert_mode:\n strSh += 'mkdir -p {:s}/FRC\\n\\n'.format(output_dir)\n\n # Abort script if files in Doseuncorrected already exists\n strSh += '# Abort script if files in Doseuncorrected already exists\\n'\n strSh += 'for f in {:s}/Doseuncorrected/*\\n'.format(output_dir)\n strSh += 'do\\n'\n strSh += 'if [ -e $f ]\\n'\n strSh += 'then\\n'\n strSh += 'echo \"Some files already exists, please choose another output directory\"\\n'\n strSh += 'exit 1\\n'\n strSh += 'break\\n'\n strSh += 'fi\\n'\n strSh += 'done\\n\\n'\n\n # Abort script if files in shift already exists\n strSh += '# Abort script if files in shift already exists\\n'\n strSh += 'for f in {:s}/Shift/*\\n'.format(output_dir)\n strSh += 'do\\n'\n strSh += 'if [ -e $f ]\\n'\n strSh += 'then\\n'\n strSh += 'echo \"Some files already exists, please choose another output directory\"\\n'\n strSh += 'exit 1\\n'\n strSh += 'break\\n'\n strSh += 'fi\\n'\n strSh += 'done\\n\\n'\n\n # Abort script if files in Dosecorrected already exists\n strSh += '# Abort script if files in Dosecorrected already exists\\n'\n strSh += 'for f in {:s}/Dosecorrected/*\\n'.format(output_dir)\n strSh += 'do\\n'\n strSh += 'if [ -e $f ]\\n'\n strSh += 'then\\n'\n strSh += 'echo \"Some files already exists, please choose another output directory\"\\n'\n strSh += 'exit 1\\n'\n strSh += 'break\\n'\n strSh += 'fi\\n'\n strSh += 'done\\n\\n'\n\n # Abort script if files in Filtered already exists\n strSh += '# Abort script if files in Filtered already exists\\n'\n strSh += 'for f in {:s}/Filtered/*\\n'.format(output_dir)\n strSh += 'do\\n'\n strSh += 'if [ -e $f ]\\n'\n strSh += 'then\\n'\n strSh += 'echo \"Some files already exists, please choose another output directory\"\\n'\n strSh += 'exit 1\\n'\n strSh += 'break\\n'\n strSh += 'fi\\n'\n strSh += 'done\\n\\n'\n\n # Abort script if files in FRC already exists\n strSh += '# Abort script if files in FRC already exists\\n'\n strSh += 'for f in {:s}/FRC/*\\n'.format(output_dir)\n strSh += 'do\\n'\n strSh += 'if [ -e $f ]\\n'\n strSh += 'then\\n'\n strSh += 'echo \"Some files already exists, please choose another output directory\"\\n'\n strSh += 'exit 1\\n'\n strSh += 'break\\n'\n strSh += 'fi\\n'\n strSh += 'done\\n\\n'\n\n # Loop over all files\n strSh += '\\nfor file in $fileList\\ndo\\n\\n'\n\n strSh += 'baseName=${{file%{:s}}}\\n'.format(input_suffix)\n strSh += 'baseName=${{baseName#{:s}}}\\n'.format(input_dir)\n\n # Create a temporary file to work with to prevent format issues\n strSh += '# Create a temporary file to work with 
to prevent format issues\\n'\n strSh += 'e2proc3d.py $file {:s}/Temp/${{baseName}}_temp.mrc\\n\\n'.format(output_dir)\n\n # Remove some temporary files that unblur makes\n strSh += '# Remove some temporary files that unblur makes\\n'\n strSh += 'for f in .UnBlur*\\n'\n strSh += 'do\\n'\n strSh += 'if [ -e $f ]\\n'\n strSh += 'then\\n'\n strSh += 'rm .UnBlur*\\n'\n strSh += 'break\\n'\n strSh += 'else\\n'\n strSh += 'true\\n'\n strSh += 'fi\\n'\n strSh += 'done\\n\\n'\n\n # Start Unblur without dose correction\n strSh += '{:s} << eof\\n'.format(unblur_path)\n\n # Input File\n strSh += '{:s}/Temp/${{baseName}}_temp.mrc\\n'.format(output_dir)\n # Number of Frames\n strSh += '{:d}\\n'.format(options.nr_frames)\n # Sum File\n strSh += '{:s}/Doseuncorrected/${{baseName}}{:s}.mrc\\n'.format(\n output_dir,\n options.sum_suffix\n )\n # Shift File\n strSh += '{:s}/Shift/${{baseName}}{:s}.txt\\n'.format(\n output_dir,\n options.shift_suffix\n )\n # Pixel Size\n strSh += '{:f}\\n'.format(options.pixel_size)\n\n # Say no to Dose Filtering\n strSh += 'NO\\n'\n\n if options.save_frames:\n # Say yes to Save Frames\n strSh += 'YES\\n'\n # Frames file\n strSh += '{:s}/Doseuncorrected/${{baseName}}{:s}{:s}.mrc\\n'.format(\n output_dir,\n options.sum_suffix,\n options.frames_suffix\n )\n else:\n # Say no to Save Frames\n strSh += 'NO\\n'\n\n if options.expert_mode:\n # Say yes to Expert Mode\n strSh += 'YES\\n'\n # FRC File\n strSh += '{:s}/FRC/${{baseName}}{:s}.txt\\n'.format(\n output_dir,\n options.frc_suffix\n )\n # Minimum Shift for initial search\n strSh += '{:f}\\n'.format(options.shift_initial)\n # Outer Radius Shift Limit\n strSh += '{:f}\\n'.format(options.shift_radius)\n # B-Factor to Apply\n strSh += '{:f}\\n'.format(options.b_factor)\n # Half-Width Vertical\n strSh += '{:d}\\n'.format(options.fourier_vertical)\n # Hald-Width Horizontal\n strSh += '{:d}\\n'.format(options.fourier_horizontal)\n # Termination Shift Threshold\n strSh += '{:f}\\n'.format(options.shift_threshold)\n # Maximum Iterations\n strSh += '{:d}\\n'.format(options.iterations)\n # Restore Noise Power\n if options.restore_noise:\n # Say yes to Restore Noise Power\n strSh += 'YES\\n'\n else:\n # Say no to Restore Noise Power\n strSh += 'NO\\n'\n # Verbose Output\n if options.verbose:\n # Say yes to Verbose Output\n strSh += 'YES\\n'\n else:\n # Say no to Verbose Output\n strSh += 'NO\\n'\n else:\n # Say no to Expert Mode\n strSh += 'NO\\n'\n\n # Enf of file reached\n strSh += 'eof\\n\\n'\n\n # Remove some temporary files that unblur makes\n strSh += 'for f in .UnBlur*\\n'\n strSh += 'do\\n'\n strSh += 'if [ -e $f ]\\n'\n strSh += 'then\\n'\n strSh += 'rm .UnBlur*\\n'\n strSh += 'break\\n'\n strSh += 'else\\n'\n strSh += 'true\\n'\n strSh += 'fi\\n'\n strSh += 'done\\n\\n'\n\n # =========== #\n if options.dose_filter:\n\n # Start Unblur with dose correction\n strSh += '{:s} << eof\\n'.format(unblur_path)\n\n # Input File\n strSh += '{:s}/Temp/${{baseName}}_temp.mrc\\n'.format(output_dir)\n # Number of Frames\n strSh += '{:d}\\n'.format(options.nr_frames)\n # Sum File\n strSh += '{:s}/Dosecorrected/${{baseName}}{:s}.mrc\\n'.format(\n output_dir,\n options.sum_suffix\n )\n # Shift File\n strSh += '{:s}/Shift/${{baseName}}{:s}.txt\\n'.format(\n output_dir,\n options.shift_suffix\n )\n # Pixel Size\n strSh += '{:f}\\n'.format(options.pixel_size)\n\n # Say yes to Dose Filtering\n strSh += 'YES\\n'\n # Exposure per Frame\n strSh += '{:f}\\n'.format(options.exposure_per_frame)\n # Acceleration Voltage\n strSh += 
'{:f}\\n'.format(options.voltage)\n # Pre Exposure\n strSh += '{:f}\\n'.format(options.pre_exposure)\n\n if options.save_frames:\n # Say yes to Save Frames\n strSh += 'YES\\n'\n # Frames file\n strSh += '{:s}/Dosecorrected/${{baseName}}{:s}{:s}.mrc\\n'.format(\n output_dir,\n options.sum_suffix,\n options.frames_suffix\n )\n else:\n # Say no to Save Frames\n strSh += 'NO\\n'\n\n if options.expert_mode:\n # Say yes to Expert Mode\n strSh += 'YES\\n'\n # FRC File\n strSh += '{:s}/FRC/${{baseName}}{:s}.txt\\n'.format(\n output_dir,\n options.frc_suffix\n )\n # Minimum Shift for initial search\n strSh += '{:f}\\n'.format(options.shift_initial)\n # Outer Radius Shift Limit\n strSh += '{:f}\\n'.format(options.shift_radius)\n # B-Factor to Apply\n strSh += '{:f}\\n'.format(options.b_factor)\n # Half-Width Vertical\n strSh += '{:d}\\n'.format(options.fourier_vertical)\n # Hald-Width Horizontal\n strSh += '{:d}\\n'.format(options.fourier_horizontal)\n # Termination Shift Threshold\n strSh += '{:f}\\n'.format(options.shift_threshold)\n # Maximum Iterations\n strSh += '{:d}\\n'.format(options.iterations)\n # Restore Noise Power\n if options.restore_noise:\n # Say yes to Restore Noise Power\n strSh += 'YES\\n'\n else:\n # Say no to Restore Noise Power\n strSh += 'NO\\n'\n # Verbose Output\n if options.verbose:\n # Say yes to Verbose Output\n strSh += 'YES\\n'\n else:\n # Say no to Verbose Output\n strSh += 'NO\\n'\n else:\n # Say no to Expert Mode\n strSh += 'NO\\n'\n\n # Enf of file reached\n strSh += 'eof\\n\\n'\n\n # Remove temporary file\n strSh += 'rm {:s}/Temp/${{baseName}}_temp.mrc\\n'.format(output_dir)\n\n # Remove some temporary files that unblur makes\n # Remove some temporary files that unblur makes\n strSh += 'for f in .UnBlur*\\n'\n strSh += 'do\\n'\n strSh += 'if [ -e $f ]\\n'\n strSh += 'then\\n'\n strSh += 'rm .UnBlur*\\n'\n strSh += 'break\\n'\n strSh += 'else\\n'\n strSh += 'true\\n'\n strSh += 'fi\\n'\n strSh += 'done\\n\\n'\n\n if options.filter_sum:\n # Filter Images\n lowpass_angstrom = options.pixel_size / options.lowpass\n highpass_angstrom = options.pixel_size / options.highpass\n strSh += \\\n 'e2proc3d.py {:s}/Doseuncorrected/${{baseName}}{:s}.mrc '.format(\n output_dir,\n options.sum_suffix\n )\n strSh += '{:s}/Filtered/${{baseName}}{:s}.mrc ' \\\n .format(\n output_dir,\n options.sum_suffix\n )\n strSh += '--process=filter.lowpass.gauss:cutoff_freq={:f} '.format(\n options.lowpass\n )\n strSh += '--process=filter.highpass.gauss:cutoff_freq={:f}\\n\\n' \\\n .format(\n options.highpass\n )\n\n if options.remove_sum:\n # Remove sum files\n strSh += 'rm {:s}/Doseuncorrected/${{baseName}}{:s}.mrc\\n'.format(\n output_dir,\n options.sum_suffix\n )\n\n # Done\n strSh += 'done\\n\\n'\n\n # Remove temp folder\n strSh += 'rm -r {:s}/Temp\\n'.format(output_dir)\n\n strSh += 'echo \"All done!\"'\n\n # Write Output\n with open('{:s}/scriptUnblur.sh'.format(output_dir), 'w') as f:\n f.write(strSh)"
] | [
"0.6161045",
"0.5830803",
"0.5830141",
"0.5728146",
"0.5581661",
"0.5542562",
"0.5534459",
"0.5473914",
"0.5465518",
"0.54606724",
"0.5454724",
"0.54491675",
"0.5440373",
"0.54200655",
"0.54052734",
"0.5380631",
"0.53805697",
"0.53546333",
"0.534062",
"0.5320091",
"0.53145796",
"0.5288436",
"0.5284902",
"0.5239219",
"0.5228699",
"0.5228072",
"0.5205056",
"0.514884",
"0.5137585",
"0.51095325"
] | 0.6917708 | 0 |
Writes the output in LSF job array format. Creates subshell scripts that contain the workflow for each input file separately. After this, a main shell script containing the LSF (bsub) job array configuration is created. This script is responsible for starting the subshells as separate array tasks. | def write_lsf(workloads, input_file_parameters, command_line_parameters):
workload_index = 0
workload_zfill_amount = len(str(len(workloads)))
workload_file_paths = []
for workload in workloads:
        # Each workflow part will have a separate file to submit to LSF with
        # the bsub command. Each file has one or more associated subshell files
        # containing the contents for each thread.
# Generate strings describing current workload and thread indexes for
# output file names
workload_index += 1
workload_index_string = str(workload_index).zfill(workload_zfill_amount)
file_main_name = '{0}_LSF_WORKLOAD_{1}'.format(NAME,
workload_index_string)
        # When --fix_run mode is used the output and log files already exist.
        # To prevent overwriting these files with new ones, a specific appendix
        # string is added to the new output file names.
appendix = '.sh'
i = 0
if command_line_parameters.fix_run:
mode = 'FIX'
elif command_line_parameters.compress_run == 'compress':
mode = 'COMPRESS'
elif command_line_parameters.compress_run == 'decompress':
mode = 'DECOMPRESS'
else:
mode = None
while mode is not None and os.path.exists(os.path.join(input_file_parameters.output_dir,
file_main_name + appendix)):
i += 1
appendix = '_{0}_{1}.sh'.format(mode, i)
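        # Illustration with hypothetical names: on a plain run the main file is
        # written as <NAME>_LSF_WORKLOAD_<NN>.sh, while the first --fix_run pass
        # over the same output directory produces
        # <NAME>_LSF_WORKLOAD_<NN>_FIX_1.sh instead.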
# Generate subshell files
        thread_index = 1  # LSF job array indices (%I) are 1-based, so number subshell files from 1
for thread_contents in workload:
# Iterate over output commands of each thread and write necessary
# subshell files for each
out_lines = []
cmds_in_thread = len(thread_contents)
for i in xrange(cmds_in_thread):
                # Check whether any modules need loading or whether the previous command already loaded them
skip_module_loading = False
if i > 0:
if thread_contents[i].load_module == thread_contents[i-1].load_module:
skip_module_loading = True
                # Check whether any modules need unloading or whether the following command will still use them
skip_module_unloading = False
if i < cmds_in_thread-1:
if thread_contents[i].load_module == thread_contents[i+1].load_module:
skip_module_unloading = True
out_lines += generate_subshell_file_contents(thread_contents[i],
skip_module_loading,
skip_module_unloading)
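            # Illustration with hypothetical modules: if three consecutive
            # commands in this thread declare the same "load_module" directive,
            # the checks above mark the load as skippable for all but the first
            # command and the unload as skippable for all but the last one.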
# Write subshell file
thread_index_string = str(thread_index)
fl_name = '{0}_WORKLOAD_{1}_subshell_{2}{3}'.format(NAME,
workload_index_string,
thread_index_string,
appendix)
try:
out_fl = open(os.path.join(input_file_parameters.output_dir,
fl_name), 'w')
except:
raise STAPLERerror.STAPLERerror('Unable to create output file:'
'\n{0}'.format(os.path.join(
input_file_parameters.output_dir,
fl_name)))
out_fl.write('\n'.join(out_lines))
out_fl.write('\n')
out_fl.close()
thread_index += 1
# Generate parameter file for the bsub run
resmng_config = []
resmng_config.append('#BSUB-J "{0}[1-{1}]"'.format(
input_file_parameters.job_name,
len(workload)))
resmng_config.append('#BSUB-i {0}_WORKLOAD_{1}_subshell_{2}{3}'.format(
NAME,
workload_index_string,
'%I',
appendix))
resmng_config.append('#BSUB-o {0}_WORKLOAD_{1}_subshell_{2}{3}.out'.format(
NAME,
workload_index_string,
'%I',
appendix))
resmng_config += input_file_parameters.resource_manager_params
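        # Illustration with a hypothetical job name and thread count: for job
        # name "myjob" and a workload of 4 threads, the generated header starts
        # roughly as
        #   #BSUB-J "myjob[1-4]"
        #   #BSUB-i <NAME>_WORKLOAD_<NN>_subshell_%I.sh
        #   #BSUB-o <NAME>_WORKLOAD_<NN>_subshell_%I.sh.out
        # followed by any user-supplied resource manager parameters.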
out_fl_path = os.path.join(input_file_parameters.output_dir, file_main_name + appendix)
workload_file_paths.append(out_fl_path)
try:
out_fl = open(out_fl_path, 'w')
except IOError as emsg:
raise STAPLERerror.STAPLERerror('Unable to create output file:'
'\n{0}\n with error message:\n{1}'
.format(os.path.join(input_file_parameters.output_dir,
file_main_name + appendix),
str(emsg)))
out_fl.write('\n'.join(resmng_config))
out_fl.write('\n')
out_fl.close()
return workload_file_paths | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write_unix(workloads, input_file_parameters, command_line_parameters):\r\n\r\n workload_index = 0\r\n workload_zfill_amount = len(str(len(workloads)))\r\n background_process_list = []\r\n for workload in workloads:\r\n # Each workflow part will have separate file to submit to TORQUE with\r\n # sbatch command. Each file has one or more associated subshell files\r\n # containing contents for each thread.\r\n\r\n # Generate strings describing current workload and thread indexes for\r\n # output file names\r\n workload_index += 1\r\n workload_index_string = str(workload_index).zfill(workload_zfill_amount)\r\n file_main_name = '{0}_UNIX_WORKLOAD_1'.format(NAME)\r\n\r\n # Add information about current workflow to the main shell script\r\n background_process_list.append('echo \"Running workload part {0}\"'.format(\r\n workload_index))\r\n\r\n # When --fix_run mode is used the output and log files files already\r\n # exist. To prevent overwriting these files with new ones specific\r\n # prefix or appendix strings are added to the new output file names.\r\n appendix = '.sh'\r\n i = 0\r\n if command_line_parameters.fix_run:\r\n mode = 'FIX'\r\n elif command_line_parameters.compress_run == 'compress':\r\n mode = 'COMPRESS'\r\n elif command_line_parameters.compress_run == 'decompress':\r\n mode = 'DECOMPRESS'\r\n else:\r\n mode = None\r\n while mode is 'FIX' and os.path.exists(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix)):\r\n i += 1\r\n appendix = '_{0}_{1}.sh'.format(mode, i)\r\n\r\n if mode in ('COMPRESS', 'DECOMPRESS'):\r\n appendix = '_{0}.sh'.format(mode)\r\n while os.path.exists(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix)):\r\n i += 1\r\n appendix = '_{0}_{1}.sh'.format(mode, i)\r\n\r\n\r\n # Generate subshell files\r\n thread_index = 0\r\n thread_zfill_amount = len(str(len(workload)))\r\n for thread_contents in workload:\r\n # Iterate over output commands of each thread and write necessary\r\n # subshell files for each\r\n out_lines = []\r\n cmds_in_thread = len(thread_contents)\r\n for i in xrange(cmds_in_thread):\r\n # Check if any modules need loading or are they loaded by previous command\r\n skip_module_loading = False\r\n if i > 0:\r\n if thread_contents[i].load_module == thread_contents[i-1].load_module:\r\n skip_module_loading = True\r\n # Check if any modules need unloading or will they be used by following command\r\n skip_module_unloading = False\r\n if i < cmds_in_thread-1:\r\n if thread_contents[i].load_module == thread_contents[i+1].load_module:\r\n skip_module_unloading = True\r\n out_lines += generate_subshell_file_contents(thread_contents[i],\r\n skip_module_loading,\r\n skip_module_unloading)\r\n\r\n\r\n # Write subshell file\r\n thread_index_string = str(thread_index).zfill(thread_zfill_amount)\r\n fl_name = '{0}_WORKLOAD_{1}_subshell_{2}{3}'.format(NAME,\r\n workload_index_string,\r\n thread_index_string,\r\n appendix)\r\n try:\r\n out_fl = open(os.path.join(input_file_parameters.output_dir,\r\n fl_name), 'w')\r\n except:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}'.format(os.path.join(\r\n input_file_parameters.output_dir,\r\n fl_name)))\r\n out_fl.write('\\n'.join(out_lines))\r\n out_fl.write('\\n')\r\n out_fl.close()\r\n # i.e. 
use UNIX source to run input shell script, redirect stdout\r\n # and stderr to an .out file.\r\n background_process_list.append('source {0} >> {0}.out 2>&1 &'.format(\r\n os.path.join(input_file_parameters.output_dir,\r\n fl_name)))\r\n thread_index += 1\r\n\r\n # Workflow steps are written to a single output file (instead of\r\n # separate files). \"wait\" command is inserted in between workflow parts\r\n # to synchronize workflows.\r\n background_process_list.append('wait\\n\\n')\r\n\r\n # Write the main shell script file\r\n resmng_config = list(input_file_parameters.resource_manager_params)\r\n resmng_config.append('\\n\\n')\r\n resmng_config.append('\\n'.join(background_process_list))\r\n\r\n out_fl_path = os.path.join(input_file_parameters.output_dir, file_main_name + appendix)\r\n try:\r\n out_fl = open(out_fl_path, 'w')\r\n except IOError as emsg:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}\\n with error message:\\n{1}'\r\n .format(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix),\r\n str(emsg)))\r\n out_fl.write('\\n'.join(resmng_config))\r\n out_fl.write('\\n')\r\n out_fl.close()\r\n return [out_fl_path]",
"def write_torque(workloads, input_file_parameters, command_line_parameters):\r\n validate_resource_manager_parameters(\r\n input_file_parameters.resource_manager_params,\r\n ['#PBS -k', '#PBS -N', '#PBS -d', '#PBS -e', '#PBS -t'])\r\n\r\n workload_index = 0\r\n workload_zfill_amount = len(str(len(workloads)))\r\n workload_file_paths = []\r\n for workload in workloads:\r\n # Each workflow part will have separate file to submit to TORQUE with\r\n # sbatch command. Each file has one or more associated subshell files\r\n # containing contents for each thread.\r\n\r\n # Generate strings describing current workload and thread indexes for\r\n # output file names\r\n workload_index += 1\r\n workload_index_string = str(workload_index).zfill(workload_zfill_amount)\r\n file_main_name = '{0}_TORQUE_WORKLOAD_{1}'.format(NAME,\r\n workload_index_string)\r\n\r\n # When --fix_run mode is used the output and log files files already\r\n # exist. To prevent overwriting these files with new ones specific\r\n # prefix or appendix strings are added to the new output file names.\r\n appendix = '.sh'\r\n i = 0\r\n if command_line_parameters.fix_run:\r\n mode = 'FIX'\r\n elif command_line_parameters.compress_run == 'compress':\r\n mode = 'COMPRESS'\r\n elif command_line_parameters.compress_run == 'decompress':\r\n mode = 'DECOMPRESS'\r\n else:\r\n mode = None\r\n while mode is not None and os.path.exists(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix)):\r\n i += 1\r\n appendix = '_{0}_{1}.sh'.format(mode, i)\r\n\r\n # Generate subshell files\r\n thread_index = 0\r\n for thread_contents in workload:\r\n # Iterate over output commands of each thread and write necessary\r\n # subshell files for each\r\n out_lines = []\r\n cmds_in_thread = len(thread_contents)\r\n for i in xrange(cmds_in_thread):\r\n # Check if any modules need loading or are they loaded by previous command\r\n skip_module_loading = False\r\n if i > 0:\r\n if thread_contents[i].load_module == thread_contents[i-1].load_module:\r\n skip_module_loading = True\r\n # Check if any modules need unloading or will they be used by following command\r\n skip_module_unloading = False\r\n if i < cmds_in_thread-1:\r\n if thread_contents[i].load_module == thread_contents[i+1].load_module:\r\n skip_module_unloading = True\r\n out_lines += generate_subshell_file_contents(thread_contents[i],\r\n skip_module_loading,\r\n skip_module_unloading)\r\n\r\n\r\n # Write subshell file\r\n thread_index_string = str(thread_index)\r\n fl_name = '{0}_WORKLOAD_{1}_subshell_{2}{3}'.format(NAME,\r\n workload_index_string,\r\n thread_index_string,\r\n appendix)\r\n try:\r\n out_fl = open(os.path.join(input_file_parameters.output_dir,\r\n fl_name), 'w')\r\n except:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}'.format(os.path.join(\r\n input_file_parameters.output_dir,\r\n fl_name)))\r\n out_fl.write('\\n'.join(out_lines))\r\n out_fl.write('\\n')\r\n out_fl.close()\r\n thread_index += 1\r\n\r\n # Create lines for TORQUE input file by generating job-name, output,\r\n # error and array parameters based on user input\r\n\r\n # IF YOU ADD NEW AUTOMATICALLY INFERRED PARAMETERS, REMEMBER TO VALIDATE\r\n # THEM AT THE BEGINNING OF THIS FUNCTION\r\n resmng_config = list(input_file_parameters.resource_manager_params)\r\n resmng_config.append('#PBS -k eo')\r\n resmng_config.append('#PBS -N {0}'.format(input_file_parameters.job_name))\r\n resmng_config.append('#PBS -d {0}'.format(input_file_parameters.output_dir))\r\n 
resmng_config.append('#PBS -e {0}'.format(input_file_parameters.output_dir))\r\n resmng_config.append('#PBS -t {0}-{1}'.format(0, len(workload)-1))\r\n\r\n resmng_config.append('\\n\\n')\r\n subshell_file_path = '{0}_WORKLOAD_{1}_subshell_{2}{3}'.format(NAME,\r\n workload_index_string,\r\n '\"${PBS_ARRAYID}\"',\r\n appendix)\r\n subshell_file_path = os.path.join(input_file_parameters.output_dir,\r\n subshell_file_path)\r\n resmng_config.append('source {0}'.format(subshell_file_path))\r\n\r\n out_fl_path = os.path.join(input_file_parameters.output_dir,file_main_name + appendix)\r\n workload_file_paths.append(out_fl_path)\r\n try:\r\n out_fl = open(out_fl_path, 'w')\r\n except IOError as emsg:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}\\n with error message:\\n{1}'\r\n .format(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix),\r\n str(emsg)))\r\n out_fl.write('\\n'.join(resmng_config))\r\n out_fl.write('\\n')\r\n out_fl.close()\r\n return workload_file_paths",
"def write_sge(workloads, input_file_parameters, command_line_parameters):\r\n validate_resource_manager_parameters(\r\n input_file_parameters.resource_manager_params,\r\n ['# -o', '# -e', '# -t'])\r\n\r\n workload_index = 0\r\n workload_zfill_amount = len(str(len(workloads)))\r\n workload_file_paths = []\r\n for workload in workloads:\r\n # Each workflow part will have separate file to submit to TORQUE with\r\n # sbatch command. Each file has one or more associated subshell files\r\n # containing contents for each thread.\r\n\r\n # Generate strings describing current workload and thread indexes for\r\n # output file names\r\n workload_index += 1\r\n workload_index_string = str(workload_index).zfill(workload_zfill_amount)\r\n file_main_name = '{0}_SGE_WORKLOAD_{1}'.format(NAME,\r\n workload_index_string)\r\n\r\n # When --fix_run mode is used the output and log files files already\r\n # exist. To prevent overwriting these files with new ones specific\r\n # prefix or appendix strings are added to the new output file names.\r\n prefix = ''\r\n appendix = '.sh'\r\n i = 0\r\n if command_line_parameters.fix_run:\r\n mode = 'FIX'\r\n elif command_line_parameters.compress_run == 'compress':\r\n mode = 'COMPRESS'\r\n elif command_line_parameters.compress_run == 'decompress':\r\n mode = 'DECOMPRESS'\r\n else:\r\n mode = None\r\n while mode is not None and os.path.exists(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix)):\r\n i += 1\r\n prefix = '{0}_{1}_'.format(mode, i)\r\n appendix = '_{0}_{1}.sh'.format(mode, i)\r\n\r\n # Generate subshell files\r\n thread_index = 1\r\n for thread_contents in workload:\r\n # Iterate over output commands of each thread and write necessary\r\n # subshell files for each\r\n out_lines = []\r\n cmds_in_thread = len(thread_contents)\r\n for i in xrange(cmds_in_thread):\r\n # Check if any modules need loading or are they loaded by previous command\r\n skip_module_loading = False\r\n if i > 0:\r\n if thread_contents[i].load_module == thread_contents[i-1].load_module:\r\n skip_module_loading = True\r\n # Check if any modules need unloading or will they be used by following command\r\n skip_module_unloading = False\r\n if i < cmds_in_thread-1:\r\n if thread_contents[i].load_module == thread_contents[i+1].load_module:\r\n skip_module_unloading = True\r\n out_lines += generate_subshell_file_contents(thread_contents[i],\r\n skip_module_loading,\r\n skip_module_unloading)\r\n\r\n\r\n # Write subshell file\r\n thread_index_string = str(thread_index)\r\n fl_name = '{0}_WORKLOAD_{1}_subshell_{2}{3}'.format(NAME,\r\n workload_index_string,\r\n thread_index_string,\r\n appendix)\r\n try:\r\n out_fl = open(os.path.join(input_file_parameters.output_dir,\r\n fl_name), 'w')\r\n except:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}'.format(os.path.join(\r\n input_file_parameters.output_dir,\r\n fl_name)))\r\n out_fl.write('\\n'.join(out_lines))\r\n out_fl.write('\\n')\r\n out_fl.close()\r\n thread_index += 1\r\n\r\n # Create lines for SGE input file by generating job-name, output,\r\n # error and array parameters based on user input\r\n\r\n status_file_basename = os.path.join(input_file_parameters.output_dir,\r\n prefix +\r\n input_file_parameters.job_name + '_$TASK_ID')\r\n\r\n # IF YOU ADD NEW AUTOMATICALLY INFERRED PARAMETERS, REMEMBER TO VALIDATE\r\n # THEM AT THE BEGINNING OF THIS FUNCTION\r\n resmng_config = list(input_file_parameters.resource_manager_params)\r\n resmng_config.append('#$ -o 
{0}.out'.format(status_file_basename))\r\n resmng_config.append('#$ -e {0}.err'.format(status_file_basename))\r\n resmng_config.append('#$ -t {0}-{1}'.format(1, len(workload)))\r\n\r\n resmng_config.append('\\n\\n')\r\n subshell_file_path = '{0}_WORKLOAD_{1}_subshell_{2}{3}'.format(NAME,\r\n workload_index_string,\r\n '\"$SGE_TASK_ID\"',\r\n appendix)\r\n subshell_file_path = os.path.join(input_file_parameters.output_dir,\r\n subshell_file_path)\r\n resmng_config.append('source {0}'.format(subshell_file_path))\r\n\r\n out_fl_path = os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix)\r\n workload_file_paths.append(out_fl_path)\r\n try:\r\n out_fl = open(out_fl_path, 'w')\r\n\r\n except IOError as emsg:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}\\n with error message:\\n{1}'\r\n .format(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix),\r\n str(emsg)))\r\n out_fl.write('\\n'.join(resmng_config))\r\n out_fl.write('\\n')\r\n out_fl.close()\r\n return workload_file_paths",
"def write_slurm(workloads, input_file_parameters, command_line_parameters):\r\n workload_index = 0\r\n workload_zfill_amount = len(str(len(workloads)))\r\n workload_file_paths = []\r\n for workload in workloads:\r\n # Each workflow part will have separate file to submit to SLURM with\r\n # sbatch command. Each file has one or more associated subshell files\r\n # containing contents for each thread.\r\n\r\n # Generate strings describing current workload and thread indexes for\r\n # output file names\r\n workload_index += 1\r\n workload_index_string = str(workload_index).zfill(workload_zfill_amount)\r\n file_main_name = '{0}_SBATCH_WORKLOAD_{1}'.format(NAME,\r\n workload_index_string)\r\n\r\n # When --fix_run mode is used the output and log files files already\r\n # exist. To prevent overwriting these files with new ones specific\r\n # prefix or appendix strings are added to the new output file names.\r\n appendix = '.sh'\r\n prefix = ''\r\n i = 0\r\n if command_line_parameters.fix_run:\r\n mode = 'FIX'\r\n elif command_line_parameters.compress_run == 'compress':\r\n mode = 'COMPRESS'\r\n elif command_line_parameters.compress_run == 'decompress':\r\n mode = 'DECOMPRESS'\r\n else:\r\n mode = None\r\n\r\n while mode is not None and os.path.exists(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix)):\r\n i += 1\r\n prefix = '{0}_{1}_'.format(mode, i)\r\n appendix = '_{0}_{1}.sh'.format(mode, i)\r\n\r\n # Generate subshell files\r\n thread_index = 0\r\n for thread_contents in workload:\r\n # Iterate over output commands of each thread and write necessary\r\n # subshell files for each\r\n out_lines = []\r\n cmds_in_thread = len(thread_contents)\r\n for i in xrange(cmds_in_thread):\r\n # Check if any modules need loading or are they loaded by previous command\r\n skip_module_loading = False\r\n if i > 0:\r\n if thread_contents[i].load_module == thread_contents[i-1].load_module:\r\n skip_module_loading = True\r\n # Check if any modules need unloading or will they be used by following command\r\n skip_module_unloading = False\r\n if i < cmds_in_thread-1:\r\n if thread_contents[i].load_module == thread_contents[i+1].load_module:\r\n skip_module_unloading = True\r\n out_lines += generate_subshell_file_contents(thread_contents[i],\r\n skip_module_loading,\r\n skip_module_unloading)\r\n\r\n\r\n # Write subshell file\r\n thread_index += 1\r\n thread_index_string = str(thread_index)\r\n fl_name = '{0}_WORKLOAD_{1}_subshell_{2}{3}'.format(NAME,\r\n workload_index_string,\r\n thread_index_string,\r\n appendix)\r\n try:\r\n out_fl = open(os.path.join(input_file_parameters.output_dir,\r\n fl_name), 'w')\r\n except:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}'.format(os.path.join(\r\n input_file_parameters.output_dir,\r\n fl_name)))\r\n out_fl.write('\\n'.join(out_lines))\r\n out_fl.write('\\n')\r\n out_fl.close()\r\n\r\n # Create lines for SLURM input file by generating job-name, output,\r\n # error and array parameters based on user input\r\n status_file_basename = os.path.join(input_file_parameters.output_dir,\r\n prefix + input_file_parameters.job_name)\r\n resmng_config = list(input_file_parameters.resource_manager_params)\r\n resmng_config.append('#SBATCH --job-name={0}'.format(input_file_parameters.job_name))\r\n resmng_config.append('#SBATCH --output={0}_%A_%a.out'.format(status_file_basename))\r\n resmng_config.append('#SBATCH --error={0}_%A_%a.err'.format(status_file_basename))\r\n resmng_config.append('#SBATCH --array={0}-{1}'.format(1, 
len(workload)))\r\n\r\n resmng_config.append('\\n\\n')\r\n subshell_file_path = '{0}_WORKLOAD_{1}_subshell_{2}{3}'.format(NAME,\r\n workload_index_string,\r\n '\"$SLURM_ARRAY_TASK_ID\"',\r\n appendix)\r\n subshell_file_path = os.path.join(input_file_parameters.output_dir,\r\n subshell_file_path)\r\n resmng_config.append('source {0}'.format(subshell_file_path))\r\n\r\n out_fl_path = os.path.join(input_file_parameters.output_dir,file_main_name + appendix)\r\n workload_file_paths.append(out_fl_path)\r\n try:\r\n out_fl = open(out_fl_path, 'w')\r\n\r\n except IOError as emsg:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}\\n with error message:\\n{1}'\r\n .format(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix),\r\n str(emsg)))\r\n out_fl.write('\\n'.join(resmng_config))\r\n out_fl.write('\\n')\r\n out_fl.close()\r\n return workload_file_paths",
"def gen_jobs(fpath, num_runs, netid):\n\n run = \"\"\n run += \"import sys\\n\"\n run += \"import subprocess\\n\"\n run += \"cmd_array = (\"\n for i in range(num_runs):\n run += \"r\\\"python test.py %d\\\"\" % i\n run += \",\\n\"\n\n run += \")\\n\"\n run += \"p = subprocess.Popen(cmd_array[int(sys.argv[1])-1], shell=True, stdout=subprocess.PIPE)\\n\"\n run += \"out = p.stdout.read()\"\n# run += \"print cmd_array[int(sys.argv[1])]\"\n\n script_name = \"test\"\n\n if verbose:\n print \"Writing array script: \" + \"run.\" + script_name + \".py\"\n f = open(os.path.join(fpath, \"run.\" + script_name + \".py\"), 'w')\n f.write(\"%s\\n\" % run)\n\n f = open(os.path.join(fpath, \"submit_run.\" + script_name + \".sh\"), 'w')\n submit_run = \"#!/bin/csh\\n\"\n submit_run += \"#$ -N %s\\n\" % (\"job_%d\" % num_runs)\n submit_run += \"#$ -t 1:%d\\n\" % (num_runs)\n submit_run += \"#$ -M %[email protected]\\n\\n\" % (netid)\n# submit_run += \"#$ -q short\"\n# submit_run += \"#$ -r y\"\n submit_run += \"python run.%s.py ${SGE_TASK_ID}\" % (script_name)\n\n if verbose:\n print \"Writing submit shell script: \" + \"submit_run.\" + script_name + \".sh\"\n f.write(\"%s\\n\" % submit_run)",
"def write_default(workflows, output_dir):\r\n\r\n # Calculate the total number of commands\r\n number_of_commands = 0\r\n for workflow in workflows:\r\n number_of_commands += sum(map(len, workflow))\r\n\r\n # Create command line strings\r\n i = 0\r\n out_lines = ['echo Started executing shell script at:', 'date']\r\n for workflow in workflows:\r\n for workflow_step in workflow:\r\n for cmd in workflow_step:\r\n i += 1\r\n cmd_list = cmd.command_lines\r\n cmd_list = map(clean_command_lines, cmd_list)\r\n out_lines.append('echo Executing command {0}/{1}:'\r\n .format(i, number_of_commands))\r\n for c in cmd_list:\r\n c = c.replace('>', '\\\\>')\r\n c = c.replace('|', '\\\\|')\r\n out_lines.append('echo ' + c)\r\n out_lines.append('date')\r\n\r\n #Load modules\r\n if cmd.load_module:\r\n for module in cmd.load_module:\r\n out_lines.append(module)\r\n\r\n #The command\r\n out_lines += cmd_list\r\n\r\n #Unload modules\r\n if cmd.unload_module:\r\n for module in cmd.unload_module:\r\n out_lines.append(module)\r\n out_lines.append('echo Finished at:')\r\n out_lines.append('date')\r\n\r\n #Open and write command lines\r\n fl_name = '{0}_output_{1}.sh'.format(NAME, START_TIME)\r\n output_file_path = os.path.join(output_dir, fl_name)\r\n try:\r\n out_fl = open(output_file_path, 'w')\r\n except:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}'.format(os.path.join(output_dir,\r\n fl_name)))\r\n out_fl.write('#!/usr/bin/env bash\\n')\r\n out_fl.write('\\n'.join(out_lines))\r\n out_fl.close()\r\n return [output_file_path]",
"def create_job(jobrun, vcf_filenames):\n if jobrun == \"cluster\":\n \"\"\"\n Supports only PBS clusters for now.\n \"\"\"\n for i in vcf_filenames:\n job_name = os.path.basename(i)\n job_print_string = \"#PBS -N %s\\n#PBS -M [email protected]\\n#PBS -m abe\\n#PBS -V\\n#PBS -l nodes=1:ppn=4,pmem=4000mb,walltime=72:00:00\\n#PBS -q fluxod\\n#PBS -A esnitkin_fluxod\\n#PBS -l qos=flux\\n\\n/home/apirani/anaconda/bin/python /nfs/esnitkin/bin_group/scripts/Scripts_v2.0/variants_position_analysis/reason_job.py -filter2_only_snp_vcf_dir %s -filter2_only_snp_vcf_file %s\\n\" % (job_name, args.filter2_only_snp_vcf_dir, i)\n job_file_name = \"%s.pbs\" % (i)\n f1=open(job_file_name, 'w+')\n f1.write(job_print_string)\n f1.close()\n #os.system(\"mv %s/*.pbs %s/temp\" % (args.filter2_only_snp_vcf_dir, args.filter2_only_snp_vcf_dir))\n pbs_dir = args.filter2_only_snp_vcf_dir + \"/*.pbs\"\n pbs_scripts = glob.glob(pbs_dir)\n for i in pbs_scripts:\n print \"Running: qsub %s\" % i\n #os.system(\"qsub %s\" % i)\n\n elif jobrun == \"parallel-local\":\n \"\"\"\n Generate a Command list of each job and run it in parallel on different cores available on local system\n \"\"\"\n command_array = []\n command_file = \"%s/commands_list.sh\" % args.filter2_only_snp_vcf_dir\n f3 = open(command_file, 'w+')\n\n\n for i in vcf_filenames:\n job_name = os.path.basename(i)\n job_print_string = \"#PBS -N %s\\n#PBS -M [email protected]\\n#PBS -m abe\\n#PBS -V\\n#PBS -l nodes=1:ppn=4,pmem=4000mb,walltime=72:00:00\\n#PBS -q fluxod\\n#PBS -A esnitkin_fluxod\\n#PBS -l qos=flux\\n\\n/home/apirani/anaconda/bin/python /nfs/esnitkin/bin_group/scripts/Scripts_v2.0/variants_position_analysis/reason_job.py -filter2_only_snp_vcf_dir %s -filter2_only_snp_vcf_file %s\\n\" % (job_name, args.filter2_only_snp_vcf_dir, i)\n job_file_name = \"%s.pbs\" % (i)\n f1=open(job_file_name, 'w+')\n f1.write(job_print_string)\n f1.close()\n #os.system(\"mv %s/*.pbs %s/temp\" % (args.filter2_only_snp_vcf_dir, args.filter2_only_snp_vcf_dir))\n pbs_dir = args.filter2_only_snp_vcf_dir + \"/*.pbs\"\n pbs_scripts = glob.glob(pbs_dir)\n\n\n for i in pbs_scripts:\n f3.write(\"bash %s\\n\" % i)\n f3.close()\n with open(command_file, 'r') as fpp:\n for lines in fpp:\n lines = lines.strip()\n command_array.append(lines)\n fpp.close()\n print len(command_array)\n if args.numcores:\n num_cores = int(num_cores)\n else:\n num_cores = multiprocessing.cpu_count()\n results = Parallel(n_jobs=num_cores)(delayed(run_command)(command) for command in command_array)\n\n elif jobrun == \"parallel-single-cluster\":\n print \" \"\n else:\n \"\"\"\n Generate a Command list of each job and run it on local system one at a time\n \"\"\"\n command_array = []\n command_file = \"%s/commands_list.sh\" % args.filter2_only_snp_vcf_dir\n os.system(\"bash %s\" % command_file)",
"def build_job_scripts(model_list, scenario_list, output_dir, cassandra_config_dir, cassandra_log_dir,\n cassandra_main_script, sbatch_account, sbatch_partition='slurm', sbatch_walltime='01:00:00',\n sbatch_ntasks=3, sbatch_nodes=3, sbatch_jobname='cassie', sbatch_logdir='.', template=None):\n\n # use default configuration template file if user does not give one\n if template is None:\n template = pkg_resources.resource_filename('cassie', 'data/sbatch_template.sh')\n\n # existing tags to replace in the template file\n model_tag = '<model>'\n scenario_tag = '<scenario>'\n account_tag = '<account>'\n partition_tag = '<partition>'\n ntasks_tag = '<ntasks>'\n nodes_tag = '<nodes>'\n time_tag = '<walltime>'\n jobname_tag = '<jobname>'\n logdir_tag = '<logdir>'\n cassandra_configdir_tag = '<cassconfigdir>'\n cassandra_logdir_tag = '<casslogdir>'\n cassandra_script_tag = '<cassmainscript>'\n\n for model in model_list:\n for scenario in scenario_list:\n\n output_file = os.path.join(output_dir, f'run_{model.lower()}_{scenario}.sh')\n\n with open(output_file, 'w') as out:\n with open(template) as get:\n\n f = get.read()\n\n # replace tag names with dynamic content\n fx = f.replace(model_tag, model)\n fx = fx.replace(scenario_tag, scenario)\n\n fx = fx.replace(account_tag, sbatch_account)\n fx = fx.replace(partition_tag, sbatch_partition)\n fx = fx.replace(ntasks_tag, str(sbatch_ntasks))\n fx = fx.replace(nodes_tag, str(sbatch_nodes))\n fx = fx.replace(time_tag, sbatch_walltime)\n fx = fx.replace(jobname_tag, sbatch_jobname)\n fx = fx.replace(logdir_tag, sbatch_logdir)\n\n fx = fx.replace(cassandra_configdir_tag, cassandra_config_dir)\n fx = fx.replace(cassandra_logdir_tag, cassandra_log_dir)\n fx = fx.replace(cassandra_script_tag, cassandra_main_script)\n\n out.write(fx)",
"def setup_jobs(self):\n transfer_args = [\"analysis_type\", \"perturbation\", \"num_permutations\", \"permutation_test_statistic\", \"loss_function\",\n \"importance_significance_level\", \"window_search_algorithm\", \"window_effect_size_threshold\"]\n jobs = [None] * self.num_jobs\n for idx in range(self.num_jobs):\n # Create and launch condor job\n features_filename = constants.INPUT_FEATURES_FILENAME.format(self.args.output_dir, idx)\n input_files = [features_filename, self.args.model_filename, self.args.model_loader_filename, self.args.data_filename]\n job_dir = f\"{self.args.output_dir}/outputs_{idx}\"\n cmd = f\"python3 -m anamod.core.worker -worker_idx {idx}\"\n for arg in transfer_args:\n if hasattr(self.args, arg):\n cmd += f\" -{arg} {getattr(self.args, arg)}\"\n # Relative file paths for non-shared FS, absolute for shared FS\n for name, path in dict(output_dir=job_dir, features_filename=features_filename, model_filename=self.args.model_filename,\n model_loader_filename=self.args.model_loader_filename, data_filename=self.args.data_filename).items():\n cmd += f\" -{name} {os.path.abspath(path)}\" if self.args.shared_filesystem else f\" -{name} {os.path.basename(path)}\"\n job = CondorJobWrapper(cmd, input_files, job_dir, shared_filesystem=self.args.shared_filesystem,\n memory=f\"{self.args.memory_requirement}GB\", disk=f\"{self.args.disk_requirement}GB\",\n avoid_bad_hosts=self.args.avoid_bad_hosts, retry_arbitrary_failures=self.args.retry_arbitrary_failures,\n cleanup=self.args.cleanup)\n jobs[idx] = job\n return jobs",
"def write_shell_scripts(airfoils, qsh_template, nsetup, ntype, out_dir):\n for nairfoil, sim_setup in airfoils.iteritems():\n for aoa in sim_setup['aoas']:\n # Create simulation name\n sim_name = create_sim_name(nairfoil, ntype, nsetup, aoa)\n # Create fluent journal file\n with open(qsh_template, 'r') as f:\n qtxt = f.read()\n # Start to replace parameters inside the journal\n qtxt = qtxt.replace('SIMNAME', sim_name)\n qtxt = qtxt.replace('in.jou', sim_name + '.jou')\n qtxt = qtxt.replace('fluent.out', sim_name + '.out')\n # Write new shell script to out_dir\n qout = sim_name + '.qsh'\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n with open(os.path.join(out_dir, qout), 'w') as f:\n f.write(qtxt)\n return True",
"def job_workflow(workflow, jobfiles, jwcl=WCL()):\n #pylint: disable=protected-access,expression-not-assigned,lost-exception\n global pool\n global results\n global stop_all\n global jobfiles_global\n global job_track\n global keeprunning\n global donejobs\n global result_lock\n global lock_monitor\n\n infullnames = {}\n with open(workflow, 'r') as workflowfh:\n # for each wrapper execution\n lines = workflowfh.readlines()\n sys.stdout.flush()\n inputs = {}\n # read in all of the lines in dictionaries\n for linecnt, line in enumerate(lines):\n wrapnum = miscutils.fwsplit(line.strip())[0]\n task = parse_wrapper_line(line, linecnt)\n #task['logfile'] = None\n wcl = WCL()\n with open(task['wclfile'], 'r') as wclfh:\n wcl.read(wclfh, filename=task['wclfile'])\n wcl.update(jwcl)\n\n # get fullnames for inputs and outputs\n ins, _ = intgmisc.get_fullnames(wcl, wcl, None)\n del wcl\n # save input filenames to eliminate from junk tarball later\n infullnames[wrapnum] = []\n for isect in ins:\n for ifile in ins[isect]:\n infullnames[wrapnum].append(ifile)\n jobfiles['infullnames'].extend(ifile)\n inputs[wrapnum] = (task, copy.deepcopy(jobfiles), jwcl, ins)\n job_track[task['wrapnum']] = (task['logfile'], jobfiles)\n # get all of the task groupings, they will be run in numerical order\n tasks = jwcl[\"fw_groups\"].keys()\n tasks.sort()\n # loop over each grouping\n manager = mp.Manager()\n for task in tasks:\n results = [] # the results of running each task in the group\n # get the maximum number of parallel processes to run at a time\n nproc = int(jwcl[\"fw_groups\"][task][\"fw_nthread\"])\n procs = miscutils.fwsplit(jwcl[\"fw_groups\"][task][\"wrapnums\"])\n tempproc = []\n # pare down the list to include only those in this run\n for p in procs:\n if p in inputs.keys():\n tempproc.append(p)\n procs = tempproc\n if nproc > 1:\n numjobs = len(procs)\n # set up the thread pool\n pool = mp.Pool(processes=nproc, maxtasksperchild=2)\n outq = manager.Queue()\n errq = manager.Queue()\n with lock_monitor:\n try:\n donejobs = 0\n # update the input files now, so that it only contains those from the current taks(s)\n for inp in procs:\n jobfiles_global['infullnames'].extend(infullnames[inp])\n # attach all the grouped tasks to the pool\n [pool.apply_async(job_thread, args=(inputs[inp] + (outq, errq, True,),), callback=results_checker) for inp in procs]\n pool.close()\n time.sleep(10)\n while donejobs < numjobs and keeprunning:\n count = 0\n while count < 2:\n count = 0\n try:\n msg = outq.get_nowait()\n print msg\n except:\n count += 1\n try:\n errm = errq.get_nowait()\n sys.stderr.write(errm)\n except:\n count += 1\n time.sleep(.1)\n except:\n results.append(1)\n exc_type, exc_value, exc_traceback = sys.exc_info()\n traceback.print_exception(exc_type, exc_value, exc_traceback,\n limit=4, file=sys.stdout)\n\n raise\n\n finally:\n if stop_all and max(results) > 0:\n # wait to give everything time to do the first round of cleanup\n time.sleep(20)\n # get any waiting messages\n for _ in range(1000):\n try:\n msg = outq.get_nowait()\n print msg\n except:\n break\n for _ in range(1000):\n try:\n errm = errq.get_nowait()\n sys.stderr.write(errm)\n except:\n break\n if not result_lock.acquire(False):\n lock_monitor.wait(60)\n else:\n result_lock.release()\n # empty the worker queue so nothing else starts\n terminate(force=True)\n # wait so everything can clean up, otherwise risk a deadlock\n time.sleep(50)\n del pool\n while True:\n try:\n msg = outq.get(timeout=.1)\n print msg\n except:\n break\n\n while True:\n 
try:\n errm = errq.get(timeout=.1)\n sys.stderr.write(errm)\n except:\n break\n # in case the sci code crashed badly\n if not results:\n results.append(1)\n jobfiles = jobfiles_global\n jobfiles['infullnames'] = list(set(jobfiles['infullnames']))\n if stop_all and max(results) > 0:\n return max(results), jobfiles\n # if running in single threaded mode\n else:\n temp_stopall = stop_all\n stop_all = False\n\n donejobs = 0\n for inp in procs:\n try:\n jobfiles_global['infullnames'].extend(infullnames[inp])\n results_checker(job_thread(inputs[inp] + (sys.stdout, sys.stderr, False,)))\n except:\n (extype, exvalue, trback) = sys.exc_info()\n traceback.print_exception(extype, exvalue, trback, file=sys.stdout)\n results = [1]\n jobfiles = jobfiles_global\n if results[-1] != 0:\n return results[-1], jobfiles\n stop_all = temp_stopall\n\n\n return 0, jobfiles",
"def RunJobs(self, runfile_mapping, server_run_map):\n if self.workflow is None:\n raise RuntimeError(\"Tried to create unnamed workflow!\")\n\n \n # Generate jobs for the first pass over the data\n for run in sorted(runfile_mapping.keys()):\n if self.VERBOSE>0:\n inputfiles=\"/%s/rawdata/volatile/%s/rawdata/Run%06d/hd_rawdata_*.evio\"%(HDRunFileRAIDList.GetRAIDDirFromRun(run,server_run_map),HDJobUtils.GetRunPeriodFromRun(run),run)\n\n # PASS 0\n print \"processing run %d, phase 0 ...\"%(int(run))\n\n # set up command to execute\n if self.nthreads:\n cmd += \" %s/scripts/%s %s %s %06d %03d %d\"%(self.basedir,\"job_wrapper_local.csh\",\"local_calib_pass0.csh\",self.basedir,run,inputfiles,int(self.nthreads))\n else:\n cmd += \" %s/scripts/%s %s %s %06d %03d\"%(self.basedir,\"job_wrapper_local.csh\",\"local_calib_pass0.csh\",self.basedir,run,inputfiles)\n\n # run command\n os.system(cmd)\n\n # PASS 1\n print \"processing run %d, phase 1 ...\"%(int(run))\n\n # set up command to execute\n if self.nthreads:\n cmd += \" %s/scripts/%s %s %s %06d %03d %d\"%(self.basedir,\"job_wrapper_local.csh\",\"local_calib_pass1.csh\",self.basedir,run,inputfiles,int(self.nthreads))\n else:\n cmd += \" %s/scripts/%s %s %s %06d %03d\"%(self.basedir,\"job_wrapper_local.csh\",\"local_calib_pass1.csh\",self.basedir,run,inputfiles)\n\n # run command\n os.system(cmd)",
"def scriptGen(self,tmpd='/tmp/jose',libRev='last',submode='qsub',\n redirect=1,PBSoptions=''):\n jobname=self.name\n outdir=self.outd\n qsubdir=scratchdir+'/qsub/'+todayDate() #subdirectory to deposit the script\n if not os.path.exists(qsubdir): pastry('/bin/mkdir -p '+qsubdir)\n script=qsubdir+'/'+jobname+'.sh' #full script file name\n\n if len(jobname) > 15:\n sys.stderr.write('Error: job name '+jobname+' cannot exceed 15 characters')\n return ''\n if not os.path.exists(outdir): os.system('/bin/mkdir -p '+outdir)\n buf=''\n ulimit=int(float(mem_limit)*1024) #maximum resident memory size (Kb) to prevent swapping\n wd=tmpd+'/${PBS_JOBID}'\n #wd=tmpd+'/'+ re.compile('\\W').sub('',self.name) +'_$$' #working directory\n logname=jobname+'.log'\n local_log=wd+'/'+logname\n remote_log=outdir+'/'+logname\n buf= '#!/bin/bash\\n\\n'\n buf+= PBSoptions+'\\n\\n'\n buf+= '#bash function to update library\\n'\n buf+= self.updateNodeLib(libRev)+'\\n\\n'\n buf+= '#bash function to import temporary libs\\n'\n buf+= self.shared_temporal_libraries()+'\\n\\n'\n buf+= '#bash function to clean exit\\n'\n buf+= self.cleanup_exit(submode=submode)+'\\n\\n'\n buf+= 'echo \"'+script+'\"\\n' #write script name withing script body\n buf+= 'hostname\\n' #node where job will be run\n buf+= 'echo $PBS_JOBID\\n'\n buf+= 'ulimit -m '+`ulimit`+' #maximum memory\\n'\n buf+= 'source ~/.bash_profile >/dev/null #environment variables\\n'\n buf+= 'wd='+wd+' #working directory\\n'\n buf+= '/bin/mkdir -p $wd\\n'\n buf+= 'export LOCAL_LOG=\"'+local_log+'\"\\n'\n buf+= '/bin/touch $LOCAL_LOG\\n'\n if submode=='sub' and redirect:\n buf+='exec &> $LOCAL_LOG #redirect STODOUT, STDERR to LOCAL_LOG\\n' \n buf+= 'export REMOTE_LOG=\"'+remote_log+'\"\\n'\n\n but+= '#clean up old log file\\n'\n buf+= 'if [ -f $REMOTE_LOG ]; then\\n' \n buf+= ' /bin/rm -f $REMOTE_LOG\\n'\n buf+= 'fi\\n\\n'\n\n buf+= 'trap \"cleanup_exit 1\" TERM #in case of killing job\\n\\n'\n\n buf+= '#update node code library && import libraries\\n'\n buf+= 'if !('\n buf+= 'updateNodeLib && ' \n buf+= 'shared_temporal_libraries _PREPARE_'\n buf+= ');then\\n'\n buf+= ' cleanup_exit 1\\n'\n buf+= 'fi\\n\\n'\n \n buf+= '/bin/cp '+' '.join(self.inpl)+' $wd #bring input files\\n' \n buf+= 'cd $wd\\n\\n'\n buf+= '#Test command success\\n'\n buf+= 'exs=0 #variable holding script exit status\\n'\n buf+= 'if !('\n buf+= self.exe\n buf+= ');then\\n'\n buf+= ' exs=1\\n'\n buf+= 'fi\\n\\n'\n buf+= '#move even partial results (exs=1)\\n'\n buf+= '/bin/mv '+' '.join(self.outl)+' '+outdir+'\\n'\n buf+= 'cleanup_exit $exs'\n\n open(script,'w').write(buf)\n pastry('chmod u+x '+script)\n\n return script",
"def run_job(args):\n\n global stop_all\n global jobfiles_global\n global jobwcl\n\n jobwcl = WCL()\n jobfiles = {'infullnames': [args.config, args.workflow],\n 'outfullnames': [],\n 'output_putinfo': {}}\n jobfiles_global = {'infullnames': [args.config, args.workflow],\n 'outfullnames': [],\n 'output_putinfo': {}}\n\n jobstart = time.time()\n with open(args.config, 'r') as wclfh:\n jobwcl.read(wclfh, filename=args.config)\n jobwcl['verify_files'] = miscutils.checkTrue('verify_files', jobwcl, False)\n jobwcl['jobroot'] = os.getcwd()\n jobwcl['job_max_usage'] = 0\n #jobwcl['pre_job_disk_usage'] = pfwutils.diskusage(jobwcl['jobroot'])\n jobwcl['pre_job_disk_usage'] = 0\n\n # Save pointers to archive information for quick lookup\n if jobwcl[pfwdefs.USE_HOME_ARCHIVE_INPUT] != 'never' or \\\n jobwcl[pfwdefs.USE_HOME_ARCHIVE_OUTPUT] != 'never':\n jobwcl['home_archive_info'] = jobwcl[pfwdefs.SW_ARCHIVESECT][jobwcl[pfwdefs.HOME_ARCHIVE]]\n else:\n jobwcl['home_archive_info'] = None\n\n if jobwcl[pfwdefs.USE_TARGET_ARCHIVE_INPUT] != 'never' or \\\n jobwcl[pfwdefs.USE_TARGET_ARCHIVE_OUTPUT] != 'never':\n jobwcl['target_archive_info'] = jobwcl[pfwdefs.SW_ARCHIVESECT][jobwcl[pfwdefs.TARGET_ARCHIVE]]\n else:\n jobwcl['target_archive_info'] = None\n\n # run the tasks (i.e., each wrapper execution)\n stop_all = miscutils.checkTrue('stop_on_fail', jobwcl, True)\n\n try:\n jobfiles['infullnames'] = gather_initial_fullnames()\n jobfiles_global['infullnames'].extend(jobfiles['infullnames'])\n miscutils.coremakedirs('log')\n miscutils.coremakedirs('outputwcl')\n exitcode, jobfiles = job_workflow(args.workflow, jobfiles, jobwcl)\n except Exception:\n (extype, exvalue, trback) = sys.exc_info()\n print '!' * 60\n traceback.print_exception(extype, exvalue, trback, file=sys.stdout)\n exitcode = pfwdefs.PF_EXIT_FAILURE\n print \"Aborting rest of wrapper executions. Continuing to end-of-job tasks\\n\\n\"\n\n try:\n create_junk_tarball(jobwcl, jobfiles, exitcode)\n except:\n print \"Error creating junk tarball\"\n # if should transfer at end of job\n if jobfiles['output_putinfo']:\n print \"\\n\\nCalling file transfer for end of job (%s files)\" % \\\n (len(jobfiles['output_putinfo']))\n\n copy_output_to_archive(jobwcl, jobfiles, jobfiles['output_putinfo'], 'job',\n 'job_output', exitcode)\n else:\n print \"\\n\\n0 files to transfer for end of job\"\n if miscutils.fwdebug_check(1, \"PFWRUNJOB_DEBUG\"):\n miscutils.fwdebug_print(\"len(jobfiles['outfullnames'])=%s\" % \\\n (len(jobfiles['outfullnames'])))\n print \"\\nDESDMTIME: pfwrun_job %0.3f\" % (time.time()-jobstart)\n return exitcode",
"def main():\n init()\n separator_len = 40\n for s in stage_instances:\n print('='*separator_len)\n print(s.name)\n print('-'*separator_len)\n\n s.add_tasks() # Add tasks from previous stage\n s.revive_or_archive() # Revive killed tasks or move them to failed\n s.schedule_jobs() # Schedule new jobs if needed\n s.print_status()\n print('='*separator_len + '\\n')\n render(stage_instances)",
"def _start_torque_workers(self):\n for bundle in self._model.batch_get_bundles(state=State.STAGED, bundle_type='run'):\n resource_args = []\n\n request_cpus = self._compute_request_cpus(bundle)\n if request_cpus:\n resource_args.extend(['-l', 'nodes=1:ppn=%d' % request_cpus])\n\n request_memory = self._compute_request_memory(bundle)\n if request_memory:\n resource_args.extend(['-l', 'mem=%d' % request_memory])\n\n request_queue = bundle.metadata.request_queue or self._default_request_queue\n if request_queue:\n # Either host=<host-name> or <queue-name>, but not tag=<tag>\n m = re.match('host=(.+)', request_queue)\n tagm = re.match('tag=.+', request_queue)\n if m:\n resource_args.extend(['-l', 'host=' + m.group(1)])\n elif not tagm:\n resource_args.extend(['-q', request_queue])\n\n request_priority = bundle.metadata.request_priority or self._default_request_priority\n if request_priority:\n resource_args.extend(['-p', str(request_priority)])\n\n script_args = [\n '--server', self._torque_bundle_service_url,\n '--password-file', self._torque_password_file,\n '--shared-file-system',\n ]\n\n script_env = {\n 'LOG_DIR': self._torque_log_dir,\n 'WORKER_CODE_DIR': self._torque_worker_code_dir,\n # -v doesn't work with spaces, so we have to hack it.\n 'WORKER_ARGS': '|'.join(script_args),\n }\n\n command = self._torque_ssh_command(\n ['qsub',\n '-k', 'n', # do not keep stdout/stderr streams (we redirect them manually to the configured log_dir)\n '-d', '/tmp', # avoid chdir permission problems, worker won't do anything in working directory anyway\n '-v', ','.join([k + '=' + v for k, v in script_env.iteritems()])] +\n resource_args +\n ['-S', '/bin/bash', os.path.join(self._torque_worker_code_dir, 'worker.sh')])\n\n # Throttle Torque commands, sometimes scheduler has trouble keeping up\n elapsed = time.time() - self._last_qsub_time\n if elapsed < self._torque_min_seconds_between_qsub:\n time.sleep(self._torque_min_seconds_between_qsub - elapsed)\n\n try:\n job_handle = subprocess.check_output(command, stderr=subprocess.STDOUT).strip()\n except subprocess.CalledProcessError as e:\n failure_message = 'Failed to launch Torque job: ' + e.output\n logger.info('Failing %s: %s', bundle.uuid, failure_message)\n self._model.update_bundle(\n bundle, {'state': State.FAILED,\n 'metadata': {'failure_message': failure_message}})\n continue\n finally:\n self._last_qsub_time = time.time()\n\n logger.info('Started Torque worker for bundle %s, job handle %s', bundle.uuid, job_handle)\n self._model.set_waiting_for_worker_startup_bundle(bundle, job_handle)",
"def make_jobs(commands, job_prefix, queue, jobs_dir=\"jobs/\",\r\n walltime=\"72:00:00\", ncpus=1, nodes=1, keep_output=\"oe\"):\r\n\r\n filenames = []\r\n create_dir(jobs_dir)\r\n for command in commands:\r\n fd, job_name = mkstemp(dir=jobs_dir, prefix=job_prefix + \"_\",\r\n suffix=\".txt\")\r\n close(fd)\r\n out_fh = open(job_name, \"w\")\r\n\r\n out_fh.write(QSUB_TEXT % (walltime, ncpus, nodes, queue, job_prefix,\r\n keep_output, command))\r\n out_fh.close()\r\n filenames.append(job_name)\r\n return filenames",
"def prepare_parafly_slurm_job_script(sBasename_job, sBasename_parafly, sDirectory_job, sEmail, iWalltime_in = None, nNode_in = None, nThread_in=None, sJob_name_in =None, sPython_env_in =None, sQueue_in=None):\n if iWalltime_in is not None:\n iWalltime = iWalltime_in \n else:\n iWalltime = 2\n if nNode_in is not None:\n iNode = nNode_in \n else:\n iNode = 1\n if nThread_in is not None:\n nThread = nThread_in \n else:\n nThread = 40\n \n if sJob_name_in is not None:\n sJob_name = sJob_name_in \n else:\n sJob_name = 'parafly'\n if sPython_env_in is not None:\n sPython_env = sPython_env_in \n else:\n sPython_env = 'base'\n \n if sQueue_in is not None:\n sQueue = sQueue_in \n else:\n sQueue = 'short'\n \n sWalltime =\"{:0d}\".format(iWalltime )\n sNode = \"{:0d}\".format(iNode )\n sThread = \"{:0d}\".format(nThread )\n \n os.chdir(sDirectory_job)\n \n ofs = open(sBasename_job,\"w\") #write mode \n sLine = '#!/bin/bash' + '\\n'\n ofs.write( sLine ) \n sLine = '#SBATCH --account=esmd' + '\\n'\n ofs.write( sLine ) \n\n #sLine = '#SBATCH --begin=now+1minutes' + '\\n'\n #ofs.write( sLine ) \n\n sLine = '#SBATCH --cpus-per-task=1 ' + '\\n'\n ofs.write( sLine ) \n\n sLine = '#SBATCH --dependency=singleton ' + '\\n'\n ofs.write( sLine )\n sLine = '#SBATCH --error=stderr_%j.err' + '\\n'\n ofs.write( sLine ) \n sLine = '#SBATCH --job-name=' + sJob_name + ' # create a name for your job' + '\\n'\n ofs.write( sLine ) \n sLine = '#SBATCH --mail-type=ALL' + '\\n'\n ofs.write( sLine ) \n sLine = '#SBATCH --mail-user=' + sEmail + '\\n'\n ofs.write( sLine ) \n\n sLine = '#SBATCH --nodes=' + sNode + ' # node count' + '\\n'\n ofs.write( sLine ) \n sLine = '#SBATCH --ntasks=' + sThread + ' # total number of tasks' + '\\n'\n ofs.write( sLine ) \n sLine = '#SBATCH --output=stdout_%j.out' + '\\n'\n ofs.write( sLine ) \n\n sLine = '#SBATCH --partition=' + sQueue + '\\n' #can be improved here\n ofs.write( sLine ) \n sLine = '#SBATCH --time=' + sWalltime +':00:00 # total run time limit (HH:MM:SS)' + '\\n'\n ofs.write( sLine ) \n\n sLine = 'module purge' + '\\n'\n ofs.write( sLine ) \n sLine = 'module load parafly/2013' + '\\n'\n ofs.write( sLine ) \n sLine = 'module load anaconda3/2019.03' + '\\n'\n ofs.write( sLine ) \n sLine = 'source /share/apps/anaconda3/2019.03/etc/profile.d/conda.sh' + '\\n'\n ofs.write( sLine ) \n sLine = 'unset PYTHONHOME' + '\\n'\n ofs.write( sLine ) \n sLine = 'conda activate ' + sPython_env + '\\n'\n ofs.write( sLine ) \n\n sLine = 'ParaFly -c ' + sBasename_parafly + ' -CPU ' + sThread + ' -failed_cmds rerun.txt' + '\\n'\n ofs.write( sLine ) \n \n sLine = 'echo \" Job \" ' + '${SLURM_JOBID}' + ' is launched' + '\\n'\n ofs.write( sLine ) \n\n sLine = 'conda deactivate' + '\\n'\n ofs.write( sLine ) \n \n sLine = 'echo \"Finished\"' + '\\n'\n ofs.write( sLine ) \n ofs.close() \n \n return",
"def makeJob(kallisto, index, meta, bootstraps, files, single, s=1, l=180): \n cmd = \"%(kallisto)s quant -i %(index)s -o %(meta)s \" % locals()\n for file in files: \n cmd += \" ../%s\" % file \n if single: \n cmd += \" --single -l %(l)i -s %(s)i\" % locals()\n cmd += \" &> %s.log.txt\" % meta\n return cmd",
"def go(self):\n\n self._write_master()\n num_fofs = self.fofs['fofid'].max()\n fof_splits = split.get_splits(num_fofs, self['chunksize'])\n\n njobs=0\n fobj=None\n\n icondor=0\n for isplit,fof_split in enumerate(fof_splits):\n if njobs % self['jobs_per_sub']==0:\n if fobj is not None:\n fobj.close()\n fobj = self._open_condor_script(icondor)\n icondor += 1\n\n self._write_split(fobj, isplit, fof_split)\n\n njobs += 1",
"def run( **kwargs ):\n\n # combine options using correct preference\n options = dict(PARAMS.items())\n options.update( getCallerLocals().items() )\n options.update( kwargs.items() )\n\n def setupJob( session ):\n\n jt = session.createJobTemplate()\n jt.workingDirectory = os.getcwd()\n jt.jobEnvironment = { 'BASH_ENV' : '~/.bashrc' }\n jt.args = []\n jt.nativeSpecification = \"-V -q %s -p %i -N %s %s\" % \\\n (options.get(\"job_queue\", global_options.cluster_queue ),\n options.get(\"job_priority\", global_options.cluster_priority ),\n \"_\" + re.sub( \"[:]\", \"_\", os.path.basename(options.get(\"outfile\", \"ruffus\" ))),\n options.get(\"job_options\", global_options.cluster_options))\n\n # keep stdout and stderr separate\n jt.joinFiles=False\n\n return jt\n\n shellfile = os.path.join( os.getcwd(), \"shell.log\" )\n \n # run multiple jobs\n if options.get( \"statements\" ):\n\n statement_list = []\n for statement in options.get(\"statements\"): \n options[\"statement\"] = statement\n statement_list.append(buildStatement( **options))\n \n if options.get( \"dryrun\", False ): return\n\n # get session for process - only one is permitted\n pid = os.getpid()\n if pid not in global_sessions: \n\n L.debug( \"creating new drmaa session for pid %i\" % pid )\n global_sessions[pid]=drmaa.Session() \n global_sessions[pid].initialize()\n\n session = global_sessions[pid]\n \n jt = setupJob( session )\n \n jobids, filenames = [], []\n for statement in statement_list:\n # create job script\n tmpfile = tempfile.NamedTemporaryFile( dir = os.getcwd() , delete = False )\n tmpfile.write( \"#!/bin/bash\\n\" ) # -l -O expand_aliases\\n\" )\n tmpfile.write( 'echo \"START--------------------------------\" >> %s \\n' % shellfile )\n tmpfile.write( '''echo 'statement=%s' >> %s\\n''' % (statement, shellfile) )\n tmpfile.write( \"set &>> %s\\n\" % shellfile)\n tmpfile.write( \"module list &>> %s\\n\" % shellfile )\n tmpfile.write( 'echo \"END----------------------------------\" >> %s \\n' % shellfile )\n tmpfile.write( expandStatement(statement) + \"\\n\" )\n tmpfile.close()\n\n # build paths\n job_path = os.path.abspath( tmpfile.name )\n stdout_path = job_path + \".stdout\" \n stderr_path = job_path + \".stderr\" \n\n jt.remoteCommand = job_path\n jt.outputPath=\":\"+ stdout_path\n jt.errorPath=\":\" + stderr_path\n\n os.chmod( job_path, stat.S_IRWXG | stat.S_IRWXU )\n\n jobid = session.runJob(jt)\n jobids.append( jobid )\n filenames.append( (job_path, stdout_path, stderr_path) )\n\n L.debug( \"job has been submitted with jobid %s\" % str(jobid ))\n \n L.debug( \"waiting for %i jobs to finish \" % len(jobids) )\n session.synchronize(jobids, drmaa.Session.TIMEOUT_WAIT_FOREVER, False)\n \n # collect and clean up\n for jobid, statement, paths in zip( jobids, statement_list, filenames) :\n job_path, stdout_path, stderr_path = paths\n retval = session.wait(jobid, drmaa.Session.TIMEOUT_WAIT_FOREVER)\n\n stdout, stderr = getStdoutStderr( stdout_path, stderr_path )\n\n if retval.exitStatus != 0:\n raise PipelineError( \"---------------------------------------\\n\"\n \"Child was terminated by signal %i: \\n\"\n \"The stderr was: \\n%s\\n%s\\n\" \n \"---------------------------------------\\n\" % \\\n (retval.exitStatus, \n \"\".join( stderr),\n statement ) )\n\n os.unlink( job_path )\n \n session.deleteJobTemplate(jt)\n\n # run a single parallel job\n elif (options.get( \"job_queue\" ) or options.get( \"to_cluster\" )) and not global_options.without_cluster:\n\n statement = buildStatement( **options )\n\n if options.get( 
\"dryrun\", False ): return\n\n tmpfile = tempfile.NamedTemporaryFile( dir = os.getcwd() , delete = False )\n tmpfile.write( \"#!/bin/bash\\n\" ) # -l -O expand_aliases\\n\" )\n\n tmpfile.write( 'echo \"START--------------------------------\" >> %s \\n' % shellfile )\n tmpfile.write( 'echo \"statement=%s\" >> %s\\n' % (statement, shellfile) )\n tmpfile.write( \"set &>> %s\\n\" % shellfile)\n tmpfile.write( \"module list &>> %s\\n\" % shellfile )\n tmpfile.write( 'echo \"END----------------------------------\" >> %s \\n' % shellfile )\n tmpfile.write( expandStatement( statement ) + \"\\n\" )\n tmpfile.close()\n\n job_path = os.path.abspath( tmpfile.name )\n stdout_path = job_path + \".stdout\" \n stderr_path = job_path + \".stderr\" \n\n os.chmod( job_path, stat.S_IRWXG | stat.S_IRWXU )\n\n # get session for process - only one is permitted\n pid = os.getpid()\n if pid not in global_sessions:\n L.debug( \"creating new drmaa session for pid %i\" % pid )\n global_sessions[pid]=drmaa.Session() \n global_sessions[pid].initialize()\n\n session = global_sessions[pid]\n\n jt = setupJob( session )\n\n jt.remoteCommand = job_path\n # later: allow redirection of stdout and stderr to files; can even be across hosts?\n jt.outputPath=\":\"+ stdout_path\n jt.errorPath=\":\" + stderr_path\n\n if \"job_array\" in options and options[\"job_array\"] != None:\n # run an array job\n start, end, increment = options.get(\"job_array\" )\n L.debug(\"starting an array job: %i-%i,%i\" % (start, end, increment ))\n # sge works with 1-based, closed intervals\n jobids = session.runBulkJobs( jt, start+1, end, increment )\n L.debug( \"%i array jobs have been submitted as jobid %s\" % (len(jobids), jobids[0]) )\n retval = session.synchronize(jobids, drmaa.Session.TIMEOUT_WAIT_FOREVER, True)\n else:\n jobid = session.runJob(jt)\n L.debug( \"job has been submitted with jobid %s\" % str(jobid ))\n try:\n retval = session.wait(jobid, drmaa.Session.TIMEOUT_WAIT_FOREVER)\n except Exception, msg:\n # ignore message 24 in PBS\n # code 24: drmaa: Job finished but resource usage information and/or termination status could not be provided.\":\n if not msg.message.startswith(\"code 24\"): raise\n retval = None\n\n stdout, stderr = getStdoutStderr( stdout_path, stderr_path )\n\n if \"job_array\" not in options:\n if retval and retval.exitStatus != 0:\n raise PipelineError( \"---------------------------------------\\n\"\n \"Child was terminated by signal %i: \\n\"\n \"The stderr was: \\n%s\\n%s\\n\"\n \"-----------------------------------------\" % \\\n (retval.exitStatus, \n \"\".join( stderr), statement))\n \n session.deleteJobTemplate(jt)\n os.unlink( job_path )\n\n else:\n statement = buildStatement( **options )\n\n if options.get( \"dryrun\", False ): return\n \n if \"<(\" in statement:\n if \"'\" in statement: raise ValueError( \"advanced bash syntax combined with single quotes\" )\n statement = \"\"\"/bin/bash -c '%s'\"\"\" % statement\n\n process = subprocess.Popen( expandStatement( statement ),\n cwd = os.getcwd(), \n shell = True,\n stdin = subprocess.PIPE,\n stdout = subprocess.PIPE,\n stderr = subprocess.PIPE )\n\n # process.stdin.close()\n stdout, stderr = process.communicate()\n\n if process.returncode != 0:\n raise PipelineError( \"---------------------------------------\\n\"\n \"Child was terminated by signal %i: \\n\"\n \"The stderr was: \\n%s\\n%s\\n\"\n \"-----------------------------------------\" % \\\n (-process.returncode, stderr, statement ))",
"def write_pbs(self):\n fout = open(\"runStarCCM.pbs\", \"w\")\n fout.write(\"#PBS -S /bin/csh\\n\")\n fout.write(\"#PBS -l select=\" + str(self.numNodes) + \":ncpus=\" + str(self.numCPUs) + \":mpiprocs=\" + str(self.mpiProcs) + \":model=has,walltime=\" + self.WallTime + \"\\n\\n\")\n fout.write(\"#PBS -W group_list=\" + self.GroupID + \"\\n\")\n fout.write(\"#PBS -j oe\\n\")\n fout.write(\"#PBS -q \" + self.queue + \"\\n\")\n fout.write(\"#PBS -N \" + self.jobName + \"\\n\")\n fout.write(\"#PBS -m e\\n\")\n fout.write(\"#PBS -W block=true\\n\\n\")\n fout.write(\"cd $PBS_O_WORKDIR\\n\")\n\n if self.runVolGrid == 1:\n #fout.write(\"/bin/rm -f \" + self.simMeshFile + \".sim\\n\")\n fout.write(\"/bin/rm -f starccmMeshRun.out\\n\")\n fout.write(\"chmod u+x \" + self.cshBatch1File + \".csh\\n\")\n # do not use >>& because it will fail in some environment\n fout.write(\"./\" + self.cshBatch1File + \".csh -powerOnDemand \" + self.javaBatch1File + \".java >& starccmMeshRun.out\\n\\n\")\n else:\n fout.write(\"echo 'User chooses not to make a mesh run.'\\n\")\n\n if self.runCFD == 1:\n fout.write(\"chmod u+x \" + self.cshBatch2File + \".csh\\n\")\n fout.write(\"/bin/rm -f *.csv *.png starccmFlowRun.out\\n\")\n # do not use >>& because it will fail in some environment\n fout.write(\"./\" + self.cshBatch2File + \".csh -powerOnDemand \" + self.javaBatch2File + \".java \" + self.simMeshFile + \" >& starccmFlowRun.out\\n\\n\")\n fout.write(\"# rename the strange file names\\n\")\n fout.write(\"/bin/mv \\$PWDForceX.csv ForceX.csv\\n\")\n fout.write(\"/bin/mv \\$PWDForceY.csv ForceY.csv\\n\")\n fout.write(\"/bin/mv \\$PWDForceZ.csv ForceZ.csv\\n\")\n fout.write(\"/bin/mv \\$PWDMomentX.csv MomentX.csv\\n\")\n fout.write(\"/bin/mv \\$PWDMomentY.csv MomentY.csv\\n\")\n fout.write(\"/bin/mv \\$PWDMomentZ.csv MomentZ.csv\\n\")\n fout.write(\"/bin/mv \\$PWDResiduals.csv Residuals.csv\\n\\n\")\n fout.write(\"/bin/mv \\$PWDForceX.png ForceX.png\\n\")\n fout.write(\"/bin/mv \\$PWDForceY.png ForceY.png\\n\")\n fout.write(\"/bin/mv \\$PWDForceZ.png ForceZ.png\\n\")\n fout.write(\"/bin/mv \\$PWDMomentX.png MomentX.png\\n\")\n fout.write(\"/bin/mv \\$PWDMomentY.png MomentY.png\\n\")\n fout.write(\"/bin/mv \\$PWDMomentZ.png MomentZ.png\\n\")\n fout.write(\"/bin/mv \\$PWDResiduals.png Residuals.png\\n\")\n fout.write(\"/bin/mv \\$PWDUpperCp.png UpperCp.png\\n\")\n fout.write(\"/bin/mv \\$PWDLowerCp.png LowerCp.png\\n\")\n fout.write(\"/bin/rm -rf null\\n\")\n else:\n fout.write(\"echo 'User chooses not to make a CFD run.'\\n\")\n\n fout.close()",
"def launchJobs(options, cmdargs, errStream=sys.stdin):\n\n if options.queue == LOCAL:\n launchLocalJobs(options,cmdargs,errStream)\n return\n\n logging.debug(\"Launching task array: %r\" % ({'tmpDir':options.tmpDir,'splits':options.splits,'fragName':options.fragBase,'cmd':cmdargs,'sgeOpts':options.sgeOptions,'job':options.jobName,'priority':options.priority,'loglevel':options.verbose,'wait':options.wait, 'type':options.taskType}))\n \n # SGE or SLURM submission prefix\n command = getSubmissionCommandPrefix(options)\n\n # batch_runner command\n command.append(BATCHLAUNCHER)\n command+=[\"--mode\",\"run\",\"--tmp_dir\",options.tmpDir,\"--frag_base\",\n options.fragBase, \"--frag_dir\", options.frag_dir, \"--frag_suffix\", options.fragSuff, \"--loglevel\", str(options.verbose), \"--queue\", options.queue]\n if options.inputFlag is not None:\n command.append('-i=%s' % (options.inputFlag))\n if options.prefixFlag is not None:\n command.append('-p=%s' % (options.prefixFlag))\n if options.threadsFlag is not None:\n command+=['-t',str(options.threadsFlag)]\n if options.outputFlags is not None:\n for flag in options.outputFlags:\n command.append('-o=%s' % (flag))\n if options.taskType is not None:\n command+=['--taskType',options.taskType]\n if options.cwd:\n command.append('--cwd')\n command.append('--')\n command+=cmdargs\n\n # redirect qsub output to std, silence if vebose is 0\n #if options.verbose==0:\n # qsubOuts=open(os.devnull,'w')\n #else:\n # qsubOuts=errStream\n \n # run command\n logging.debug('Launching task array: %s' % (formatCommand(command)))\n try:\n submissionOutput = subprocess.check_output(command)\n try:\n submissionOutput = submissionOutput.decode()\n except:\n pass\n if options.verbose>0:\n errStream.write(\"Submission Output: \" + submissionOutput)\n except subprocess.CalledProcessError as error:\n if options.wait and options.queue != SLURM:\n # when using -sync y, the exit code may come from a task\n # (which cleanup will handle)\n logging.warning(\"qsub returned an error code of: %d\" \n % error.returncode)\n else:\n raise error\n\n # get job id\n try:\n jobid = re.search(r'(\\d+)\\s*$',submissionOutput).group(1)\n options.jobid = jobid\n except:\n if options.queue==SLURM:\n logging.error(\"Cannot parse SLURM job id from '%s'\" % (submissionOutput))\n raise\n\n # SLURM doesn't allow waiting for completion on array jobs, so we hack:\n # use srun to start a dummy job that will wait for our job array\n if options.wait and options.queue==SLURM:\n waitForSlurmArray(options, errStream)",
"def _insertAllSteps(self):\n \n # Get pointer to input micrographs \n self.particlePickingRun = self.xmippParticlePicking.get()\n \n copyId = self._insertFunctionStep('copyInputFilesStep')\n # Get micrographs to pick\n #self.inputMicrographs.set(self.getInputMicrographs())\n \n deps = []\n for mic in self.getInputMicrographs():\n stepId = self._insertFunctionStep('autopickMicrographStep', mic.getFileName(), prerequisites=[copyId])\n deps.append(stepId)\n \n self._insertFunctionStep('_createOutput',self._getExtraPath(), prerequisites=deps)",
"def create_job_ymls(job_template_args, job_template_dir, job_template_name, output_dir, upload=True):\n for i, template_args in enumerate(job_template_args):\n template_loader = jinja2.FileSystemLoader(searchpath=job_template_dir)\n template_env = jinja2.Environment(loader=template_loader)\n template_file = job_template_name\n template = template_env.get_template(template_file)\n output_text = template.render(template_args)\n job_yml = 'vista-job-{}.yml'.format(i)\n job_yml_path = join(output_dir, job_yml)\n \n if not exists(output_dir):\n mkdir(output_dir)\n with open(job_yml_path, 'w') as output_template:\n output_template.write(output_text)\n if upload:\n upload_blob(UPLOAD_BUCKET, job_yml_path, job_yml)\n print(job_yml, 'uploaded')",
"def _insertAllSteps(self): \n self.uMics = self.inputCoordinatesTiltedPairs.get().getUntilted().getMicrographs()\n self.tMics = self.inputCoordinatesTiltedPairs.get().getTilted().getMicrographs()\n\n self.inputMics = self._createSetOfParticles('auxMics')\n self.inputMics.copyInfo(self.uMics)\n self.inputMics.setStore(False)\n \n for micU, micT in izip(self.uMics, self.tMics):\n micU.cleanObjId()\n micT.cleanObjId()\n self.inputMics.append(micU)\n self.inputMics.append(micT)\n\n self.samplingInput = self.uMics.getSamplingRate()\n \n\n if self.downsampleType.get() != OTHER:\n # If 'same as picking' or 'original' get sampling rate from input micrographs\n #TODO: Review this when downsampling before picking is possible\n self.samplingFinal = self.samplingInput\n else:\n # If 'other' multiply the input sampling rate by the factor provided\n self.samplingFinal = self.samplingInput*self.downFactor.get()\n \n # Write pos files for each micrograph\n firstStepId = self._insertFunctionStep('writePosFilesStep')\n \n # For each micrograph insert the steps\n #run in parallel\n \n deps = []\n for mic in self.inputMics:\n localDeps = [firstStepId]\n micrographToExtract = mic.getFileName()\n micName = removeBaseExt(mic.getFileName())\n micId = mic.getObjId()\n\n # If downsample type is 'other' perform a downsample\n if self.downsampleType == OTHER:\n fnDownsampled = self._getTmpPath(micName+\"_downsampled.xmp\")\n downFactor = self.downFactor.get()\n args = \"-i %(micrographToExtract)s -o %(fnDownsampled)s --step %(downFactor)f --method fourier\"\n localDeps=[self._insertRunJobStep(\"xmipp_transform_downsample\", args % locals(),prerequisites=localDeps)]\n micrographToExtract = fnDownsampled\n \n # If remove dust \n if self.doRemoveDust:\n fnNoDust = self._getTmpPath(micName+\"_noDust.xmp\")\n \n thresholdDust = self.thresholdDust.get() #TODO: remove this extra variable\n args=\" -i %(micrographToExtract)s -o %(fnNoDust)s --bad_pixels outliers %(thresholdDust)f\"\n localDeps=[self._insertRunJobStep(\"xmipp_transform_filter\", args % locals(),prerequisites=localDeps)]\n micrographToExtract = fnNoDust\n \n #self._insertFunctionStep('getCTF', micId, micName, micrographToExtract)\n micName = removeBaseExt(mic.getFileName())\n \n # Actually extract\n deps.append(self._insertFunctionStep('extractParticlesStep', micId, micName, \n None, micrographToExtract, prerequisites=localDeps))\n # TODO: Delete temporary files\n \n # Insert step to create output objects \n self._insertFunctionStep('createOutputStep', prerequisites=deps)",
"def make_all(i_file, config,\n out_dir, submit=True, pism_root=pism_root, **kwargs):\n\n # make new directory or break if existing\n try:\n os.makedirs(out_dir)\n except OSError:\n print(\"Directory %s exists, skipping it.\" % out_dir)\n return 2\n\n # make config file\n c_path = make_config(config, out_dir=out_dir, pism_root=pism_root)\n\n # make job script chain\n j_list = make_chain(i_file,\n out_dir=out_dir, pism_root=pism_root, **kwargs)\n\n # submit job chain\n if submit is True:\n j_list = submit_chain(j_list)\n\n # no error, return 0\n return 0",
"def standard_job_set(msg):\n\n run_num = msg['run']\n jobs = [[], [], [], [], []]\n new_dep = {'time': None, 'md5': None}\n\n # Add ROME jobs first\n cmd_prefix = \"./midanalyzer.exe -b -i romeConfig.xml -r \"\n cmd_suffix = \" -m offline -p 0 -q\"\n rome_dir = offline_dir + '/rome-processors'\n\n job = {}\n job['meta'] = datadir + '/shim/.crunchd_metadata.json'\n job['cmd'] = cmd_prefix + str(run_num) + cmd_suffix\n job['clean'] = 'rm histos*.root run*.root'\n\n job['name'] = 'single-laser'\n job['dir'] = rome_dir + '/single-laser'\n job['deps'] = {}\n job['deps'][job['dir'] + '/midanalyzer.exe'] = new_dep\n jobs[0].append(job)\n\n job = copy.copy(job)\n job['name'] = 'double-laser'\n job['dir'] = rome_dir + '/double-laser'\n job['deps'] = {}\n job['deps'][job['dir'] + '/midanalyzer.exe'] = new_dep\n jobs[0].append(job)\n\n job = copy.copy(job)\n job['name'] = 'capacitec'\n job['dir'] = rome_dir + '/capacitec'\n job['deps'] = {}\n job['deps'][job['dir'] + '/midanalyzer.exe'] = new_dep\n jobs[0].append(job)\n\n job = copy.copy(job)\n job['name'] = 'shim-platform'\n job['dir'] = rome_dir + '/shim-platform'\n job['deps'] = {}\n job['deps'][job['dir'] + '/midanalyzer.exe'] = new_dep\n jobs[0].append(job)\n\n job = copy.copy(job)\n job['name'] = 'metrolab'\n job['dir'] = rome_dir + '/metrolab'\n job['deps'] = {}\n job['deps'][job['dir'] + '/midanalyzer.exe'] = new_dep\n jobs[0].append(job)\n\n job = copy.copy(job)\n job['name'] = 'mscb-cart'\n job['dir'] = rome_dir + '/mscb-cart'\n job['deps'] = {}\n job['deps'][job['dir'] + '/midanalyzer.exe'] = new_dep\n jobs[0].append(job)\n\n job = copy.copy(job)\n job['name'] = 'mscb-ring'\n job['dir'] = rome_dir + '/mscb-ring'\n job['deps'] = {}\n job['deps'][job['dir'] + '/midanalyzer.exe'] = new_dep\n jobs[0].append(job)\n\n job = copy.copy(job)\n job['name'] = 'tilt-sensor'\n job['dir'] = rome_dir + '/tilt-sensor'\n job['deps'] = {}\n job['deps'][job['dir'] + '/midanalyzer.exe'] = new_dep\n jobs[0].append(job)\n\n job = copy.copy(job)\n job['name'] = 'hall-probe'\n job['dir'] = rome_dir + '/hall-probe'\n job['deps'] = {}\n job['deps'][job['dir'] + '/midanalyzer.exe'] = new_dep\n jobs[0].append(job)\n\n # Make sure run attributes are extracted.\n job = {}\n job['name'] = 'extract_run_attr'\n job['dir'] = offline_dir + '/crunchers'\n job['cmd'] = 'python scripts/extract_run_attr.py %i' % run_num\n job['clean'] = None\n job['meta'] = datadir + '/crunched/.crunchd_metadata.json'\n job['deps'] = {}\n jobs[0].append(job)\n\n # Now the data bundling job.\n job = {}\n job['name'] = 'make-shim-dataset'\n job['cmd'] = 'bin/make_shim_dataset %i' % run_num\n job['clean'] = None\n job['dir'] = offline_dir + '/crunchers'\n job['meta'] = datadir + '/shim/.crunchd_metadata.json'\n job['deps'] = {}\n job['deps'][offline_dir + '/crunchers/bin/make_shim_dataset'] = new_dep\n job['deps']['data/shim/*%05i.root' % run_num] = new_dep\n jobs[1].append(job)\n\n # Finally apply fixes.\n # job = {}\n # job['name'] = 'fix-probe-remap'\n # job['dir'] = offline_dir\n # job['cmd'] = 'bin/fix_run_probe_map '\n # job['cmd'] += 'data/crunched/run_%05i.root ' % run_num\n # job['cmd'] += 'data/crunched/ %i' % run_num\n # job['clean'] = None\n # job['meta'] = datadir + '/crunched/.crunchd_metadata.json'\n # job['deps'] = {}\n # job['deps'][offline_dir + '/bin/recrunch_fids'] = new_dep\n # job['deps'][datadir + '/shim/run_%05i.root' % run_num] = new_dep\n # jobs[2].append(job)\n\n # Automatically generate extracted dataset\n job = {}\n job['name'] = 'extraction'\n job['dir'] = 
offline_dir + '/crunchers'\n job['cmd'] = 'bin/make_extracted_dataset '\n job['cmd'] += 'data/crunched/run_%05i.root' % run_num\n job['clean'] = None\n job['meta'] = datadir + '/extracted/.crunchd_metadata.json'\n job['deps'] = {}\n job['deps'][offline_dir + '/crunchers/bin/make_extracted_dataset'] = new_dep\n job['deps'][datadir + '/crunched/run_%05i.root' % run_num] = new_dep\n jobs[3].append(job)\n\n return jobs",
"def make_jobscript(i_file, atm_file=None, dt_file=None, dp_file=None,\n fp_file=None, pp_file=None, sd_file=None, dsl_file=None,\n om_file=None, extra_vars=extra_vars,\n lapse_rate=6.0, ys=0.0, ye=1000.0, yts=10, yextra=100,\n mpi_exec=mpi_exec, pism_exec=pism_exec, pism_root=pism_root,\n nodes=1, time='24:00:00', out_dir=None, job_name='unnamed',\n ntasks_per_node=36, **boot_kwargs):\n\n # expand path to PISM root\n pism_root = os.path.abspath(pism_root)\n\n # get input and component model arguments\n input_args = get_input_args(i_file, pism_root=pism_root, **boot_kwargs)\n atm_args = get_atm_args(atm_file=atm_file, lapse_rate=lapse_rate,\n dt_file=dt_file, dp_file=dp_file, fp_file=fp_file,\n pp_file=pp_file, pism_root=pism_root)\n surface_args = get_surface_args(sd_file=sd_file, pism_root=pism_root)\n ocean_args = get_ocean_args(dsl_file=dsl_file, om_file=om_file, pism_root=pism_root)\n\n # format script\n script = template.format(**locals())\n\n # write script to file\n script_path = os.path.join(out_dir, 'job.' + job_name + '.sh')\n with open(script_path, 'w') as f:\n f.write(script)\n\n # return path to job script\n return script_path",
"def setup():\n processes = []\n try:\n s3.create_bucket(Bucket=BUCKET)\n jotfs_p = subprocess.Popen([\n \"./bin/jotfs\",\n \"-db\", DBNAME,\n \"-port\", str(PORT),\n \"-store_bucket\", BUCKET,\n \"-store_access_key\", STORE_ACCESS_KEY,\n \"-store_secret_key\", STORE_SECRET_KEY,\n \"-store_endpoint\", STORE_ENDPOINT,\n \"-tls_cert\", TLS_CERT,\n \"-tls_key\", TLS_KEY,\n \"-store_region\", \"us-east-1\",\n \"-debug\", \"-store_path_style\", \"-store_disable_ssl\"\n ])\n processes.append(jotfs_p)\n return processes\n except Exception as e:\n for p in processes:\n p.kill()\n raise e"
] | [
"0.7333931",
"0.71901464",
"0.67998576",
"0.6780292",
"0.63524926",
"0.63199294",
"0.6265143",
"0.6096322",
"0.6079112",
"0.588439",
"0.58698326",
"0.58114004",
"0.5785257",
"0.57349914",
"0.56735706",
"0.5660371",
"0.56586134",
"0.5653163",
"0.56474715",
"0.5637175",
"0.56359106",
"0.55911905",
"0.5588382",
"0.5548484",
"0.55404234",
"0.5488436",
"0.54881537",
"0.5474069",
"0.5472582",
"0.5446555"
] | 0.73390156 | 0 |
Writes the output in Sun Grid Engine job array submission format. Creates subshell scripts that contain the workflow for each input file separately. After this, a main shell script containing the SGE configuration is created; this script is responsible for starting the subshells as separate processes. | def write_sge(workloads, input_file_parameters, command_line_parameters):
validate_resource_manager_parameters(
input_file_parameters.resource_manager_params,
        ['#$ -o', '#$ -e', '#$ -t'])
workload_index = 0
workload_zfill_amount = len(str(len(workloads)))
workload_file_paths = []
for workload in workloads:
        # Each workflow part will have a separate file to submit to SGE with
        # the qsub command. Each file has one or more associated subshell files
# containing contents for each thread.
# Generate strings describing current workload and thread indexes for
# output file names
workload_index += 1
workload_index_string = str(workload_index).zfill(workload_zfill_amount)
file_main_name = '{0}_SGE_WORKLOAD_{1}'.format(NAME,
workload_index_string)
        # When --fix_run mode is used the output and log files already
        # exist. To prevent overwriting these files with new ones, specific
# prefix or appendix strings are added to the new output file names.
prefix = ''
appendix = '.sh'
i = 0
if command_line_parameters.fix_run:
mode = 'FIX'
elif command_line_parameters.compress_run == 'compress':
mode = 'COMPRESS'
elif command_line_parameters.compress_run == 'decompress':
mode = 'DECOMPRESS'
else:
mode = None
while mode is not None and os.path.exists(os.path.join(input_file_parameters.output_dir,
file_main_name + appendix)):
i += 1
prefix = '{0}_{1}_'.format(mode, i)
appendix = '_{0}_{1}.sh'.format(mode, i)
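        # For example (illustrative values), the first --fix_run rerun yields
        # subshell/master file names ending in "_FIX_1.sh" and status file
        # names prefixed with "FIX_1_".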
# Generate subshell files
thread_index = 1
for thread_contents in workload:
# Iterate over output commands of each thread and write necessary
# subshell files for each
out_lines = []
cmds_in_thread = len(thread_contents)
for i in xrange(cmds_in_thread):
                # Check whether any modules need loading, or whether they were already loaded by the previous command
skip_module_loading = False
if i > 0:
if thread_contents[i].load_module == thread_contents[i-1].load_module:
skip_module_loading = True
                # Check whether any modules need unloading, or whether they will still be used by the following command
skip_module_unloading = False
if i < cmds_in_thread-1:
if thread_contents[i].load_module == thread_contents[i+1].load_module:
skip_module_unloading = True
out_lines += generate_subshell_file_contents(thread_contents[i],
skip_module_loading,
skip_module_unloading)
# Write subshell file
thread_index_string = str(thread_index)
fl_name = '{0}_WORKLOAD_{1}_subshell_{2}{3}'.format(NAME,
workload_index_string,
thread_index_string,
appendix)
try:
out_fl = open(os.path.join(input_file_parameters.output_dir,
fl_name), 'w')
except:
raise STAPLERerror.STAPLERerror('Unable to create output file:'
'\n{0}'.format(os.path.join(
input_file_parameters.output_dir,
fl_name)))
out_fl.write('\n'.join(out_lines))
out_fl.write('\n')
out_fl.close()
thread_index += 1
        # Create lines for the SGE input file by generating the output, error
        # and array (-t) parameters based on user input
status_file_basename = os.path.join(input_file_parameters.output_dir,
prefix +
input_file_parameters.job_name + '_$TASK_ID')
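        # Note that "$TASK_ID" here is the pseudo variable that SGE's qsub
        # expands inside "#$ -o"/"#$ -e" path arguments, whereas the variable
        # available in the environment of a running array task (used below when
        # sourcing the subshell file) is "$SGE_TASK_ID".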
# IF YOU ADD NEW AUTOMATICALLY INFERRED PARAMETERS, REMEMBER TO VALIDATE
# THEM AT THE BEGINNING OF THIS FUNCTION
resmng_config = list(input_file_parameters.resource_manager_params)
resmng_config.append('#$ -o {0}.out'.format(status_file_basename))
resmng_config.append('#$ -e {0}.err'.format(status_file_basename))
resmng_config.append('#$ -t {0}-{1}'.format(1, len(workload)))
resmng_config.append('\n\n')
subshell_file_path = '{0}_WORKLOAD_{1}_subshell_{2}{3}'.format(NAME,
workload_index_string,
'"$SGE_TASK_ID"',
appendix)
subshell_file_path = os.path.join(input_file_parameters.output_dir,
subshell_file_path)
resmng_config.append('source {0}'.format(subshell_file_path))
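        # For illustration only (hypothetical job name "myjob", output
        # directory "/results" and 8 array tasks), the generated master file
        # would look roughly like:
        #     <user-supplied resource manager parameters>
        #     #$ -o /results/myjob_$TASK_ID.out
        #     #$ -e /results/myjob_$TASK_ID.err
        #     #$ -t 1-8
        #
        #     source /results/<NAME>_WORKLOAD_1_subshell_"$SGE_TASK_ID".sh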
out_fl_path = os.path.join(input_file_parameters.output_dir,
file_main_name + appendix)
workload_file_paths.append(out_fl_path)
try:
out_fl = open(out_fl_path, 'w')
except IOError as emsg:
raise STAPLERerror.STAPLERerror('Unable to create output file:'
'\n{0}\n with error message:\n{1}'
.format(os.path.join(input_file_parameters.output_dir,
file_main_name + appendix),
str(emsg)))
out_fl.write('\n'.join(resmng_config))
out_fl.write('\n')
out_fl.close()
return workload_file_paths | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def gen_jobs(fpath, num_runs, netid):\n\n run = \"\"\n run += \"import sys\\n\"\n run += \"import subprocess\\n\"\n run += \"cmd_array = (\"\n for i in range(num_runs):\n run += \"r\\\"python test.py %d\\\"\" % i\n run += \",\\n\"\n\n run += \")\\n\"\n run += \"p = subprocess.Popen(cmd_array[int(sys.argv[1])-1], shell=True, stdout=subprocess.PIPE)\\n\"\n run += \"out = p.stdout.read()\"\n# run += \"print cmd_array[int(sys.argv[1])]\"\n\n script_name = \"test\"\n\n if verbose:\n print \"Writing array script: \" + \"run.\" + script_name + \".py\"\n f = open(os.path.join(fpath, \"run.\" + script_name + \".py\"), 'w')\n f.write(\"%s\\n\" % run)\n\n f = open(os.path.join(fpath, \"submit_run.\" + script_name + \".sh\"), 'w')\n submit_run = \"#!/bin/csh\\n\"\n submit_run += \"#$ -N %s\\n\" % (\"job_%d\" % num_runs)\n submit_run += \"#$ -t 1:%d\\n\" % (num_runs)\n submit_run += \"#$ -M %[email protected]\\n\\n\" % (netid)\n# submit_run += \"#$ -q short\"\n# submit_run += \"#$ -r y\"\n submit_run += \"python run.%s.py ${SGE_TASK_ID}\" % (script_name)\n\n if verbose:\n print \"Writing submit shell script: \" + \"submit_run.\" + script_name + \".sh\"\n f.write(\"%s\\n\" % submit_run)",
"def write_slurm(workloads, input_file_parameters, command_line_parameters):\r\n workload_index = 0\r\n workload_zfill_amount = len(str(len(workloads)))\r\n workload_file_paths = []\r\n for workload in workloads:\r\n # Each workflow part will have separate file to submit to SLURM with\r\n # sbatch command. Each file has one or more associated subshell files\r\n # containing contents for each thread.\r\n\r\n # Generate strings describing current workload and thread indexes for\r\n # output file names\r\n workload_index += 1\r\n workload_index_string = str(workload_index).zfill(workload_zfill_amount)\r\n file_main_name = '{0}_SBATCH_WORKLOAD_{1}'.format(NAME,\r\n workload_index_string)\r\n\r\n # When --fix_run mode is used the output and log files files already\r\n # exist. To prevent overwriting these files with new ones specific\r\n # prefix or appendix strings are added to the new output file names.\r\n appendix = '.sh'\r\n prefix = ''\r\n i = 0\r\n if command_line_parameters.fix_run:\r\n mode = 'FIX'\r\n elif command_line_parameters.compress_run == 'compress':\r\n mode = 'COMPRESS'\r\n elif command_line_parameters.compress_run == 'decompress':\r\n mode = 'DECOMPRESS'\r\n else:\r\n mode = None\r\n\r\n while mode is not None and os.path.exists(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix)):\r\n i += 1\r\n prefix = '{0}_{1}_'.format(mode, i)\r\n appendix = '_{0}_{1}.sh'.format(mode, i)\r\n\r\n # Generate subshell files\r\n thread_index = 0\r\n for thread_contents in workload:\r\n # Iterate over output commands of each thread and write necessary\r\n # subshell files for each\r\n out_lines = []\r\n cmds_in_thread = len(thread_contents)\r\n for i in xrange(cmds_in_thread):\r\n # Check if any modules need loading or are they loaded by previous command\r\n skip_module_loading = False\r\n if i > 0:\r\n if thread_contents[i].load_module == thread_contents[i-1].load_module:\r\n skip_module_loading = True\r\n # Check if any modules need unloading or will they be used by following command\r\n skip_module_unloading = False\r\n if i < cmds_in_thread-1:\r\n if thread_contents[i].load_module == thread_contents[i+1].load_module:\r\n skip_module_unloading = True\r\n out_lines += generate_subshell_file_contents(thread_contents[i],\r\n skip_module_loading,\r\n skip_module_unloading)\r\n\r\n\r\n # Write subshell file\r\n thread_index += 1\r\n thread_index_string = str(thread_index)\r\n fl_name = '{0}_WORKLOAD_{1}_subshell_{2}{3}'.format(NAME,\r\n workload_index_string,\r\n thread_index_string,\r\n appendix)\r\n try:\r\n out_fl = open(os.path.join(input_file_parameters.output_dir,\r\n fl_name), 'w')\r\n except:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}'.format(os.path.join(\r\n input_file_parameters.output_dir,\r\n fl_name)))\r\n out_fl.write('\\n'.join(out_lines))\r\n out_fl.write('\\n')\r\n out_fl.close()\r\n\r\n # Create lines for SLURM input file by generating job-name, output,\r\n # error and array parameters based on user input\r\n status_file_basename = os.path.join(input_file_parameters.output_dir,\r\n prefix + input_file_parameters.job_name)\r\n resmng_config = list(input_file_parameters.resource_manager_params)\r\n resmng_config.append('#SBATCH --job-name={0}'.format(input_file_parameters.job_name))\r\n resmng_config.append('#SBATCH --output={0}_%A_%a.out'.format(status_file_basename))\r\n resmng_config.append('#SBATCH --error={0}_%A_%a.err'.format(status_file_basename))\r\n resmng_config.append('#SBATCH --array={0}-{1}'.format(1, 
len(workload)))\r\n\r\n resmng_config.append('\\n\\n')\r\n subshell_file_path = '{0}_WORKLOAD_{1}_subshell_{2}{3}'.format(NAME,\r\n workload_index_string,\r\n '\"$SLURM_ARRAY_TASK_ID\"',\r\n appendix)\r\n subshell_file_path = os.path.join(input_file_parameters.output_dir,\r\n subshell_file_path)\r\n resmng_config.append('source {0}'.format(subshell_file_path))\r\n\r\n out_fl_path = os.path.join(input_file_parameters.output_dir,file_main_name + appendix)\r\n workload_file_paths.append(out_fl_path)\r\n try:\r\n out_fl = open(out_fl_path, 'w')\r\n\r\n except IOError as emsg:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}\\n with error message:\\n{1}'\r\n .format(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix),\r\n str(emsg)))\r\n out_fl.write('\\n'.join(resmng_config))\r\n out_fl.write('\\n')\r\n out_fl.close()\r\n return workload_file_paths",
"def write_unix(workloads, input_file_parameters, command_line_parameters):\r\n\r\n workload_index = 0\r\n workload_zfill_amount = len(str(len(workloads)))\r\n background_process_list = []\r\n for workload in workloads:\r\n # Each workflow part will have separate file to submit to TORQUE with\r\n # sbatch command. Each file has one or more associated subshell files\r\n # containing contents for each thread.\r\n\r\n # Generate strings describing current workload and thread indexes for\r\n # output file names\r\n workload_index += 1\r\n workload_index_string = str(workload_index).zfill(workload_zfill_amount)\r\n file_main_name = '{0}_UNIX_WORKLOAD_1'.format(NAME)\r\n\r\n # Add information about current workflow to the main shell script\r\n background_process_list.append('echo \"Running workload part {0}\"'.format(\r\n workload_index))\r\n\r\n # When --fix_run mode is used the output and log files files already\r\n # exist. To prevent overwriting these files with new ones specific\r\n # prefix or appendix strings are added to the new output file names.\r\n appendix = '.sh'\r\n i = 0\r\n if command_line_parameters.fix_run:\r\n mode = 'FIX'\r\n elif command_line_parameters.compress_run == 'compress':\r\n mode = 'COMPRESS'\r\n elif command_line_parameters.compress_run == 'decompress':\r\n mode = 'DECOMPRESS'\r\n else:\r\n mode = None\r\n while mode is 'FIX' and os.path.exists(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix)):\r\n i += 1\r\n appendix = '_{0}_{1}.sh'.format(mode, i)\r\n\r\n if mode in ('COMPRESS', 'DECOMPRESS'):\r\n appendix = '_{0}.sh'.format(mode)\r\n while os.path.exists(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix)):\r\n i += 1\r\n appendix = '_{0}_{1}.sh'.format(mode, i)\r\n\r\n\r\n # Generate subshell files\r\n thread_index = 0\r\n thread_zfill_amount = len(str(len(workload)))\r\n for thread_contents in workload:\r\n # Iterate over output commands of each thread and write necessary\r\n # subshell files for each\r\n out_lines = []\r\n cmds_in_thread = len(thread_contents)\r\n for i in xrange(cmds_in_thread):\r\n # Check if any modules need loading or are they loaded by previous command\r\n skip_module_loading = False\r\n if i > 0:\r\n if thread_contents[i].load_module == thread_contents[i-1].load_module:\r\n skip_module_loading = True\r\n # Check if any modules need unloading or will they be used by following command\r\n skip_module_unloading = False\r\n if i < cmds_in_thread-1:\r\n if thread_contents[i].load_module == thread_contents[i+1].load_module:\r\n skip_module_unloading = True\r\n out_lines += generate_subshell_file_contents(thread_contents[i],\r\n skip_module_loading,\r\n skip_module_unloading)\r\n\r\n\r\n # Write subshell file\r\n thread_index_string = str(thread_index).zfill(thread_zfill_amount)\r\n fl_name = '{0}_WORKLOAD_{1}_subshell_{2}{3}'.format(NAME,\r\n workload_index_string,\r\n thread_index_string,\r\n appendix)\r\n try:\r\n out_fl = open(os.path.join(input_file_parameters.output_dir,\r\n fl_name), 'w')\r\n except:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}'.format(os.path.join(\r\n input_file_parameters.output_dir,\r\n fl_name)))\r\n out_fl.write('\\n'.join(out_lines))\r\n out_fl.write('\\n')\r\n out_fl.close()\r\n # i.e. 
use UNIX source to run input shell script, redirect stdout\r\n # and stderr to an .out file.\r\n background_process_list.append('source {0} >> {0}.out 2>&1 &'.format(\r\n os.path.join(input_file_parameters.output_dir,\r\n fl_name)))\r\n thread_index += 1\r\n\r\n # Workflow steps are written to a single output file (instead of\r\n # separate files). \"wait\" command is inserted in between workflow parts\r\n # to synchronize workflows.\r\n background_process_list.append('wait\\n\\n')\r\n\r\n # Write the main shell script file\r\n resmng_config = list(input_file_parameters.resource_manager_params)\r\n resmng_config.append('\\n\\n')\r\n resmng_config.append('\\n'.join(background_process_list))\r\n\r\n out_fl_path = os.path.join(input_file_parameters.output_dir, file_main_name + appendix)\r\n try:\r\n out_fl = open(out_fl_path, 'w')\r\n except IOError as emsg:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}\\n with error message:\\n{1}'\r\n .format(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix),\r\n str(emsg)))\r\n out_fl.write('\\n'.join(resmng_config))\r\n out_fl.write('\\n')\r\n out_fl.close()\r\n return [out_fl_path]",
"def build_job_scripts(model_list, scenario_list, output_dir, cassandra_config_dir, cassandra_log_dir,\n cassandra_main_script, sbatch_account, sbatch_partition='slurm', sbatch_walltime='01:00:00',\n sbatch_ntasks=3, sbatch_nodes=3, sbatch_jobname='cassie', sbatch_logdir='.', template=None):\n\n # use default configuration template file if user does not give one\n if template is None:\n template = pkg_resources.resource_filename('cassie', 'data/sbatch_template.sh')\n\n # existing tags to replace in the template file\n model_tag = '<model>'\n scenario_tag = '<scenario>'\n account_tag = '<account>'\n partition_tag = '<partition>'\n ntasks_tag = '<ntasks>'\n nodes_tag = '<nodes>'\n time_tag = '<walltime>'\n jobname_tag = '<jobname>'\n logdir_tag = '<logdir>'\n cassandra_configdir_tag = '<cassconfigdir>'\n cassandra_logdir_tag = '<casslogdir>'\n cassandra_script_tag = '<cassmainscript>'\n\n for model in model_list:\n for scenario in scenario_list:\n\n output_file = os.path.join(output_dir, f'run_{model.lower()}_{scenario}.sh')\n\n with open(output_file, 'w') as out:\n with open(template) as get:\n\n f = get.read()\n\n # replace tag names with dynamic content\n fx = f.replace(model_tag, model)\n fx = fx.replace(scenario_tag, scenario)\n\n fx = fx.replace(account_tag, sbatch_account)\n fx = fx.replace(partition_tag, sbatch_partition)\n fx = fx.replace(ntasks_tag, str(sbatch_ntasks))\n fx = fx.replace(nodes_tag, str(sbatch_nodes))\n fx = fx.replace(time_tag, sbatch_walltime)\n fx = fx.replace(jobname_tag, sbatch_jobname)\n fx = fx.replace(logdir_tag, sbatch_logdir)\n\n fx = fx.replace(cassandra_configdir_tag, cassandra_config_dir)\n fx = fx.replace(cassandra_logdir_tag, cassandra_log_dir)\n fx = fx.replace(cassandra_script_tag, cassandra_main_script)\n\n out.write(fx)",
"def write_default(workflows, output_dir):\r\n\r\n # Calculate the total number of commands\r\n number_of_commands = 0\r\n for workflow in workflows:\r\n number_of_commands += sum(map(len, workflow))\r\n\r\n # Create command line strings\r\n i = 0\r\n out_lines = ['echo Started executing shell script at:', 'date']\r\n for workflow in workflows:\r\n for workflow_step in workflow:\r\n for cmd in workflow_step:\r\n i += 1\r\n cmd_list = cmd.command_lines\r\n cmd_list = map(clean_command_lines, cmd_list)\r\n out_lines.append('echo Executing command {0}/{1}:'\r\n .format(i, number_of_commands))\r\n for c in cmd_list:\r\n c = c.replace('>', '\\\\>')\r\n c = c.replace('|', '\\\\|')\r\n out_lines.append('echo ' + c)\r\n out_lines.append('date')\r\n\r\n #Load modules\r\n if cmd.load_module:\r\n for module in cmd.load_module:\r\n out_lines.append(module)\r\n\r\n #The command\r\n out_lines += cmd_list\r\n\r\n #Unload modules\r\n if cmd.unload_module:\r\n for module in cmd.unload_module:\r\n out_lines.append(module)\r\n out_lines.append('echo Finished at:')\r\n out_lines.append('date')\r\n\r\n #Open and write command lines\r\n fl_name = '{0}_output_{1}.sh'.format(NAME, START_TIME)\r\n output_file_path = os.path.join(output_dir, fl_name)\r\n try:\r\n out_fl = open(output_file_path, 'w')\r\n except:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}'.format(os.path.join(output_dir,\r\n fl_name)))\r\n out_fl.write('#!/usr/bin/env bash\\n')\r\n out_fl.write('\\n'.join(out_lines))\r\n out_fl.close()\r\n return [output_file_path]",
"def prepare_parafly_slurm_job_script(sBasename_job, sBasename_parafly, sDirectory_job, sEmail, iWalltime_in = None, nNode_in = None, nThread_in=None, sJob_name_in =None, sPython_env_in =None, sQueue_in=None):\n if iWalltime_in is not None:\n iWalltime = iWalltime_in \n else:\n iWalltime = 2\n if nNode_in is not None:\n iNode = nNode_in \n else:\n iNode = 1\n if nThread_in is not None:\n nThread = nThread_in \n else:\n nThread = 40\n \n if sJob_name_in is not None:\n sJob_name = sJob_name_in \n else:\n sJob_name = 'parafly'\n if sPython_env_in is not None:\n sPython_env = sPython_env_in \n else:\n sPython_env = 'base'\n \n if sQueue_in is not None:\n sQueue = sQueue_in \n else:\n sQueue = 'short'\n \n sWalltime =\"{:0d}\".format(iWalltime )\n sNode = \"{:0d}\".format(iNode )\n sThread = \"{:0d}\".format(nThread )\n \n os.chdir(sDirectory_job)\n \n ofs = open(sBasename_job,\"w\") #write mode \n sLine = '#!/bin/bash' + '\\n'\n ofs.write( sLine ) \n sLine = '#SBATCH --account=esmd' + '\\n'\n ofs.write( sLine ) \n\n #sLine = '#SBATCH --begin=now+1minutes' + '\\n'\n #ofs.write( sLine ) \n\n sLine = '#SBATCH --cpus-per-task=1 ' + '\\n'\n ofs.write( sLine ) \n\n sLine = '#SBATCH --dependency=singleton ' + '\\n'\n ofs.write( sLine )\n sLine = '#SBATCH --error=stderr_%j.err' + '\\n'\n ofs.write( sLine ) \n sLine = '#SBATCH --job-name=' + sJob_name + ' # create a name for your job' + '\\n'\n ofs.write( sLine ) \n sLine = '#SBATCH --mail-type=ALL' + '\\n'\n ofs.write( sLine ) \n sLine = '#SBATCH --mail-user=' + sEmail + '\\n'\n ofs.write( sLine ) \n\n sLine = '#SBATCH --nodes=' + sNode + ' # node count' + '\\n'\n ofs.write( sLine ) \n sLine = '#SBATCH --ntasks=' + sThread + ' # total number of tasks' + '\\n'\n ofs.write( sLine ) \n sLine = '#SBATCH --output=stdout_%j.out' + '\\n'\n ofs.write( sLine ) \n\n sLine = '#SBATCH --partition=' + sQueue + '\\n' #can be improved here\n ofs.write( sLine ) \n sLine = '#SBATCH --time=' + sWalltime +':00:00 # total run time limit (HH:MM:SS)' + '\\n'\n ofs.write( sLine ) \n\n sLine = 'module purge' + '\\n'\n ofs.write( sLine ) \n sLine = 'module load parafly/2013' + '\\n'\n ofs.write( sLine ) \n sLine = 'module load anaconda3/2019.03' + '\\n'\n ofs.write( sLine ) \n sLine = 'source /share/apps/anaconda3/2019.03/etc/profile.d/conda.sh' + '\\n'\n ofs.write( sLine ) \n sLine = 'unset PYTHONHOME' + '\\n'\n ofs.write( sLine ) \n sLine = 'conda activate ' + sPython_env + '\\n'\n ofs.write( sLine ) \n\n sLine = 'ParaFly -c ' + sBasename_parafly + ' -CPU ' + sThread + ' -failed_cmds rerun.txt' + '\\n'\n ofs.write( sLine ) \n \n sLine = 'echo \" Job \" ' + '${SLURM_JOBID}' + ' is launched' + '\\n'\n ofs.write( sLine ) \n\n sLine = 'conda deactivate' + '\\n'\n ofs.write( sLine ) \n \n sLine = 'echo \"Finished\"' + '\\n'\n ofs.write( sLine ) \n ofs.close() \n \n return",
"def write_pbs(self):\n fout = open(\"runStarCCM.pbs\", \"w\")\n fout.write(\"#PBS -S /bin/csh\\n\")\n fout.write(\"#PBS -l select=\" + str(self.numNodes) + \":ncpus=\" + str(self.numCPUs) + \":mpiprocs=\" + str(self.mpiProcs) + \":model=has,walltime=\" + self.WallTime + \"\\n\\n\")\n fout.write(\"#PBS -W group_list=\" + self.GroupID + \"\\n\")\n fout.write(\"#PBS -j oe\\n\")\n fout.write(\"#PBS -q \" + self.queue + \"\\n\")\n fout.write(\"#PBS -N \" + self.jobName + \"\\n\")\n fout.write(\"#PBS -m e\\n\")\n fout.write(\"#PBS -W block=true\\n\\n\")\n fout.write(\"cd $PBS_O_WORKDIR\\n\")\n\n if self.runVolGrid == 1:\n #fout.write(\"/bin/rm -f \" + self.simMeshFile + \".sim\\n\")\n fout.write(\"/bin/rm -f starccmMeshRun.out\\n\")\n fout.write(\"chmod u+x \" + self.cshBatch1File + \".csh\\n\")\n # do not use >>& because it will fail in some environment\n fout.write(\"./\" + self.cshBatch1File + \".csh -powerOnDemand \" + self.javaBatch1File + \".java >& starccmMeshRun.out\\n\\n\")\n else:\n fout.write(\"echo 'User chooses not to make a mesh run.'\\n\")\n\n if self.runCFD == 1:\n fout.write(\"chmod u+x \" + self.cshBatch2File + \".csh\\n\")\n fout.write(\"/bin/rm -f *.csv *.png starccmFlowRun.out\\n\")\n # do not use >>& because it will fail in some environment\n fout.write(\"./\" + self.cshBatch2File + \".csh -powerOnDemand \" + self.javaBatch2File + \".java \" + self.simMeshFile + \" >& starccmFlowRun.out\\n\\n\")\n fout.write(\"# rename the strange file names\\n\")\n fout.write(\"/bin/mv \\$PWDForceX.csv ForceX.csv\\n\")\n fout.write(\"/bin/mv \\$PWDForceY.csv ForceY.csv\\n\")\n fout.write(\"/bin/mv \\$PWDForceZ.csv ForceZ.csv\\n\")\n fout.write(\"/bin/mv \\$PWDMomentX.csv MomentX.csv\\n\")\n fout.write(\"/bin/mv \\$PWDMomentY.csv MomentY.csv\\n\")\n fout.write(\"/bin/mv \\$PWDMomentZ.csv MomentZ.csv\\n\")\n fout.write(\"/bin/mv \\$PWDResiduals.csv Residuals.csv\\n\\n\")\n fout.write(\"/bin/mv \\$PWDForceX.png ForceX.png\\n\")\n fout.write(\"/bin/mv \\$PWDForceY.png ForceY.png\\n\")\n fout.write(\"/bin/mv \\$PWDForceZ.png ForceZ.png\\n\")\n fout.write(\"/bin/mv \\$PWDMomentX.png MomentX.png\\n\")\n fout.write(\"/bin/mv \\$PWDMomentY.png MomentY.png\\n\")\n fout.write(\"/bin/mv \\$PWDMomentZ.png MomentZ.png\\n\")\n fout.write(\"/bin/mv \\$PWDResiduals.png Residuals.png\\n\")\n fout.write(\"/bin/mv \\$PWDUpperCp.png UpperCp.png\\n\")\n fout.write(\"/bin/mv \\$PWDLowerCp.png LowerCp.png\\n\")\n fout.write(\"/bin/rm -rf null\\n\")\n else:\n fout.write(\"echo 'User chooses not to make a CFD run.'\\n\")\n\n fout.close()",
"def scriptGen(self,tmpd='/tmp/jose',libRev='last',submode='qsub',\n redirect=1,PBSoptions=''):\n jobname=self.name\n outdir=self.outd\n qsubdir=scratchdir+'/qsub/'+todayDate() #subdirectory to deposit the script\n if not os.path.exists(qsubdir): pastry('/bin/mkdir -p '+qsubdir)\n script=qsubdir+'/'+jobname+'.sh' #full script file name\n\n if len(jobname) > 15:\n sys.stderr.write('Error: job name '+jobname+' cannot exceed 15 characters')\n return ''\n if not os.path.exists(outdir): os.system('/bin/mkdir -p '+outdir)\n buf=''\n ulimit=int(float(mem_limit)*1024) #maximum resident memory size (Kb) to prevent swapping\n wd=tmpd+'/${PBS_JOBID}'\n #wd=tmpd+'/'+ re.compile('\\W').sub('',self.name) +'_$$' #working directory\n logname=jobname+'.log'\n local_log=wd+'/'+logname\n remote_log=outdir+'/'+logname\n buf= '#!/bin/bash\\n\\n'\n buf+= PBSoptions+'\\n\\n'\n buf+= '#bash function to update library\\n'\n buf+= self.updateNodeLib(libRev)+'\\n\\n'\n buf+= '#bash function to import temporary libs\\n'\n buf+= self.shared_temporal_libraries()+'\\n\\n'\n buf+= '#bash function to clean exit\\n'\n buf+= self.cleanup_exit(submode=submode)+'\\n\\n'\n buf+= 'echo \"'+script+'\"\\n' #write script name withing script body\n buf+= 'hostname\\n' #node where job will be run\n buf+= 'echo $PBS_JOBID\\n'\n buf+= 'ulimit -m '+`ulimit`+' #maximum memory\\n'\n buf+= 'source ~/.bash_profile >/dev/null #environment variables\\n'\n buf+= 'wd='+wd+' #working directory\\n'\n buf+= '/bin/mkdir -p $wd\\n'\n buf+= 'export LOCAL_LOG=\"'+local_log+'\"\\n'\n buf+= '/bin/touch $LOCAL_LOG\\n'\n if submode=='sub' and redirect:\n buf+='exec &> $LOCAL_LOG #redirect STODOUT, STDERR to LOCAL_LOG\\n' \n buf+= 'export REMOTE_LOG=\"'+remote_log+'\"\\n'\n\n but+= '#clean up old log file\\n'\n buf+= 'if [ -f $REMOTE_LOG ]; then\\n' \n buf+= ' /bin/rm -f $REMOTE_LOG\\n'\n buf+= 'fi\\n\\n'\n\n buf+= 'trap \"cleanup_exit 1\" TERM #in case of killing job\\n\\n'\n\n buf+= '#update node code library && import libraries\\n'\n buf+= 'if !('\n buf+= 'updateNodeLib && ' \n buf+= 'shared_temporal_libraries _PREPARE_'\n buf+= ');then\\n'\n buf+= ' cleanup_exit 1\\n'\n buf+= 'fi\\n\\n'\n \n buf+= '/bin/cp '+' '.join(self.inpl)+' $wd #bring input files\\n' \n buf+= 'cd $wd\\n\\n'\n buf+= '#Test command success\\n'\n buf+= 'exs=0 #variable holding script exit status\\n'\n buf+= 'if !('\n buf+= self.exe\n buf+= ');then\\n'\n buf+= ' exs=1\\n'\n buf+= 'fi\\n\\n'\n buf+= '#move even partial results (exs=1)\\n'\n buf+= '/bin/mv '+' '.join(self.outl)+' '+outdir+'\\n'\n buf+= 'cleanup_exit $exs'\n\n open(script,'w').write(buf)\n pastry('chmod u+x '+script)\n\n return script",
"def launchJobs(options, cmdargs, errStream=sys.stdin):\n\n if options.queue == LOCAL:\n launchLocalJobs(options,cmdargs,errStream)\n return\n\n logging.debug(\"Launching task array: %r\" % ({'tmpDir':options.tmpDir,'splits':options.splits,'fragName':options.fragBase,'cmd':cmdargs,'sgeOpts':options.sgeOptions,'job':options.jobName,'priority':options.priority,'loglevel':options.verbose,'wait':options.wait, 'type':options.taskType}))\n \n # SGE or SLURM submission prefix\n command = getSubmissionCommandPrefix(options)\n\n # batch_runner command\n command.append(BATCHLAUNCHER)\n command+=[\"--mode\",\"run\",\"--tmp_dir\",options.tmpDir,\"--frag_base\",\n options.fragBase, \"--frag_dir\", options.frag_dir, \"--frag_suffix\", options.fragSuff, \"--loglevel\", str(options.verbose), \"--queue\", options.queue]\n if options.inputFlag is not None:\n command.append('-i=%s' % (options.inputFlag))\n if options.prefixFlag is not None:\n command.append('-p=%s' % (options.prefixFlag))\n if options.threadsFlag is not None:\n command+=['-t',str(options.threadsFlag)]\n if options.outputFlags is not None:\n for flag in options.outputFlags:\n command.append('-o=%s' % (flag))\n if options.taskType is not None:\n command+=['--taskType',options.taskType]\n if options.cwd:\n command.append('--cwd')\n command.append('--')\n command+=cmdargs\n\n # redirect qsub output to std, silence if vebose is 0\n #if options.verbose==0:\n # qsubOuts=open(os.devnull,'w')\n #else:\n # qsubOuts=errStream\n \n # run command\n logging.debug('Launching task array: %s' % (formatCommand(command)))\n try:\n submissionOutput = subprocess.check_output(command)\n try:\n submissionOutput = submissionOutput.decode()\n except:\n pass\n if options.verbose>0:\n errStream.write(\"Submission Output: \" + submissionOutput)\n except subprocess.CalledProcessError as error:\n if options.wait and options.queue != SLURM:\n # when using -sync y, the exit code may come from a task\n # (which cleanup will handle)\n logging.warning(\"qsub returned an error code of: %d\" \n % error.returncode)\n else:\n raise error\n\n # get job id\n try:\n jobid = re.search(r'(\\d+)\\s*$',submissionOutput).group(1)\n options.jobid = jobid\n except:\n if options.queue==SLURM:\n logging.error(\"Cannot parse SLURM job id from '%s'\" % (submissionOutput))\n raise\n\n # SLURM doesn't allow waiting for completion on array jobs, so we hack:\n # use srun to start a dummy job that will wait for our job array\n if options.wait and options.queue==SLURM:\n waitForSlurmArray(options, errStream)",
"def create_job(jobrun, vcf_filenames):\n if jobrun == \"cluster\":\n \"\"\"\n Supports only PBS clusters for now.\n \"\"\"\n for i in vcf_filenames:\n job_name = os.path.basename(i)\n job_print_string = \"#PBS -N %s\\n#PBS -M [email protected]\\n#PBS -m abe\\n#PBS -V\\n#PBS -l nodes=1:ppn=4,pmem=4000mb,walltime=72:00:00\\n#PBS -q fluxod\\n#PBS -A esnitkin_fluxod\\n#PBS -l qos=flux\\n\\n/home/apirani/anaconda/bin/python /nfs/esnitkin/bin_group/scripts/Scripts_v2.0/variants_position_analysis/reason_job.py -filter2_only_snp_vcf_dir %s -filter2_only_snp_vcf_file %s\\n\" % (job_name, args.filter2_only_snp_vcf_dir, i)\n job_file_name = \"%s.pbs\" % (i)\n f1=open(job_file_name, 'w+')\n f1.write(job_print_string)\n f1.close()\n #os.system(\"mv %s/*.pbs %s/temp\" % (args.filter2_only_snp_vcf_dir, args.filter2_only_snp_vcf_dir))\n pbs_dir = args.filter2_only_snp_vcf_dir + \"/*.pbs\"\n pbs_scripts = glob.glob(pbs_dir)\n for i in pbs_scripts:\n print \"Running: qsub %s\" % i\n #os.system(\"qsub %s\" % i)\n\n elif jobrun == \"parallel-local\":\n \"\"\"\n Generate a Command list of each job and run it in parallel on different cores available on local system\n \"\"\"\n command_array = []\n command_file = \"%s/commands_list.sh\" % args.filter2_only_snp_vcf_dir\n f3 = open(command_file, 'w+')\n\n\n for i in vcf_filenames:\n job_name = os.path.basename(i)\n job_print_string = \"#PBS -N %s\\n#PBS -M [email protected]\\n#PBS -m abe\\n#PBS -V\\n#PBS -l nodes=1:ppn=4,pmem=4000mb,walltime=72:00:00\\n#PBS -q fluxod\\n#PBS -A esnitkin_fluxod\\n#PBS -l qos=flux\\n\\n/home/apirani/anaconda/bin/python /nfs/esnitkin/bin_group/scripts/Scripts_v2.0/variants_position_analysis/reason_job.py -filter2_only_snp_vcf_dir %s -filter2_only_snp_vcf_file %s\\n\" % (job_name, args.filter2_only_snp_vcf_dir, i)\n job_file_name = \"%s.pbs\" % (i)\n f1=open(job_file_name, 'w+')\n f1.write(job_print_string)\n f1.close()\n #os.system(\"mv %s/*.pbs %s/temp\" % (args.filter2_only_snp_vcf_dir, args.filter2_only_snp_vcf_dir))\n pbs_dir = args.filter2_only_snp_vcf_dir + \"/*.pbs\"\n pbs_scripts = glob.glob(pbs_dir)\n\n\n for i in pbs_scripts:\n f3.write(\"bash %s\\n\" % i)\n f3.close()\n with open(command_file, 'r') as fpp:\n for lines in fpp:\n lines = lines.strip()\n command_array.append(lines)\n fpp.close()\n print len(command_array)\n if args.numcores:\n num_cores = int(num_cores)\n else:\n num_cores = multiprocessing.cpu_count()\n results = Parallel(n_jobs=num_cores)(delayed(run_command)(command) for command in command_array)\n\n elif jobrun == \"parallel-single-cluster\":\n print \" \"\n else:\n \"\"\"\n Generate a Command list of each job and run it on local system one at a time\n \"\"\"\n command_array = []\n command_file = \"%s/commands_list.sh\" % args.filter2_only_snp_vcf_dir\n os.system(\"bash %s\" % command_file)",
"def write_shell_scripts(airfoils, qsh_template, nsetup, ntype, out_dir):\n for nairfoil, sim_setup in airfoils.iteritems():\n for aoa in sim_setup['aoas']:\n # Create simulation name\n sim_name = create_sim_name(nairfoil, ntype, nsetup, aoa)\n # Create fluent journal file\n with open(qsh_template, 'r') as f:\n qtxt = f.read()\n # Start to replace parameters inside the journal\n qtxt = qtxt.replace('SIMNAME', sim_name)\n qtxt = qtxt.replace('in.jou', sim_name + '.jou')\n qtxt = qtxt.replace('fluent.out', sim_name + '.out')\n # Write new shell script to out_dir\n qout = sim_name + '.qsh'\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n with open(os.path.join(out_dir, qout), 'w') as f:\n f.write(qtxt)\n return True",
"def write_lsf(workloads, input_file_parameters, command_line_parameters):\r\n workload_index = 0\r\n workload_zfill_amount = len(str(len(workloads)))\r\n workload_file_paths = []\r\n for workload in workloads:\r\n # Each workflow part will have separate file to submit to TORQUE with\r\n # sbatch command. Each file has one or more associated subshell files\r\n # containing contents for each thread.\r\n\r\n # Generate strings describing current workload and thread indexes for\r\n # output file names\r\n workload_index += 1\r\n workload_index_string = str(workload_index).zfill(workload_zfill_amount)\r\n file_main_name = '{0}_LSF_WORKLOAD_{1}'.format(NAME,\r\n workload_index_string)\r\n\r\n # When --fix_run mode is used the output and log files files already\r\n # exist. To prevent overwriting these files with new ones specific\r\n # prefix or appendix strings are added to the new output file names.\r\n appendix = '.sh'\r\n i = 0\r\n if command_line_parameters.fix_run:\r\n mode = 'FIX'\r\n elif command_line_parameters.compress_run == 'compress':\r\n mode = 'COMPRESS'\r\n elif command_line_parameters.compress_run == 'decompress':\r\n mode = 'DECOMPRESS'\r\n else:\r\n mode = None\r\n while mode is not None and os.path.exists(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix)):\r\n i += 1\r\n appendix = '_{0}_{1}.sh'.format(mode, i)\r\n\r\n # Generate subshell files\r\n thread_index = 0\r\n for thread_contents in workload:\r\n # Iterate over output commands of each thread and write necessary\r\n # subshell files for each\r\n out_lines = []\r\n cmds_in_thread = len(thread_contents)\r\n for i in xrange(cmds_in_thread):\r\n # Check if any modules need loading or are they loaded by previous command\r\n skip_module_loading = False\r\n if i > 0:\r\n if thread_contents[i].load_module == thread_contents[i-1].load_module:\r\n skip_module_loading = True\r\n # Check if any modules need unloading or will they be used by following command\r\n skip_module_unloading = False\r\n if i < cmds_in_thread-1:\r\n if thread_contents[i].load_module == thread_contents[i+1].load_module:\r\n skip_module_unloading = True\r\n out_lines += generate_subshell_file_contents(thread_contents[i],\r\n skip_module_loading,\r\n skip_module_unloading)\r\n\r\n # Write subshell file\r\n thread_index_string = str(thread_index)\r\n fl_name = '{0}_WORKLOAD_{1}_subshell_{2}{3}'.format(NAME,\r\n workload_index_string,\r\n thread_index_string,\r\n appendix)\r\n try:\r\n out_fl = open(os.path.join(input_file_parameters.output_dir,\r\n fl_name), 'w')\r\n except:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}'.format(os.path.join(\r\n input_file_parameters.output_dir,\r\n fl_name)))\r\n out_fl.write('\\n'.join(out_lines))\r\n out_fl.write('\\n')\r\n out_fl.close()\r\n thread_index += 1\r\n\r\n # Generate parameter file for the bsub run\r\n resmng_config = []\r\n resmng_config.append('#BSUB-J \"{0}[1-{1}]\"'.format(\r\n input_file_parameters.job_name,\r\n len(workload)))\r\n resmng_config.append('#BSUB-i {0}_WORKLOAD_{1}_subshell_{2}{3}'.format(\r\n NAME,\r\n workload_index_string,\r\n '%I',\r\n appendix))\r\n resmng_config.append('#BSUB-o {0}_WORKLOAD_{1}_subshell_{2}{3}.out'.format(\r\n NAME,\r\n workload_index_string,\r\n '%I',\r\n appendix))\r\n resmng_config += input_file_parameters.resource_manager_params\r\n\r\n out_fl_path = os.path.join(input_file_parameters.output_dir, file_main_name + appendix)\r\n workload_file_paths.append(out_fl_path)\r\n try:\r\n out_fl = open(out_fl_path, 
'w')\r\n\r\n except IOError as emsg:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}\\n with error message:\\n{1}'\r\n .format(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix),\r\n str(emsg)))\r\n out_fl.write('\\n'.join(resmng_config))\r\n out_fl.write('\\n')\r\n out_fl.close()\r\n return workload_file_paths",
"def write_torque(workloads, input_file_parameters, command_line_parameters):\r\n validate_resource_manager_parameters(\r\n input_file_parameters.resource_manager_params,\r\n ['#PBS -k', '#PBS -N', '#PBS -d', '#PBS -e', '#PBS -t'])\r\n\r\n workload_index = 0\r\n workload_zfill_amount = len(str(len(workloads)))\r\n workload_file_paths = []\r\n for workload in workloads:\r\n # Each workflow part will have separate file to submit to TORQUE with\r\n # sbatch command. Each file has one or more associated subshell files\r\n # containing contents for each thread.\r\n\r\n # Generate strings describing current workload and thread indexes for\r\n # output file names\r\n workload_index += 1\r\n workload_index_string = str(workload_index).zfill(workload_zfill_amount)\r\n file_main_name = '{0}_TORQUE_WORKLOAD_{1}'.format(NAME,\r\n workload_index_string)\r\n\r\n # When --fix_run mode is used the output and log files files already\r\n # exist. To prevent overwriting these files with new ones specific\r\n # prefix or appendix strings are added to the new output file names.\r\n appendix = '.sh'\r\n i = 0\r\n if command_line_parameters.fix_run:\r\n mode = 'FIX'\r\n elif command_line_parameters.compress_run == 'compress':\r\n mode = 'COMPRESS'\r\n elif command_line_parameters.compress_run == 'decompress':\r\n mode = 'DECOMPRESS'\r\n else:\r\n mode = None\r\n while mode is not None and os.path.exists(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix)):\r\n i += 1\r\n appendix = '_{0}_{1}.sh'.format(mode, i)\r\n\r\n # Generate subshell files\r\n thread_index = 0\r\n for thread_contents in workload:\r\n # Iterate over output commands of each thread and write necessary\r\n # subshell files for each\r\n out_lines = []\r\n cmds_in_thread = len(thread_contents)\r\n for i in xrange(cmds_in_thread):\r\n # Check if any modules need loading or are they loaded by previous command\r\n skip_module_loading = False\r\n if i > 0:\r\n if thread_contents[i].load_module == thread_contents[i-1].load_module:\r\n skip_module_loading = True\r\n # Check if any modules need unloading or will they be used by following command\r\n skip_module_unloading = False\r\n if i < cmds_in_thread-1:\r\n if thread_contents[i].load_module == thread_contents[i+1].load_module:\r\n skip_module_unloading = True\r\n out_lines += generate_subshell_file_contents(thread_contents[i],\r\n skip_module_loading,\r\n skip_module_unloading)\r\n\r\n\r\n # Write subshell file\r\n thread_index_string = str(thread_index)\r\n fl_name = '{0}_WORKLOAD_{1}_subshell_{2}{3}'.format(NAME,\r\n workload_index_string,\r\n thread_index_string,\r\n appendix)\r\n try:\r\n out_fl = open(os.path.join(input_file_parameters.output_dir,\r\n fl_name), 'w')\r\n except:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}'.format(os.path.join(\r\n input_file_parameters.output_dir,\r\n fl_name)))\r\n out_fl.write('\\n'.join(out_lines))\r\n out_fl.write('\\n')\r\n out_fl.close()\r\n thread_index += 1\r\n\r\n # Create lines for TORQUE input file by generating job-name, output,\r\n # error and array parameters based on user input\r\n\r\n # IF YOU ADD NEW AUTOMATICALLY INFERRED PARAMETERS, REMEMBER TO VALIDATE\r\n # THEM AT THE BEGINNING OF THIS FUNCTION\r\n resmng_config = list(input_file_parameters.resource_manager_params)\r\n resmng_config.append('#PBS -k eo')\r\n resmng_config.append('#PBS -N {0}'.format(input_file_parameters.job_name))\r\n resmng_config.append('#PBS -d {0}'.format(input_file_parameters.output_dir))\r\n 
resmng_config.append('#PBS -e {0}'.format(input_file_parameters.output_dir))\r\n resmng_config.append('#PBS -t {0}-{1}'.format(0, len(workload)-1))\r\n\r\n resmng_config.append('\\n\\n')\r\n subshell_file_path = '{0}_WORKLOAD_{1}_subshell_{2}{3}'.format(NAME,\r\n workload_index_string,\r\n '\"${PBS_ARRAYID}\"',\r\n appendix)\r\n subshell_file_path = os.path.join(input_file_parameters.output_dir,\r\n subshell_file_path)\r\n resmng_config.append('source {0}'.format(subshell_file_path))\r\n\r\n out_fl_path = os.path.join(input_file_parameters.output_dir,file_main_name + appendix)\r\n workload_file_paths.append(out_fl_path)\r\n try:\r\n out_fl = open(out_fl_path, 'w')\r\n except IOError as emsg:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}\\n with error message:\\n{1}'\r\n .format(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix),\r\n str(emsg)))\r\n out_fl.write('\\n'.join(resmng_config))\r\n out_fl.write('\\n')\r\n out_fl.close()\r\n return workload_file_paths",
"def submit_cluster_batch_file(self, num_bundles):\n\n import os\n import re\n import getpass\n import commands\n from time import strftime\n from indi_schedulers import cluster_templates\n\n print \"Submitting cluster job to %s..\" % self._platform\n\n # Create cluster log dir\n cluster_files_dir = \\\n os.path.join(self._config[\"output_directory\"], \"cluster_files\")\n if not os.path.exists(cluster_files_dir):\n os.makedirs(cluster_files_dir)\n\n # Batch file variables\n timestamp = str(strftime(\"%Y_%m_%d_%H_%M_%S\"))\n shell = commands.getoutput('echo $SHELL')\n user_account = getpass.getuser()\n\n # Set up config dictionary\n config_dict = {'timestamp': timestamp,\n 'shell': shell,\n 'job_name': self._run_name,\n 'num_tasks': num_bundles,\n 'queue': \"all.q\",\n 'par_env': \"mpi_smp\",\n 'cores_per_task': self._config[\"num_processors\"],\n 'user': user_account,\n 'work_dir': cluster_files_dir}\n\n # Get string template for job scheduler\n if self._platform == \"PBS\":\n env_arr_idx = '$PBS_ARRAYID'\n batch_file_contents = cluster_templates.pbs_template\n confirm_str = '(?<=Your job-array )\\d+'\n exec_cmd = 'qsub'\n elif self._platform == \"SGE\":\n env_arr_idx = '$SGE_TASK_ID'\n batch_file_contents = cluster_templates.sge_template\n confirm_str = '(?<=Your job-array )\\d+'\n exec_cmd = 'qsub'\n elif self._platform == \"SLURM\":\n hrs_limit = 8 * num_bundles\n time_limit = '%d:00:00' % hrs_limit\n config_dict[\"time_limit\"] = time_limit\n env_arr_idx = '$SLURM_ARRAY_TASK_ID'\n batch_file_contents = cluster_templates.slurm_template\n confirm_str = '(?<=Submitted batch job )\\d+'\n exec_cmd = 'sbatch'\n\n config_dict['env_arr_idx'] = env_arr_idx\n config_dict['run_cmd'] = 'echo \"Running task: %s\"' % env_arr_idx\n\n # Populate string from config dict values\n batch_file_contents = batch_file_contents % config_dict\n\n run_str = \"qap_measures_pipeline.py --bundle_idx %s --log_dir %s %s \"\\\n \"%s\" % (env_arr_idx, self._run_log_dir,\n self._config[\"subject_list\"],\n self._config[\"pipeline_config_yaml\"])\n\n batch_file_contents = \"\\n\".join([batch_file_contents, run_str])\n\n batch_filepath = os.path.join(cluster_files_dir, 'cpac_submit_%s.%s'\n % (timestamp, self._platform))\n\n with open(batch_filepath, 'w') as f:\n f.write(batch_file_contents)\n\n print \"Batch file written to %s..\" % batch_filepath\n\n # Get output response from job submission\n out = commands.getoutput('%s %s' % (exec_cmd, batch_filepath))\n\n # Check for successful qsub submission\n if re.search(confirm_str, out) == None:\n err_msg = 'Error submitting QAP pipeline run to %s queue' \\\n % self._platform\n raise Exception(err_msg)\n\n print \"Batch job submitted to %s queue.\" % self._platform\n\n # Get pid and send to pid file\n pid = re.search(confirm_str, out).group(0)\n pid_file = os.path.join(cluster_files_dir, 'pid.txt')\n with open(pid_file, 'w') as f:\n f.write(pid)",
"def run_jobs(num_runs):\n\n if os.environ.get('OS','') == 'Windows_NT':\n p = subprocess.Popen(\"dir /A:-d /B | findstr/r \\\"submit_run.*.sh\\\"\", shell=True, stdout=subprocess.PIPE)\n else:\n p = subprocess.Popen(\"ls -l | grep 'submit_run.*.sh' | awk '{print $9}'\", shell=True, stdout=subprocess.PIPE)# list SGE submit files\n out = p.stdout.read()\n \n if os.environ.get('OS','') == 'Windows_NT':\n fnames = out.rsplit(\"\\r\\n\")\n else:\n fnames = out.rsplit(\"\\n\")\n\n if len(fnames) > 0: del fnames[-1]\n\n # determine whether 'qsub' command is available\n if (is_valid_command('qsub')): # run the commands jobs using qsub\n for fname in fnames:\n p = subprocess.Popen(\"qsub %s\" % fname, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out = p.stderr.read()\n if verbose:\n print out\n print \"Jobs submitted.\"\n else: # run the commands sequentially without using qsub\n print \"Error: 'qsub' is an invalid command.\"\n if os.environ.get('OS','') == 'Windows_NT':\n p = subprocess.Popen(\"dir /A:-d /B | findstr/r \\\"run.*.py\\\"\", shell=True, stdout=subprocess.PIPE)\n else:\n p = subprocess.Popen(\"ls -l | grep 'run.*.py' | awk '{print $9}'\", shell=True, stdout=subprocess.PIPE) # list SGE submit files\n out = p.stdout.read()\n\n if os.environ.get('OS','') == 'Windows_NT':\n fnames = out.rsplit(\"\\r\\n\")\n else:\n fnames = out.rsplit(\"\\n\")\n if len(fnames) > 0: del fnames[-1]\n\n for fname in fnames:\n for i in range(num_runs):\n if verbose:\n print \"Executing command: python %s %d\" % (fname, i)\n p = subprocess.Popen(\"python %s %d\" % (fname, i), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out = p.stderr.read()\n if verbose:\n print out",
"def build_submission_script(path,\n script_name,\n save_history=True,\n walltime=10,\n allocation='p30653',\n cores=1,\n memory=4):\n\n # define paths\n path = abspath(path)\n job_script_path = join(path, 'scripts', 'submit.sh')\n\n # copy run script to scripts directory\n run_script = abspath(__file__).rsplit('/', maxsplit=2)[0]\n run_script = join(run_script, 'scripts', script_name)\n shutil.copy(run_script, join(path, 'scripts'))\n\n # determine queue\n if walltime <= 4:\n queue = 'short'\n elif walltime <= 48:\n queue = 'normal'\n else:\n queue = 'long'\n\n # declare outer script that reads PATH from file\n job_script = open(job_script_path, 'w')\n job_script.write('#!/bin/bash\\n')\n\n # move to job directory\n job_script.write('cd {:s} \\n\\n'.format(path))\n\n # begin outer script for processing job\n job_script.write('while IFS=$\\'\\\\t\\' read P\\n')\n job_script.write('do\\n')\n job_script.write('b_id=$(echo $(basename ${P}) | cut -f 1 -d \\'.\\')\\n')\n job_script.write(' JOB=`msub - << EOJ\\n\\n')\n\n # =========== begin submission script for individual batch ============\n job_script.write('#! /bin/bash\\n')\n job_script.write('#MSUB -A {:s} \\n'.format(allocation))\n job_script.write('#MSUB -q {:s} \\n'.format(queue))\n job_script.write('#MSUB -l walltime={0:02d}:00:00 \\n'.format(walltime))\n job_script.write('#MSUB -m abe \\n')\n #job_script.write('#MSUB -M [email protected] \\n')\n job_script.write('#MSUB -o ./log/${b_id}/outlog \\n')\n job_script.write('#MSUB -e ./log/${b_id}/errlog \\n')\n job_script.write('#MSUB -N ${b_id} \\n')\n job_script.write('#MSUB -l nodes=1:ppn={:d} \\n'.format(cores))\n job_script.write('#MSUB -l mem={:d}gb \\n\\n'.format(memory))\n\n # load python module and metabolism virtual environment\n job_script.write('module load python/anaconda3.6\\n')\n job_script.write('source activate ~/pythonenvs/growth_env\\n\\n')\n\n # move to job directory\n job_script.write('cd {:s} \\n\\n'.format(path))\n\n # run script\n job_script.write('python ./scripts/{:s}'.format(script_name)+' ${P} ')\n args = (save_history,)\n job_script.write('-s {:d}\\n'.format(*args))\n job_script.write('EOJ\\n')\n job_script.write('`\\n\\n')\n # ============= end submission script for individual batch ============\n\n # print job id\n #job_script.write('echo \"JobID = ${JOB} submitted on `date`\"\\n')\n job_script.write('done < ./batches/index.txt \\n')\n job_script.write('echo \"All batches submitted as of `date`\"\\n')\n job_script.write('exit\\n')\n\n # close the file\n job_script.close()\n\n # change the permissions\n chmod(job_script_path, 0o755)",
"def submit_scripts(self, out):\n program_folder = os.path.join(out, self.out)\n for config in self.configurations:\n config.submit_CaVEMan_scripts(\n program_folder, self.path2exe, self.ref_fai, self.file1, self.file2,\n self.config_file, self.qsub_dir, self.mstep_script, self.merge_script, self.estep_script\n )\n return None",
"def write_merge_script(s,inputs=[]):\n assert len(inputs)>0\n # hadd determines if we are merging main histograms file, or unfolding files\n hadd = True if s.jobtype == \"MRG\" else False\n s.jobfile = os.path.join(s.submitdir, 'merge_wasym.sh' if hadd else 'munfold_wasym.sh')\n s.outROOT = ('root_' if hadd else 'unfold_')+s.tag+\".root\"\n s.outROOTpath = os.path.join('results','ana_wasym',s.outROOT)\n pre = 'merge' if hadd else 'munfold'\n s.outOU = os.path.join(s.submitdir, pre+'_wasym.out.log')\n s.outER = os.path.join(s.submitdir, pre+'_wasym.err.log')\n s.outLOG = os.path.join(s.submitdir, pre+'_wasym.log.log')\n flist = 'wasym.root.list' if hadd else 'wasym.unfold.list'\n s.outputs += [flist]\n f = open(s.jobfile, \"w\")\n print >>f, SH_PRE%(s.fdic[0],s.fdic[1])\n print >>f,'RMODE=merge'\n print >>f,'nexpected=%d'%len(inputs)\n print >>f,'ntot=0'\n print >>f,'rm -f ${ROOTDIR}/%s ; touch ${ROOTDIR}/%s;'%(flist,flist)\n for fin in inputs:\n fname = fin if hadd else '%s.unfold'%fin\n print >>f,'f=\"${RESDIR}/%s.root\"'%fname\n print >>f,'st=`xrd uct3-xrd.mwt2.org existfile $f`'\n print >>f,'if [ \"$st\" == \"The file exists.\" ]; then'\n # xrootd files: reduce cache size, since hadd is stupid and will eat 100% of RAM\n print >>f,'echo ${RESHOST}/$f?cachesz=1000000 >> ${ROOTDIR}/%s'%flist\n print >>f,'((ntot++))'\n print >>f,'else'\n print >>f,'echo ERROR: failed to locate file $f'\n print >>f,'fi'\n print >>f,'if [ \"$ntot\" -eq \"$nexpected\" ]; then echo \"ALL DONE\"; else echo \"ERROR: missing `expr $nexpected - $ntot` files\"; echo exit 202; exit 202; fi'\n print >>f,'if [ \"$ntot\" -eq \"0\" ]; then echo \"ERROR: no files to merge\"; echo exit 203; exit 203; fi'\n print >>f,\"\"\"\n# a special version of hadd that adds files in chunks of 20\nfunction hadd2() {\n local per\n per=30 #20\n fin=$1\n opts=$2\n fout=$3\n shift\n n=`cat $fin | wc -l`\n ngrp=`expr $n / $per`\n nrem=`expr $n % $per`\n if [ \\\"$nrem\\\" == \\\"0\\\" ]; then ngrp=`expr $ngrp - 1`; fi\n for igrp in `seq 0 $ngrp`; do\n\timin=`expr $per \\* $igrp`\n\timax=`expr $per \\* $igrp + $per`\n\tif [ \\\"$imax\\\" -gt \\\"$n\\\" ]; then imax=`expr $per \\* $igrp + $nrem`; fi\n\t# offset by 1\n\timin=`expr $imin + 1`\n\timax=`expr $imax`\n\tidel=`expr $imax - $imin + 1`\n\techo \\\"===== Part $igrp / $ngrp : $imin to $imax\\\"\n\techo hadd ${opts} \\\"${fout}.TMPHADD_${igrp}.root\\\" `cat $fin | head -n $imax | tail -n $idel`\n\thadd ${opts} \\\"${fout}.TMPHADD_${igrp}.root\\\" `cat $fin | head -n $imax | tail -n $idel`\n\tst=$?\n\tif [ \\\"$st\\\" != \\\"0\\\" ]; then\n\t echo \\\"ERROR: merge step $igrp failed. Bailing out...\\\"\n\t return $st\n\tfi\n done\n # remove opts to speed up the last step and prevent creation of additional ntuple cycles;2\n echo hadd ${fout} ${fout}.TMPHADD_*root*\n hadd ${fout} ${fout}.TMPHADD_*root*\n st=$?\n rm -f ${fout}.TMPHADD_*root*\n return $st\n}\n \"\"\"\n if False:\n if hadd:\n print >>f, 'echo hadd -O %s `cat ${ROOTDIR}/%s`'%(s.outROOTpath,flist)\n print >>f, 'hadd -O %s `cat ${ROOTDIR}/%s`'%(s.outROOTpath,flist)\n else:\n print >>f, 'echo hadd -T %s `cat ${ROOTDIR}/%s`'%(s.outROOTpath,flist)\n print >>f, 'hadd -T %s `cat ${ROOTDIR}/%s`'%(s.outROOTpath,flist)\n else:\n print >>f, 'hadd2 ${ROOTDIR}/%s \"%s\" %s'%(flist,\"-O\" if hadd else \"-T\",s.outROOTpath)\n print >>f, \"status=$?\"\n print >>f, SH_POST\n f.close()\n os.system('chmod +x %s'%s.jobfile)\n s.write_submit_script()\n return True",
"def eddieSubmit(model_list, config,rootDir, verbose=False, resubmit=None, runCode=None):\r\n \r\n outputDir=os.path.join(rootDir,'jobOutput') # directory where output goes. \r\n # try and create it. \r\n try: \r\n os.makedirs(outputDir)\r\n except OSError:\r\n if not os.path.isdir(outputDir):\r\n raise\r\n \r\n sshCmd='ssh login01.ecdf.ed.ac.uk \" cd %s ; '%(os.getcwd()) # need to ssh to a login node to do things to Q's and cd to current dir\r\n #\r\n modelDirFile=os.path.join(rootDir,'tempDirList.txt') # name of file containing list of directories for post processing stage\r\n with open(modelDirFile, 'w') as f:\r\n for m in model_list:\r\n f.write(m.dirPath+','+m.ppExePath()+','+m.ppOutputFile()+'\\n') # write out info for post processing job.\r\n # submit the following.. Need path to postProcess.sh\r\n jobName='PP'+config.name()\r\n ## work out postprocess script path\r\n postProcess=os.path.expandvars('$OPTCLIMTOP/eddie/postProcess.sh')\r\n scriptName=os.path.expandvars('$OPTCLIMTOP/eddie/qsub.sh')\r\n # TODO move to better python syntax for var printing. Think can use named vars in below.\r\n qsub_cmd='qsub -l h_vmem=2G -l h_rt=00:10:00 -V -cwd -e %s -o %s'%(outputDir,outputDir) # std stuff for submission\r\n # means # 2 Gbyte Mem 10 min run, cur env, curr wd, output (error & std) in OutputDir\r\n # deal with runCode\r\n if runCode is not None: qsub_cmd += ' -P %s '%(runCode)\r\n cmd = qsub_cmd+' -t 1:%d -h -N %s '%(len(model_list),jobName)\r\n cmd += postProcess\r\n cmd += \" %s %s \"%(modelDirFile, config.fileName())\r\n if verbose: print \"postProcess task array cmd is \",cmd\r\n # run the post process and get its job id\r\n jid = subprocess.check_output(sshCmd+cmd+'\"', shell=True)\r\n # '\"' and shell=True seem necessary. Would be good to avoid both\r\n postProcessJID=jid.split()[2].split('.')[0] # extract the actual job id.\r\n if verbose: print \"postProcess array job id is %s\"%postProcessJID\r\n # TODO wrap this in a try/except block.\r\n # write the jobid + N into the model -- for later when \r\n # model gets some processing.\r\n for indx in range(len(model_list)):\r\n model_list[indx].jid=postProcessJID+'.%d'%(indx+1)\r\n\r\n # now submit this entire script so that the next iteration in the algorithm.\r\n # can be run\r\n if resubmit is not None:\r\n # submit the next job in the iteration. -hold_jid jid means the post processing job will only run after the\r\n # arry of post processing jobs has ran.\r\n jobName='RE'+config.name()\r\n # TODO move to better python syntax for var printing. Think can use named vars in...\r\n cmd = [qsub_cmd,'-hold_jid %s -N %s %s'%(postProcessJID,jobName, scriptName)]\r\n cmd.extend(resubmit) # add the arguments in including the programme to run..\r\n cmd=' '.join(cmd) # convert to one string.\r\n if verbose: print \"Next iteration cmd is \", cmd\r\n jid = subprocess.check_output(sshCmd+cmd+'\"', shell=True) # submit the script. Good to remove shell=True and '\"'\r\n jid = jid.split()[2] # extract the actual job id.\r\n if verbose: print \"Job ID for next iteration is %s\"%jid\r\n # now submit the models\r\n for m in model_list:\r\n # need to put the post processing job release command in the model somehow. Depends on the model\r\n # but we have a mark and a file. So will modify the file. The model should define this..\r\n # and insert the mark into the file. 
Would I think be easier to keep the line no and goto that.\r\n for line in fileinput.input(m.postProcessFile, inplace=1, backup='.bak2'):\r\n # if m.postProcessFile does not exist then get an error which is what we want!\r\n # fix your model method!\r\n print line[0:-1] # just print the line out.\r\n if m.postProcessMark in line: # got the mark so add some text.\r\n print sshCmd,'qrls ',m.jid,'\"' # this releases the post processing job.\r\n # dealt with modifying main file.\r\n modelSubmitName=m.submit()\r\n if verbose: print \"Submitting \",modelSubmitName\r\n subprocess.check_output(sshCmd+modelSubmitName+'\"',shell=True) # submit the script\r\n\r\n return True",
"def job_workflow(workflow, jobfiles, jwcl=WCL()):\n #pylint: disable=protected-access,expression-not-assigned,lost-exception\n global pool\n global results\n global stop_all\n global jobfiles_global\n global job_track\n global keeprunning\n global donejobs\n global result_lock\n global lock_monitor\n\n infullnames = {}\n with open(workflow, 'r') as workflowfh:\n # for each wrapper execution\n lines = workflowfh.readlines()\n sys.stdout.flush()\n inputs = {}\n # read in all of the lines in dictionaries\n for linecnt, line in enumerate(lines):\n wrapnum = miscutils.fwsplit(line.strip())[0]\n task = parse_wrapper_line(line, linecnt)\n #task['logfile'] = None\n wcl = WCL()\n with open(task['wclfile'], 'r') as wclfh:\n wcl.read(wclfh, filename=task['wclfile'])\n wcl.update(jwcl)\n\n # get fullnames for inputs and outputs\n ins, _ = intgmisc.get_fullnames(wcl, wcl, None)\n del wcl\n # save input filenames to eliminate from junk tarball later\n infullnames[wrapnum] = []\n for isect in ins:\n for ifile in ins[isect]:\n infullnames[wrapnum].append(ifile)\n jobfiles['infullnames'].extend(ifile)\n inputs[wrapnum] = (task, copy.deepcopy(jobfiles), jwcl, ins)\n job_track[task['wrapnum']] = (task['logfile'], jobfiles)\n # get all of the task groupings, they will be run in numerical order\n tasks = jwcl[\"fw_groups\"].keys()\n tasks.sort()\n # loop over each grouping\n manager = mp.Manager()\n for task in tasks:\n results = [] # the results of running each task in the group\n # get the maximum number of parallel processes to run at a time\n nproc = int(jwcl[\"fw_groups\"][task][\"fw_nthread\"])\n procs = miscutils.fwsplit(jwcl[\"fw_groups\"][task][\"wrapnums\"])\n tempproc = []\n # pare down the list to include only those in this run\n for p in procs:\n if p in inputs.keys():\n tempproc.append(p)\n procs = tempproc\n if nproc > 1:\n numjobs = len(procs)\n # set up the thread pool\n pool = mp.Pool(processes=nproc, maxtasksperchild=2)\n outq = manager.Queue()\n errq = manager.Queue()\n with lock_monitor:\n try:\n donejobs = 0\n # update the input files now, so that it only contains those from the current taks(s)\n for inp in procs:\n jobfiles_global['infullnames'].extend(infullnames[inp])\n # attach all the grouped tasks to the pool\n [pool.apply_async(job_thread, args=(inputs[inp] + (outq, errq, True,),), callback=results_checker) for inp in procs]\n pool.close()\n time.sleep(10)\n while donejobs < numjobs and keeprunning:\n count = 0\n while count < 2:\n count = 0\n try:\n msg = outq.get_nowait()\n print msg\n except:\n count += 1\n try:\n errm = errq.get_nowait()\n sys.stderr.write(errm)\n except:\n count += 1\n time.sleep(.1)\n except:\n results.append(1)\n exc_type, exc_value, exc_traceback = sys.exc_info()\n traceback.print_exception(exc_type, exc_value, exc_traceback,\n limit=4, file=sys.stdout)\n\n raise\n\n finally:\n if stop_all and max(results) > 0:\n # wait to give everything time to do the first round of cleanup\n time.sleep(20)\n # get any waiting messages\n for _ in range(1000):\n try:\n msg = outq.get_nowait()\n print msg\n except:\n break\n for _ in range(1000):\n try:\n errm = errq.get_nowait()\n sys.stderr.write(errm)\n except:\n break\n if not result_lock.acquire(False):\n lock_monitor.wait(60)\n else:\n result_lock.release()\n # empty the worker queue so nothing else starts\n terminate(force=True)\n # wait so everything can clean up, otherwise risk a deadlock\n time.sleep(50)\n del pool\n while True:\n try:\n msg = outq.get(timeout=.1)\n print msg\n except:\n break\n\n while True:\n 
try:\n errm = errq.get(timeout=.1)\n sys.stderr.write(errm)\n except:\n break\n # in case the sci code crashed badly\n if not results:\n results.append(1)\n jobfiles = jobfiles_global\n jobfiles['infullnames'] = list(set(jobfiles['infullnames']))\n if stop_all and max(results) > 0:\n return max(results), jobfiles\n # if running in single threaded mode\n else:\n temp_stopall = stop_all\n stop_all = False\n\n donejobs = 0\n for inp in procs:\n try:\n jobfiles_global['infullnames'].extend(infullnames[inp])\n results_checker(job_thread(inputs[inp] + (sys.stdout, sys.stderr, False,)))\n except:\n (extype, exvalue, trback) = sys.exc_info()\n traceback.print_exception(extype, exvalue, trback, file=sys.stdout)\n results = [1]\n jobfiles = jobfiles_global\n if results[-1] != 0:\n return results[-1], jobfiles\n stop_all = temp_stopall\n\n\n return 0, jobfiles",
"def main(workdir):\n dir = os.path.expanduser(workdir)\n \n #read the .dat file\n f = open('{}smi.dat'.format(dir))\n par = imp.load_source('par', '', f)\n \n #make a sdf file for visualization\n output = pybel.Outputfile(\"sdf\", dir + \"species.sdf\",overwrite=True)\n for name in par.smiles:\n smi = par.smiles[name]\n obmol = pybel.readstring(\"smi\",smi)\n output.write(obmol)\n output.close()\n \n #list with the jobs that need to be done\n jobs = []\n \n #iterate the input files\n for name in par.smiles:\n #name = input_file.replace('.inp','') #name of the calculation\n test_dir = dir + name #location where the calculations will be done\n if not os.path.exists(test_dir):\n os.mkdir(test_dir)\n \n #copy the input file to the working directory\n write_input_file(par,name,par.smiles[name],test_dir + '/input.inp')\n job = workdir + name + '/'\n jobs.append(job)\n \n run_threads(jobs, 'eric', max_running = 3)",
"def RunJobs(self, runfile_mapping, server_run_map):\n if self.workflow is None:\n raise RuntimeError(\"Tried to create unnamed workflow!\")\n\n \n # Generate jobs for the first pass over the data\n for run in sorted(runfile_mapping.keys()):\n if self.VERBOSE>0:\n inputfiles=\"/%s/rawdata/volatile/%s/rawdata/Run%06d/hd_rawdata_*.evio\"%(HDRunFileRAIDList.GetRAIDDirFromRun(run,server_run_map),HDJobUtils.GetRunPeriodFromRun(run),run)\n\n # PASS 0\n print \"processing run %d, phase 0 ...\"%(int(run))\n\n # set up command to execute\n if self.nthreads:\n cmd += \" %s/scripts/%s %s %s %06d %03d %d\"%(self.basedir,\"job_wrapper_local.csh\",\"local_calib_pass0.csh\",self.basedir,run,inputfiles,int(self.nthreads))\n else:\n cmd += \" %s/scripts/%s %s %s %06d %03d\"%(self.basedir,\"job_wrapper_local.csh\",\"local_calib_pass0.csh\",self.basedir,run,inputfiles)\n\n # run command\n os.system(cmd)\n\n # PASS 1\n print \"processing run %d, phase 1 ...\"%(int(run))\n\n # set up command to execute\n if self.nthreads:\n cmd += \" %s/scripts/%s %s %s %06d %03d %d\"%(self.basedir,\"job_wrapper_local.csh\",\"local_calib_pass1.csh\",self.basedir,run,inputfiles,int(self.nthreads))\n else:\n cmd += \" %s/scripts/%s %s %s %06d %03d\"%(self.basedir,\"job_wrapper_local.csh\",\"local_calib_pass1.csh\",self.basedir,run,inputfiles)\n\n # run command\n os.system(cmd)",
"def main():\n init()\n separator_len = 40\n for s in stage_instances:\n print('='*separator_len)\n print(s.name)\n print('-'*separator_len)\n\n s.add_tasks() # Add tasks from previous stage\n s.revive_or_archive() # Revive killed tasks or move them to failed\n s.schedule_jobs() # Schedule new jobs if needed\n s.print_status()\n print('='*separator_len + '\\n')\n render(stage_instances)",
"def _generate_hadoop_shell_script(arg_list, shell_env, working_dir, turi_dist_path, **kwargs):\n script_file = tempfile.NamedTemporaryFile(delete=False)\n logger.debug(\"script file name: \" + script_file.name)\n\n filenames_needed = ['dml_commander_startup',\n 'dml_worker_startup',\n 'libdml_toolkits.so',\n 'libdml_shared.so',\n 'libhdfs.so',\n 'libminipsutil.so',\n 'libc++abi.so.1']\n\n copy_cmd = \"hadoop fs -copyToLocal \" + turi_dist_path + \"/\"\n for i in filenames_needed:\n script_file.write(copy_cmd + DD_BINS_PATH + i + '\\n')\n\n script_file.write(\"chmod 755 ./dml_commander_startup\\n\")\n script_file.write(\"chmod 755 ./dml_worker_startup\\n\")\n script_file.write(\"export LD_LIBRARY_PATH=${JAVA_HOME}/jre/lib/amd64/server:${LD_LIBRARY_PATH}\\n\")\n script_file.write(\"export CLASSPATH=$(hadoop classpath --glob)\\n\")\n for k, v in shell_env.items():\n script_file.write(\"export %s=%s\\n\" % (str(k), str(v)))\n\n script_file.write(\"env\\n\")\n #script_file.write(\"if [ $MY_RANK -eq 0 ]; then\\n\")\n #script_file.write(\" stress --vm-bytes 4g --vm-keep -m 1 --timeout 30\\n\")\n #script_file.write(\"fi\\n\")\n script_file.write(\"if [ $MY_RANK -eq 0 ]; then\\n\")\n script_file.write(\" echo Starting commander\\n\")\n script_file.write(\" ./dml_commander_startup \")\n for arg in arg_list[0]:\n if len(arg) > 7 and arg[0:7] == \"--args=\":\n script_file.write(arg[0:7] + '\"' + arg[7:] + '\" ')\n else:\n script_file.write(arg + \" \")\n script_file.write(\"> >(tee commander.log.stdout) 2> >(tee commander.log.stderr >&2)\")\n script_file.write(\"\\n\")\n script_file.write(\" echo Uploading commander log\\n\")\n script_file.write(\" hadoop fs -put \" + \"./commander.log.stdout \" +\n \"/\".join([working_dir, 'commander.log'])+\".stdout\\n\")\n script_file.write(\" hadoop fs -put \" + \"./commander.log.stderr \" +\n \"/\".join([working_dir, 'commander.log'])+\".stderr\\n\")\n script_file.write(\"else\\n\")\n script_file.write(\" let MY_RANK=$MY_RANK-1\\n\")\n script_file.write(\" echo Starting worker $MY_RANK\\n\")\n script_file.write(\" ./dml_worker_startup \")\n for arg in arg_list[1]:\n script_file.write(arg + \" \")\n script_file.write(\"> >(tee worker.log.stdout) 2> >(tee worker.log.stderr >&2)\")\n script_file.write(\"\\n\")\n script_file.write(\" echo Uploading worker $MY_RANK log\\n\")\n script_file.write(\" hadoop fs -put \" + \"./worker.log.stdout \" +\n \"/\".join([working_dir, \"worker_${MY_RANK}.log\"])+\".stdout\\n\")\n script_file.write(\" hadoop fs -put \" + \"./worker.log.stderr \" +\n \"/\".join([working_dir, \"worker_${MY_RANK}.log\"])+\".stderr\\n\")\n script_file.write(\"fi\\n\")\n script_file.close()\n return script_file.name",
"def run( **kwargs ):\n\n # combine options using correct preference\n options = dict(PARAMS.items())\n options.update( getCallerLocals().items() )\n options.update( kwargs.items() )\n\n def setupJob( session ):\n\n jt = session.createJobTemplate()\n jt.workingDirectory = os.getcwd()\n jt.jobEnvironment = { 'BASH_ENV' : '~/.bashrc' }\n jt.args = []\n jt.nativeSpecification = \"-V -q %s -p %i -N %s %s\" % \\\n (options.get(\"job_queue\", global_options.cluster_queue ),\n options.get(\"job_priority\", global_options.cluster_priority ),\n \"_\" + re.sub( \"[:]\", \"_\", os.path.basename(options.get(\"outfile\", \"ruffus\" ))),\n options.get(\"job_options\", global_options.cluster_options))\n\n # keep stdout and stderr separate\n jt.joinFiles=False\n\n return jt\n\n shellfile = os.path.join( os.getcwd(), \"shell.log\" )\n \n # run multiple jobs\n if options.get( \"statements\" ):\n\n statement_list = []\n for statement in options.get(\"statements\"): \n options[\"statement\"] = statement\n statement_list.append(buildStatement( **options))\n \n if options.get( \"dryrun\", False ): return\n\n # get session for process - only one is permitted\n pid = os.getpid()\n if pid not in global_sessions: \n\n L.debug( \"creating new drmaa session for pid %i\" % pid )\n global_sessions[pid]=drmaa.Session() \n global_sessions[pid].initialize()\n\n session = global_sessions[pid]\n \n jt = setupJob( session )\n \n jobids, filenames = [], []\n for statement in statement_list:\n # create job script\n tmpfile = tempfile.NamedTemporaryFile( dir = os.getcwd() , delete = False )\n tmpfile.write( \"#!/bin/bash\\n\" ) # -l -O expand_aliases\\n\" )\n tmpfile.write( 'echo \"START--------------------------------\" >> %s \\n' % shellfile )\n tmpfile.write( '''echo 'statement=%s' >> %s\\n''' % (statement, shellfile) )\n tmpfile.write( \"set &>> %s\\n\" % shellfile)\n tmpfile.write( \"module list &>> %s\\n\" % shellfile )\n tmpfile.write( 'echo \"END----------------------------------\" >> %s \\n' % shellfile )\n tmpfile.write( expandStatement(statement) + \"\\n\" )\n tmpfile.close()\n\n # build paths\n job_path = os.path.abspath( tmpfile.name )\n stdout_path = job_path + \".stdout\" \n stderr_path = job_path + \".stderr\" \n\n jt.remoteCommand = job_path\n jt.outputPath=\":\"+ stdout_path\n jt.errorPath=\":\" + stderr_path\n\n os.chmod( job_path, stat.S_IRWXG | stat.S_IRWXU )\n\n jobid = session.runJob(jt)\n jobids.append( jobid )\n filenames.append( (job_path, stdout_path, stderr_path) )\n\n L.debug( \"job has been submitted with jobid %s\" % str(jobid ))\n \n L.debug( \"waiting for %i jobs to finish \" % len(jobids) )\n session.synchronize(jobids, drmaa.Session.TIMEOUT_WAIT_FOREVER, False)\n \n # collect and clean up\n for jobid, statement, paths in zip( jobids, statement_list, filenames) :\n job_path, stdout_path, stderr_path = paths\n retval = session.wait(jobid, drmaa.Session.TIMEOUT_WAIT_FOREVER)\n\n stdout, stderr = getStdoutStderr( stdout_path, stderr_path )\n\n if retval.exitStatus != 0:\n raise PipelineError( \"---------------------------------------\\n\"\n \"Child was terminated by signal %i: \\n\"\n \"The stderr was: \\n%s\\n%s\\n\" \n \"---------------------------------------\\n\" % \\\n (retval.exitStatus, \n \"\".join( stderr),\n statement ) )\n\n os.unlink( job_path )\n \n session.deleteJobTemplate(jt)\n\n # run a single parallel job\n elif (options.get( \"job_queue\" ) or options.get( \"to_cluster\" )) and not global_options.without_cluster:\n\n statement = buildStatement( **options )\n\n if options.get( 
\"dryrun\", False ): return\n\n tmpfile = tempfile.NamedTemporaryFile( dir = os.getcwd() , delete = False )\n tmpfile.write( \"#!/bin/bash\\n\" ) # -l -O expand_aliases\\n\" )\n\n tmpfile.write( 'echo \"START--------------------------------\" >> %s \\n' % shellfile )\n tmpfile.write( 'echo \"statement=%s\" >> %s\\n' % (statement, shellfile) )\n tmpfile.write( \"set &>> %s\\n\" % shellfile)\n tmpfile.write( \"module list &>> %s\\n\" % shellfile )\n tmpfile.write( 'echo \"END----------------------------------\" >> %s \\n' % shellfile )\n tmpfile.write( expandStatement( statement ) + \"\\n\" )\n tmpfile.close()\n\n job_path = os.path.abspath( tmpfile.name )\n stdout_path = job_path + \".stdout\" \n stderr_path = job_path + \".stderr\" \n\n os.chmod( job_path, stat.S_IRWXG | stat.S_IRWXU )\n\n # get session for process - only one is permitted\n pid = os.getpid()\n if pid not in global_sessions:\n L.debug( \"creating new drmaa session for pid %i\" % pid )\n global_sessions[pid]=drmaa.Session() \n global_sessions[pid].initialize()\n\n session = global_sessions[pid]\n\n jt = setupJob( session )\n\n jt.remoteCommand = job_path\n # later: allow redirection of stdout and stderr to files; can even be across hosts?\n jt.outputPath=\":\"+ stdout_path\n jt.errorPath=\":\" + stderr_path\n\n if \"job_array\" in options and options[\"job_array\"] != None:\n # run an array job\n start, end, increment = options.get(\"job_array\" )\n L.debug(\"starting an array job: %i-%i,%i\" % (start, end, increment ))\n # sge works with 1-based, closed intervals\n jobids = session.runBulkJobs( jt, start+1, end, increment )\n L.debug( \"%i array jobs have been submitted as jobid %s\" % (len(jobids), jobids[0]) )\n retval = session.synchronize(jobids, drmaa.Session.TIMEOUT_WAIT_FOREVER, True)\n else:\n jobid = session.runJob(jt)\n L.debug( \"job has been submitted with jobid %s\" % str(jobid ))\n try:\n retval = session.wait(jobid, drmaa.Session.TIMEOUT_WAIT_FOREVER)\n except Exception, msg:\n # ignore message 24 in PBS\n # code 24: drmaa: Job finished but resource usage information and/or termination status could not be provided.\":\n if not msg.message.startswith(\"code 24\"): raise\n retval = None\n\n stdout, stderr = getStdoutStderr( stdout_path, stderr_path )\n\n if \"job_array\" not in options:\n if retval and retval.exitStatus != 0:\n raise PipelineError( \"---------------------------------------\\n\"\n \"Child was terminated by signal %i: \\n\"\n \"The stderr was: \\n%s\\n%s\\n\"\n \"-----------------------------------------\" % \\\n (retval.exitStatus, \n \"\".join( stderr), statement))\n \n session.deleteJobTemplate(jt)\n os.unlink( job_path )\n\n else:\n statement = buildStatement( **options )\n\n if options.get( \"dryrun\", False ): return\n \n if \"<(\" in statement:\n if \"'\" in statement: raise ValueError( \"advanced bash syntax combined with single quotes\" )\n statement = \"\"\"/bin/bash -c '%s'\"\"\" % statement\n\n process = subprocess.Popen( expandStatement( statement ),\n cwd = os.getcwd(), \n shell = True,\n stdin = subprocess.PIPE,\n stdout = subprocess.PIPE,\n stderr = subprocess.PIPE )\n\n # process.stdin.close()\n stdout, stderr = process.communicate()\n\n if process.returncode != 0:\n raise PipelineError( \"---------------------------------------\\n\"\n \"Child was terminated by signal %i: \\n\"\n \"The stderr was: \\n%s\\n%s\\n\"\n \"-----------------------------------------\" % \\\n (-process.returncode, stderr, statement ))",
"def submit(self, **kwargs):\n pwd = curdir\n wd = dirname(self.logFile)\n chdir(wd)\n d = OrderedDict()\n #d['universe'] = 'vanilla'\n #d['executable'] = self.command\n\td['job-name'] = self.name\n\td['nodes'] = 1\n\td['partition'] = defaults.get('queue')\n\td['time'] = defaults.get(\"cputime\")\n\td['mem'] = defaults.get(\"memory\")\n d['output'] = op_join(wd,\"output.log\")\n d['error'] = op_join(wd,\"output.err\")\n csi_file = open(\"submit.sh\", \"w\")\n\tcsi_file.write(\"#!/bin/bash\\n\")\n data = [\"#SBATCH --%s=%s\\n\" % (k, v) for k, v in d.iteritems()]\n csi_file.write(\"\".join(data))\n\tcsi_file.write(\"export DAMPE_WORKFLOW_SERVER_URL=%s\\n\"%DAMPE_WORKFLOW_URL)\n csi_file.write(\"bash script\\n\")\n csi_file.close()\n output = self.__run__(\"sbatch submit.sh\")\n chdir(pwd)\n return self.__regexId__(output)",
"def del_jobs():\n\n # find Python run scripts and shell submit scripts\n if os.environ.get('OS','') == 'Windows_NT':\n p1 = subprocess.Popen(\"dir /A:-d /B | findstr/r \\\"run[.].*[.]py\\\"\", shell=True, stdout=subprocess.PIPE) # list Python run files\n p2 = subprocess.Popen(\"dir /A:-d /B | findstr/r \\\"submit_run[.].*[.]sh\\\"\", shell=True, stdout=subprocess.PIPE) # list SGE submit files\n else:\n p1 = subprocess.Popen(\"ls -l | grep 'run[.].*[.]py' | awk '{print $9}'\", shell=True, stdout=subprocess.PIPE) # list Python run files\n p2 = subprocess.Popen(\"ls -l | grep 'submit_run[.].*[.]sh' | awk '{print $9}'\", shell=True, stdout=subprocess.PIPE) # list SGE submit files\n out1 = p1.stdout.read()\n out2 = p2.stdout.read()\n\n if os.environ.get('OS','') == 'Windows_NT':\n fnames1 = out1.rsplit(\"\\r\\n\")\n fnames2 = out2.rsplit(\"\\r\\n\")\n else:\n fnames1 = out1.rsplit(\"\\n\")\n fnames2 = out2.rsplit(\"\\n\")\n if len(fnames1) > 0: del fnames1[-1]\n if len(fnames2) > 0: del fnames2[-1]\n\n fnames = fnames1 + fnames2\n for fname in fnames:\n if verbose:\n print \"Removing '%s'\" %fname\n os.remove(fname)\n\n # find and delete SGE output files\n if os.environ.get('OS','') != 'Windows_NT':\n p = subprocess.Popen(\"ls -l | egrep '*.o[0-9]{4,8}[.][0-9]+$' | awk '{print $9}'\", shell=True, stdout=subprocess.PIPE) # list SGE output files\n out = p.stdout.read()\n fnames = out.rsplit(\"\\n\")\n if len(fnames) > 0: del fnames[-1]\n \n for fname in fnames:\n# if verbose:\n print \"Removing '%s'\" %fname\n os.remove(fname)",
"def createjob(args):\n ncell = args.ncell\n nmg = args.nmg\n nsi = args.nsi\n nvac = args.nvac\n a0 = args.a0\n temp = args.temp\n nseeds = args.nseeds\n seeds = args.seeds\n nsteps = args.nsteps\n foldername_append = args.foldername_append\n pot = args.pot\n submit = args.submit\n submitdebug = args.submitdebug\n submittime_hours = args.submittime_hours\n test = args.test\n testfiles = args.testfiles\n nodes = args.nodes\n verbose = args.verbose\n\n\n ### check if ase runner/quippy/lammpps-data formats are known\n ase_formats = mu.ase_get_known_formats_class(verbose=True)\n ase_formats.check_if_default_formats_known(copy_and_adapt_formatspy_anyhow=False)\n\n # definex ffsocket inet/unix\n if nodes == 1:\n ffsocket = \"unix\"\n elif nodes > 1:\n ffsocket = \"inet\"\n else:\n sys.exit(\"Number of nodes has to be positive!\")\n\n\n # define ntasks, neval\n lmp_par = 2 # = OMP_NUM_THREADS\n ntasks = cores = nodes * 28\n ipi_inst = 4 # for sure best on fidis\n neval = ipi_inst*2 # was alwasy better, for ompi and impi\n\n ##### get the seed(s).\n if type(seeds) == bool:\n seeds = random.sample(range(1, 999999), nseeds)\n print('seeds',seeds)\n if test == True:\n nseeds = 1\n seeds = [1]\n print('seeds',seeds)\n nseeds = len(seeds)\n\n ##### a few checks\n scripts = mu.scripts()\n mypot = mu.mypot(pot)\n if submit is True or submitdebug is True:\n hostcheck = os.environ[\"myhost\"]\n if hostcheck == \"\":\n sys.exit('host unknown 87')\n\n\n ##### here only chck if the potential can be set up. (in.lmp)\n ##### the same command is then executed for every kmc folder\n ace = mu.ase_calculate_ene(pot=pot,\n potpath=False,\n units='eV',geopt=False,kmc=True,verbose=verbose)\n ace.pot_get_and_ase_lmp_cmd(kmc=True,temp=temp,nsteps=nsteps,ffsocket=ffsocket)\n\n ##### if test\n if test == True:\n nsteps = 50\n\n file_ipi_input_runner = scripts + \"/i-pi-mc_scripts/input-runner.xml\"\n\n\n ####################################\n # get directory\n ####################################\n if verbose:\n print(\"get directory\")\n pcsi = nsi/ncell**3.*100\n pcmg = nmg/ncell**3.*100\n pcvac = nvac/ncell**3.*100\n if args.cubic == True:\n pc = \"cubic\"\n else:\n pc = \"primitive\"\n directory = str(ncell)+\"x\"+str(ncell)+\"x\"+str(ncell)+\"_\"+pc+\"_\"+pot+\"_\"+\\\n str(temp)+\"K_\"+\\\n str(nvac)+\"Vac_\"+str(nmg)+\"Mg_\"+str(nsi)+\"Si__\"+\\\n str(round(pcvac,3))+\"pctVac_\"+str(round(pcmg,3))+\"pctMg_\"+str(round(pcsi,3))+\"pctSi\"\n if foldername_append != \"\":\n directory = directory+\"_\"+foldername_append\n\n ###############################################\n # make the structure\n ###############################################\n atomsc_fakevac = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell,nsi,nmg,nvac,a0,create_fake_vacancy = True,cubic=args.cubic)\n atomsc = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell,nsi,nmg,nvac,a0,cubic=args.cubic)\n\n # make the atomic structure\n # this was to play ... 
not necessary now?\n if False:\n nndist = a0/np.sqrt(2.)\n\n from ase.io import read as ase_read\n from ase.io import write as ase_write\n\n ###############################################\n # get the amount of 1NN in a relly large cell\n ###############################################\n atomsc_fakevac_i = ase_read('dataxx.extxyz3',index=\":\",format='extxyz') # works, cell ist not changed\n #atomsc_fakevac_i = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell=10,nsi=0,nmg=0,nvac=1,a0=a0,cubic=False,create_fake_vacancy = True,normal_ordering=\"XX_0\")\n #nn = mu.ase_get_neighborlist(atomsc_fakevac_i,atomnr=0,cutoff=3.,skin=0.1)\n #print(\"nn\",nn,'len',len(nn))\n #nn = mu.ase_get_neighborlist(atomsc_fakevac_i,atomnr=0,cutoff=8.5,skin=0.1)\n #print(\"nn\",nn,'len',len(nn))\n #sys.exit()\n\n print(len(atomsc_fakevac_i),type(atomsc_fakevac_i))\n\n for idx,i in enumerate(atomsc_fakevac_i):\n print('aa',atomsc_fakevac_i[idx].positions[0])\n #print('aa',i.positions[0])\n print('ipi')\n atomsc_fakevac_i = ase_read('dataxx.ipi2',index=\":\",format='ipi') # works, cell ist not changed\n print(len(atomsc_fakevac_i),type(atomsc_fakevac_i))\n for idx,i in enumerate(atomsc_fakevac_i):\n print('aa',atomsc_fakevac_i[idx].positions[0])\n #print('aa',i.positions[0])\n print('quippy')\n atomsc_fakevac_i = ase_read('dataxx.quippy.xyz2',index=\":\",format='quippy') # works, cell ist not changed\n\n\n\n filename = '../sim.xyz'\n filename = '../simulation.pos_0.xyz'\n mu.count_amount_1NN_around_vacancies(filename,cutoffa=nndist,cutoffb=a0,skin=0.1,format='ipi')\n sys.exit()\n\n def mysave_quippy_xyz(atomsc_fakevac,text=False):\n if type(text) == bool:\n sys.exit('define text')\n atomsc_fakevac.write('data.quippy.xyz',format='quippy',append=True)\n #atomsc_fakevac.write('data.xyz',format=\"extxyz\",append=True)\n atomsc_fakevac.write('data'+text+'.quippy.xyz',format='quippy',append=True)\n #atomsc_fakevac.write('data'+text+'.xyz',format=\"extxyz\",append=True)\n return\n\n # create Al with single vacancy\n atomsc_fakevac = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell=5,nsi=0,nmg=0,nvac=1,a0=a0,cubic=False,create_fake_vacancy = True,normal_ordering=\"XX_0\")\n NN_1_indices, NN_2_indices = mu.ase_get_neighborlist_1NN_2NN(atomsc_fakevac,atomnr=0,cutoffa=nndist,cutoffb=a0,skin=0.1)\n #print('from ....',(atomsc_fakevac.positions)[0])\n #for i in NN_1_indices:\n # print((atomsc_fakevac.positions)[i])\n print('NN_1_indices (orig ):',NN_1_indices)\n print('NN_2_indices (orig ):',NN_2_indices)\n #sys.exit()\n atomsc_fakevac.write('dataxx.quippy.xyz',format='quippy',append=True)\n atomsc_fakevac.write('dataxx.poscar',format='vasp',append=True)\n atomsc_fakevac.write('dataxx.ipi',format='ipi',append=True) # works, currently so implemented that it canges cell\n atomsc_fakevac.write('dataxx.xyz',format='xyz',append=True)\n atomsc_fakevac.write('dataxx.extxyz',format='extxyz',append=True)\n atomsc_fakevac.write('dataxx.lammps-data',format='lammps-data',append=True)\n atomsc_fakevac.write('dataxx.lammps-runner',format='lammps-runner',append=True)\n\n atomsc_fakevac_a = ase_read('dataxx.extxyz',format='extxyz') # works, cell ist not changed\n atomsc_fakevac_a.write('dataxx.extxyz2',format='extxyz',append=True) # works, cell is not changed\n\n atomsc_fakevac_b = ase_read('dataxx.xyz',format='xyz') # not working # but this should work\n atomsc_fakevac_b.write('dataxx.xyz2',format='xyz',append=True) # this is working\n\n atomsc_fakevac_c = ase_read('dataxx.ipi',format='ipi') # works, currently so implemented that it canges cell\n 
#print('ipi cell',atomsc_fakevac_c.get_cell())\n\n atomsc_fakevac_c.write('dataxx.ipi2',format='ipi',append=True) # works, just writes the cell it gests.\n atomsc_fakevac_c.write('dataxx.ipi2_poscar',format='vasp',append=True) # works, just writes the cell it gests.\n NN_1_indices, NN_2_indices = mu.ase_get_neighborlist_1NN_2NN(atomsc_fakevac_c,atomnr=0,cutoffa=nndist,cutoffb=a0,skin=0.1)\n print('NN_1_indices (ipi ):',NN_1_indices)\n print('NN_2_indices (ipi ):',NN_2_indices)\n #print('from ....',(atomsc_fakevac_c.positions)[0])\n #for i in NN_1_indices:\n # print((atomsc_fakevac_c.positions)[i])\n\n atomsc_fakevac_cc = ase_read('dataxx.ipi2_poscar',format='vasp') # works, currently so implemented that it canges cell\n atomsc_fakevac_cc.write('dataxx.ipi2_poscar2',format='vasp',append=True)\n atomsc_fakevac_cc.write('dataxx.ipi2_poscar2_ipi',format='ipi',append=True) # works, just writes the cell it gests.\n #print('ipi cell2 (ext):',atomsc_fakevac_cc.get_cell())\n #print()\n #print('now quippy')\n atomsc_fakevac_d = ase_read('dataxx.quippy.xyz',format='quippy')\n #print('quippy cell (ext)',atomsc_fakevac_d.get_cell())\n atomsc_fakevac_d.write('dataxx.quippy.xyz2',format='quippy',append=True)\n atomsc_fakevac_d.write('dataxx.quippy.xyz2_extxyz',format='extxyz',append=True)\n NN_1_indices, NN_2_indices = mu.ase_get_neighborlist_1NN_2NN(atomsc_fakevac_d,atomnr=0,cutoffa=nndist,cutoffb=a0,skin=0.1)\n print('NN_1_indices (quippy):',NN_1_indices)\n print('NN_2_indices (quippy):',NN_2_indices)\n #print('from ....',(atomsc_fakevac_d.positions)[0])\n #for i in NN_1_indices:\n # print((atomsc_fakevac_d.positions)[i])\n path = \"/home/glensk/kmc/run_michele/Si6Mg6V1.1_/simulation.pos_libatom_2struct.xyz\"\n atomsc_fakevac_e = ase_read(path,format='quippy')\n\n NN_1_indices, NN_2_indices = mu.ase_get_neighborlist_1NN_2NN(atomsc_fakevac_e,atomnr=0,cutoffa=nndist,cutoffb=a0,skin=0.1)\n print('NN_1_indices (kmc ):',NN_1_indices)\n print('NN_2_indices (kmc ):',NN_2_indices)\n sys.exit()\n\n NN_1_indices = mu.ase_get_neighborlist(atomsc_fakevac,atomnr=0,cutoff=nndist,skin=0.1)\n NN_1_2_indices_tmp = mu.ase_get_neighborlist(atomsc_fakevac,atomnr=0,cutoff=a0,skin=0.1)\n print('NN_1_indices :',NN_1_indices)\n NN_2_indices = np.sort(np.array(mu.diff(NN_1_2_indices_tmp,NN_1_indices)))\n print('NN_2_indices :',NN_2_indices)\n NN_1_2_indices = np.concatenate((NN_1_indices, NN_2_indices ))\n print('NN_1_2_indices:',NN_1_2_indices)\n\n\n # fill only 1NN (with one species)\n for i in [ 'Mg', 'Si' ]:\n atomsc_fakevac = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell=5,nsi=0,nmg=0,nvac=1,a0=a0,cubic=False,create_fake_vacancy = True,normal_ordering=\"XX_0\")\n mysave_quippy_xyz(atomsc_fakevac,text=\"1NN\")\n for ii in NN_1_indices:\n atomsc_fakevac[ii].symbol = i\n mysave_quippy_xyz(atomsc_fakevac,text=\"1NN\")\n\n # fill only 2NN (with one species)\n for i in [ 'Mg', 'Si' ]:\n atomsc_fakevac = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell=5,nsi=0,nmg=0,nvac=1,a0=a0,cubic=False,create_fake_vacancy = True,normal_ordering=\"XX_0\")\n mysave_quippy_xyz(atomsc_fakevac,text=\"2NN\")\n for ii in NN_2_indices:\n atomsc_fakevac[ii].symbol = i\n mysave_quippy_xyz(atomsc_fakevac,text=\"2NN\")\n\n # fill 1NN and 2NN (with one species)\n for i in [ 'Mg', 'Si' ]:\n atomsc_fakevac = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell=5,nsi=0,nmg=0,nvac=1,a0=a0,cubic=False,create_fake_vacancy = True,normal_ordering=\"XX_0\")\n mysave_quippy_xyz(atomsc_fakevac,text=\"1and2NN\")\n for ii in NN_1_2_indices:\n atomsc_fakevac[ii].symbol = 
i\n mysave_quippy_xyz(atomsc_fakevac,text=\"1and2NN\")\n\n # dif compositions in 1NN shell\n filling = [ 2,4,6,8,10]\n for fi in filling:\n atomsc_fakevac = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell=5,nsi=0,nmg=0,nvac=1,a0=a0,cubic=False,create_fake_vacancy = True,normal_ordering=\"XX_0\")\n mysave_quippy_xyz(atomsc_fakevac,text=\"1NN_diffcomp\")\n for idx,ii in enumerate(NN_1_indices):\n if idx < fi: ch = \"Mg\"\n else: ch = \"Si\"\n atomsc_fakevac[ii].symbol = ch\n mysave_quippy_xyz(atomsc_fakevac,text=\"1NN_diffcomp\")\n\n\n sys.exit()\n\n #mu.ase_get_known_formats(show=True, add_missing_formats=False, copy_formats=False, verbose=False,show_formatspy=True)\n for i in [ 'Mg', 'Si' ]:\n for ii in [ 0,1,2,3,4,5]:\n atomsc_fakevac = mu.get_ase_atoms_object_kmc_al_si_mg_vac(ncell=5,nsi=0,nmg=0,nvac=1,a0=a0,cubic=False,create_fake_vacancy = True,normal_ordering=i+'_'+str(ii))\n\n\n sys.exit()\n\n\n # show the input variables\n print('--------------------------- check the input --------------------------------')\n print('JOBS (nseeds) ',nseeds,'(defined by -nseeds / or -seeds)')\n print('seeds ',seeds)\n print('nsteps ',nsteps)\n print()\n print('ncell ',ncell,\"(\",atomsc.get_number_of_atoms(),\"atoms )\")\n print('nsi ',nsi, \"(\",pcsi,\"at%)\")\n print('nmg ',nmg,\"(\",pcmg,\"at%)\")\n print('nvac ',nvac,\"(\",pcvac,\"at%)\")\n print('a0 ',a0,\"angstrom\")\n print('temp ',temp,\"K\")\n print()\n print('mypot.pot ',mypot.pot)\n print('mypot.potpath ',mypot.potpath)\n print()\n print('directory ',directory)\n print('submit ',submit)\n print('submitdebug ',submitdebug)\n print()\n print('nodes ',nodes)\n print('ffsocket ',ffsocket)\n #print('python ver ',sys.version_info[0])\n #print()\n print('--------------------------- check the input --------------------------------')\n if submit == True or submitdebug == True:\n mu.get_from_prompt_Yy_orexit(\"Are the ine input variables ok? [y]es: \")\n\n # make the directory\n if os.path.isdir(directory):\n mu.get_from_prompt_Yy_orexit(\"This main directory exists already, shall I add jobs? 
[y]es: \")\n mu.mkdir(directory)\n\n # create README.md\n IPI_COMMAND = os.environ[\"IPI_COMMAND\"]\n LAMMPS_COMMAND = os.environ[\"LAMMPS_COMMAND\"]\n mu.create_READMEtxt(directory,add=[\"# to start manually (1): python \"+IPI_COMMAND+\" input-runner.xml\",\"# to start manually (2):\"+LAMMPS_COMMAND+\" < in.lmp\"])\n\n for seed in seeds:\n\n # make jobdirectory\n jobdir = directory+'/seed'+str(seed)\n print('jobdir',jobdir)\n if os.path.exists(jobdir):\n sys.exit(\"jobdirectory \"+str(jobdir)+\" already exists!\")\n mu.mkdir(jobdir)\n\n # get data.lmp and data.ipi\n atomsc.write(jobdir+'/data.runnerformat.lmp',format='lammps-runner')\n atomsc_fakevac.write(jobdir+'/data.ipi',format='ipi')\n atomsc_fakevac.write(jobdir+'/data.extxyz',format='extxyz')\n #atomsc_fakevac.write(jobdir+'/data_fakevac.ipi',format='ipi')\n\n if testfiles == True:\n atomsc.write(jobdir+'/data.lmp',format='lammps-data')\n atomsc.write(jobdir+'/data.POSCAR',format='vasp')\n atomsc.write(jobdir+'/data.xyz',format='xyz')\n atomsc.write(jobdir+'/data.extxyz',format='extxyz')\n atomsc.write(jobdir+'/data.espresso-in',format='espresso-in')\n\n # create in.lmp\n ace = mu.ase_calculate_ene(pot=pot,potpath=mypot.potpath,\n units='eV',geopt=False,kmc=True,verbose=verbose)\n address = socket.gethostname()+\"_\"+os.path.basename(jobdir)\n print('address',address)\n ace.pot_get_and_ase_lmp_cmd(kmc=True,temp=temp,nsteps=nsteps,ffsocket=ffsocket,address=address)\n mu.lammps_write_inputfile(folder=jobdir,filename='in.lmp',positions='data.runnerformat.lmp',ace=ace)\n\n # create input-runner.xml (should be made without copying)\n mu.create_ipi_kmc_inputfile(jobdir,filename=\"input-runner.xml\",nsteps=nsteps,stride=100,seed=seed,a0=a0,ncell=ncell,nsi=nsi,nmg=nmg,nvac=nvac,neval=neval,temp=temp,nodes=nodes,address=address,testrun=test,cubic=args.cubic)\n\n # create submit-ipi-kmc.sh (should be made without copying)\n mu.create_submitskript_ipi_kmc(jobdir+\"/submit-ipi-kmc.sh\",nodes,ntasks,\n lmp_par=lmp_par,\n ipi_inst=ipi_inst,\n ffsocket=ffsocket,\n submittime_hours=submittime_hours,\n SBATCH=True)\n\n # create osubmit-ipi-kmc.sh (should be made without copying)\n mu.create_submitskript_ipi_kmc(jobdir+\"/osubmit-ipi-kmc.sh\",nodes,ntasks,\n lmp_par=lmp_par,\n ipi_inst=ipi_inst,\n ffsocket=ffsocket,\n submittime_hours=submittime_hours,\n SBATCH=False)\n\n # submit the job (execute either this or submit-ipi-kmc.sh_all3, not both)\n #mu.submitjob(submit=submit,submitdebug=submitdebug,jobdir=jobdir,submitskript=\"submit-ipi-kmc.sh\")\n\n # get submit-ipi-kmc.sh_all3 (should be made without copying)\n if nseeds == 3:\n mu.create_submitskript_ipi_kmc(directory+\"/submit-ipi-kmc.sh_all3\",nodes,ntasks,\n lmp_par=lmp_par,\n ipi_inst=ipi_inst,\n ffsocket=ffsocket,\n submittime_hours=submittime_hours,\n SBATCH=True,\n LOOPFOLDER=True)\n\n # submit the job (execute either this or submit-ipi-kmc.sh_all3, not both)\n #mu.submitjob(submit=submit,submitdebug=submitdebug,jobdir=directory,submitskript=\"submit-ipi-kmc.sh_all3\")\n if submit == True:\n mu.submitjob(submit_to_que=True,submit_to_debug_que=False,jobdir=directory,submitskript=\"submit-ipi-kmc.sh_all3\")\n\n\n print('done')\n return",
"def write_dag_script(s):\n assert len(s.jobs) in (1,2),'ERROR: write_dag_script should be called from the final merge JobSet'\n s.dag = os.path.join( s.jobs[0].submitdir, 'global.dag')\n f = open(s.dag,'w')\n # condor submit scripts\n for dep in s.get_deps():\n print >>f,'Job %s %s'%(dep.jobname(),dep.condorfile)\n for job in s.jobs:\n print >>f,'Job %s %s'%(job.jobname(),job.condorfile)\n # retry instructions\n for dep in s.get_deps():\n print >>f,'Retry %s %s'%(dep.jobname(),NRETRY)\n for job in s.jobs:\n print >>f,'Retry %s %s'%(job.jobname(),NRETRY)\n a_parent = ' '.join( [ dep.jobname() for dep in s.get_deps() ] )\n for job in s.jobs:\n a_child = job.jobname()\n print >>f,'PARENT %s CHILD %s'%(a_parent,a_child)\n f.close()",
"def create_slurm_file(\n slurm_filepath: Path, batch_size: int, num_batches: int, time_limit: int\n):\n slurm_string = f\"\"\"#!/usr/bin/bash\n#SBATCH --job-name=pctsp\n#SBATCH --partition=cpu-batch\n#SBATCH --ntasks=10\n#SBATCH --cpus-per-task=1\n#SBATCH --mem-per-cpu=4000\n#SBATCH --time={time_limit}:00:00\n#SBATCH --array=0-{num_batches-1}\n\n## Loop over each batch ##\nstart=$(($SLURM_ARRAY_TASK_ID * {batch_size}))\nsrun --ntasks=1 python scripts/batch_model.py $start {batch_size} \\\n\"\"\"\n slurm_filepath.write_text(slurm_string)"
] | [
"0.6961429",
"0.67148894",
"0.6494868",
"0.6412396",
"0.62112385",
"0.6205976",
"0.61844945",
"0.6169179",
"0.611067",
"0.60501873",
"0.6024691",
"0.5989451",
"0.59104246",
"0.5877937",
"0.5877124",
"0.58605266",
"0.5851678",
"0.5838534",
"0.5806282",
"0.5746106",
"0.573468",
"0.57236755",
"0.57103765",
"0.5692827",
"0.56674784",
"0.56575453",
"0.56522924",
"0.563752",
"0.56328887",
"0.5620631"
] | 0.69199365 | 1 |
Writes the output in SLURM array job format. Creates sub shell scripts that contain the workflow for each input file separately. After this, a main shell script containing the SLURM configuration is created; this script is responsible for starting the sub shells as separate processes. | def write_slurm(workloads, input_file_parameters, command_line_parameters):
workload_index = 0
workload_zfill_amount = len(str(len(workloads)))
workload_file_paths = []
for workload in workloads:
        # Each workflow part will have a separate file to submit to SLURM with
        # the sbatch command. Each file has one or more associated subshell files
        # containing the contents for each thread.
# Generate strings describing current workload and thread indexes for
# output file names
workload_index += 1
workload_index_string = str(workload_index).zfill(workload_zfill_amount)
file_main_name = '{0}_SBATCH_WORKLOAD_{1}'.format(NAME,
workload_index_string)
        # When --fix_run mode is used, the output and log files already
        # exist. To prevent overwriting these files with new ones, specific
        # prefix or appendix strings are added to the new output file names.
appendix = '.sh'
prefix = ''
i = 0
if command_line_parameters.fix_run:
mode = 'FIX'
elif command_line_parameters.compress_run == 'compress':
mode = 'COMPRESS'
elif command_line_parameters.compress_run == 'decompress':
mode = 'DECOMPRESS'
else:
mode = None
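        # Bump the counter until an output file name that does not already exist is found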
while mode is not None and os.path.exists(os.path.join(input_file_parameters.output_dir,
file_main_name + appendix)):
i += 1
prefix = '{0}_{1}_'.format(mode, i)
appendix = '_{0}_{1}.sh'.format(mode, i)
# Generate subshell files
thread_index = 0
for thread_contents in workload:
# Iterate over output commands of each thread and write necessary
# subshell files for each
out_lines = []
cmds_in_thread = len(thread_contents)
for i in xrange(cmds_in_thread):
                # Check if any modules need loading or whether they were already loaded by the previous command
skip_module_loading = False
if i > 0:
if thread_contents[i].load_module == thread_contents[i-1].load_module:
skip_module_loading = True
                # Check if any modules need unloading or whether they will be used by the following command
skip_module_unloading = False
if i < cmds_in_thread-1:
if thread_contents[i].load_module == thread_contents[i+1].load_module:
skip_module_unloading = True
out_lines += generate_subshell_file_contents(thread_contents[i],
skip_module_loading,
skip_module_unloading)
# Write subshell file
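            # thread_index is incremented before use, so subshell files are numbered 1..N,
            # matching the 1-based --array range defined in the sbatch file below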
thread_index += 1
thread_index_string = str(thread_index)
fl_name = '{0}_WORKLOAD_{1}_subshell_{2}{3}'.format(NAME,
workload_index_string,
thread_index_string,
appendix)
try:
out_fl = open(os.path.join(input_file_parameters.output_dir,
fl_name), 'w')
except:
raise STAPLERerror.STAPLERerror('Unable to create output file:'
'\n{0}'.format(os.path.join(
input_file_parameters.output_dir,
fl_name)))
out_fl.write('\n'.join(out_lines))
out_fl.write('\n')
out_fl.close()
# Create lines for SLURM input file by generating job-name, output,
# error and array parameters based on user input
status_file_basename = os.path.join(input_file_parameters.output_dir,
prefix + input_file_parameters.job_name)
resmng_config = list(input_file_parameters.resource_manager_params)
resmng_config.append('#SBATCH --job-name={0}'.format(input_file_parameters.job_name))
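        # In the --output/--error paths %A expands to the SLURM job ID and %a to the array task index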
resmng_config.append('#SBATCH --output={0}_%A_%a.out'.format(status_file_basename))
resmng_config.append('#SBATCH --error={0}_%A_%a.err'.format(status_file_basename))
resmng_config.append('#SBATCH --array={0}-{1}'.format(1, len(workload)))
resmng_config.append('\n\n')
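        # The "$SLURM_ARRAY_TASK_ID" below is written out literally and expanded by the shell at
        # runtime, so each array task sources the subshell file matching its own index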
subshell_file_path = '{0}_WORKLOAD_{1}_subshell_{2}{3}'.format(NAME,
workload_index_string,
'"$SLURM_ARRAY_TASK_ID"',
appendix)
subshell_file_path = os.path.join(input_file_parameters.output_dir,
subshell_file_path)
resmng_config.append('source {0}'.format(subshell_file_path))
out_fl_path = os.path.join(input_file_parameters.output_dir,file_main_name + appendix)
workload_file_paths.append(out_fl_path)
try:
out_fl = open(out_fl_path, 'w')
except IOError as emsg:
raise STAPLERerror.STAPLERerror('Unable to create output file:'
'\n{0}\n with error message:\n{1}'
.format(os.path.join(input_file_parameters.output_dir,
file_main_name + appendix),
str(emsg)))
out_fl.write('\n'.join(resmng_config))
out_fl.write('\n')
out_fl.close()
return workload_file_paths | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write_sge(workloads, input_file_parameters, command_line_parameters):\r\n validate_resource_manager_parameters(\r\n input_file_parameters.resource_manager_params,\r\n ['# -o', '# -e', '# -t'])\r\n\r\n workload_index = 0\r\n workload_zfill_amount = len(str(len(workloads)))\r\n workload_file_paths = []\r\n for workload in workloads:\r\n # Each workflow part will have separate file to submit to TORQUE with\r\n # sbatch command. Each file has one or more associated subshell files\r\n # containing contents for each thread.\r\n\r\n # Generate strings describing current workload and thread indexes for\r\n # output file names\r\n workload_index += 1\r\n workload_index_string = str(workload_index).zfill(workload_zfill_amount)\r\n file_main_name = '{0}_SGE_WORKLOAD_{1}'.format(NAME,\r\n workload_index_string)\r\n\r\n # When --fix_run mode is used the output and log files files already\r\n # exist. To prevent overwriting these files with new ones specific\r\n # prefix or appendix strings are added to the new output file names.\r\n prefix = ''\r\n appendix = '.sh'\r\n i = 0\r\n if command_line_parameters.fix_run:\r\n mode = 'FIX'\r\n elif command_line_parameters.compress_run == 'compress':\r\n mode = 'COMPRESS'\r\n elif command_line_parameters.compress_run == 'decompress':\r\n mode = 'DECOMPRESS'\r\n else:\r\n mode = None\r\n while mode is not None and os.path.exists(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix)):\r\n i += 1\r\n prefix = '{0}_{1}_'.format(mode, i)\r\n appendix = '_{0}_{1}.sh'.format(mode, i)\r\n\r\n # Generate subshell files\r\n thread_index = 1\r\n for thread_contents in workload:\r\n # Iterate over output commands of each thread and write necessary\r\n # subshell files for each\r\n out_lines = []\r\n cmds_in_thread = len(thread_contents)\r\n for i in xrange(cmds_in_thread):\r\n # Check if any modules need loading or are they loaded by previous command\r\n skip_module_loading = False\r\n if i > 0:\r\n if thread_contents[i].load_module == thread_contents[i-1].load_module:\r\n skip_module_loading = True\r\n # Check if any modules need unloading or will they be used by following command\r\n skip_module_unloading = False\r\n if i < cmds_in_thread-1:\r\n if thread_contents[i].load_module == thread_contents[i+1].load_module:\r\n skip_module_unloading = True\r\n out_lines += generate_subshell_file_contents(thread_contents[i],\r\n skip_module_loading,\r\n skip_module_unloading)\r\n\r\n\r\n # Write subshell file\r\n thread_index_string = str(thread_index)\r\n fl_name = '{0}_WORKLOAD_{1}_subshell_{2}{3}'.format(NAME,\r\n workload_index_string,\r\n thread_index_string,\r\n appendix)\r\n try:\r\n out_fl = open(os.path.join(input_file_parameters.output_dir,\r\n fl_name), 'w')\r\n except:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}'.format(os.path.join(\r\n input_file_parameters.output_dir,\r\n fl_name)))\r\n out_fl.write('\\n'.join(out_lines))\r\n out_fl.write('\\n')\r\n out_fl.close()\r\n thread_index += 1\r\n\r\n # Create lines for SGE input file by generating job-name, output,\r\n # error and array parameters based on user input\r\n\r\n status_file_basename = os.path.join(input_file_parameters.output_dir,\r\n prefix +\r\n input_file_parameters.job_name + '_$TASK_ID')\r\n\r\n # IF YOU ADD NEW AUTOMATICALLY INFERRED PARAMETERS, REMEMBER TO VALIDATE\r\n # THEM AT THE BEGINNING OF THIS FUNCTION\r\n resmng_config = list(input_file_parameters.resource_manager_params)\r\n resmng_config.append('#$ -o 
{0}.out'.format(status_file_basename))\r\n resmng_config.append('#$ -e {0}.err'.format(status_file_basename))\r\n resmng_config.append('#$ -t {0}-{1}'.format(1, len(workload)))\r\n\r\n resmng_config.append('\\n\\n')\r\n subshell_file_path = '{0}_WORKLOAD_{1}_subshell_{2}{3}'.format(NAME,\r\n workload_index_string,\r\n '\"$SGE_TASK_ID\"',\r\n appendix)\r\n subshell_file_path = os.path.join(input_file_parameters.output_dir,\r\n subshell_file_path)\r\n resmng_config.append('source {0}'.format(subshell_file_path))\r\n\r\n out_fl_path = os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix)\r\n workload_file_paths.append(out_fl_path)\r\n try:\r\n out_fl = open(out_fl_path, 'w')\r\n\r\n except IOError as emsg:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}\\n with error message:\\n{1}'\r\n .format(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix),\r\n str(emsg)))\r\n out_fl.write('\\n'.join(resmng_config))\r\n out_fl.write('\\n')\r\n out_fl.close()\r\n return workload_file_paths",
"def write_unix(workloads, input_file_parameters, command_line_parameters):\r\n\r\n workload_index = 0\r\n workload_zfill_amount = len(str(len(workloads)))\r\n background_process_list = []\r\n for workload in workloads:\r\n # Each workflow part will have separate file to submit to TORQUE with\r\n # sbatch command. Each file has one or more associated subshell files\r\n # containing contents for each thread.\r\n\r\n # Generate strings describing current workload and thread indexes for\r\n # output file names\r\n workload_index += 1\r\n workload_index_string = str(workload_index).zfill(workload_zfill_amount)\r\n file_main_name = '{0}_UNIX_WORKLOAD_1'.format(NAME)\r\n\r\n # Add information about current workflow to the main shell script\r\n background_process_list.append('echo \"Running workload part {0}\"'.format(\r\n workload_index))\r\n\r\n # When --fix_run mode is used the output and log files files already\r\n # exist. To prevent overwriting these files with new ones specific\r\n # prefix or appendix strings are added to the new output file names.\r\n appendix = '.sh'\r\n i = 0\r\n if command_line_parameters.fix_run:\r\n mode = 'FIX'\r\n elif command_line_parameters.compress_run == 'compress':\r\n mode = 'COMPRESS'\r\n elif command_line_parameters.compress_run == 'decompress':\r\n mode = 'DECOMPRESS'\r\n else:\r\n mode = None\r\n while mode is 'FIX' and os.path.exists(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix)):\r\n i += 1\r\n appendix = '_{0}_{1}.sh'.format(mode, i)\r\n\r\n if mode in ('COMPRESS', 'DECOMPRESS'):\r\n appendix = '_{0}.sh'.format(mode)\r\n while os.path.exists(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix)):\r\n i += 1\r\n appendix = '_{0}_{1}.sh'.format(mode, i)\r\n\r\n\r\n # Generate subshell files\r\n thread_index = 0\r\n thread_zfill_amount = len(str(len(workload)))\r\n for thread_contents in workload:\r\n # Iterate over output commands of each thread and write necessary\r\n # subshell files for each\r\n out_lines = []\r\n cmds_in_thread = len(thread_contents)\r\n for i in xrange(cmds_in_thread):\r\n # Check if any modules need loading or are they loaded by previous command\r\n skip_module_loading = False\r\n if i > 0:\r\n if thread_contents[i].load_module == thread_contents[i-1].load_module:\r\n skip_module_loading = True\r\n # Check if any modules need unloading or will they be used by following command\r\n skip_module_unloading = False\r\n if i < cmds_in_thread-1:\r\n if thread_contents[i].load_module == thread_contents[i+1].load_module:\r\n skip_module_unloading = True\r\n out_lines += generate_subshell_file_contents(thread_contents[i],\r\n skip_module_loading,\r\n skip_module_unloading)\r\n\r\n\r\n # Write subshell file\r\n thread_index_string = str(thread_index).zfill(thread_zfill_amount)\r\n fl_name = '{0}_WORKLOAD_{1}_subshell_{2}{3}'.format(NAME,\r\n workload_index_string,\r\n thread_index_string,\r\n appendix)\r\n try:\r\n out_fl = open(os.path.join(input_file_parameters.output_dir,\r\n fl_name), 'w')\r\n except:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}'.format(os.path.join(\r\n input_file_parameters.output_dir,\r\n fl_name)))\r\n out_fl.write('\\n'.join(out_lines))\r\n out_fl.write('\\n')\r\n out_fl.close()\r\n # i.e. 
use UNIX source to run input shell script, redirect stdout\r\n # and stderr to an .out file.\r\n background_process_list.append('source {0} >> {0}.out 2>&1 &'.format(\r\n os.path.join(input_file_parameters.output_dir,\r\n fl_name)))\r\n thread_index += 1\r\n\r\n # Workflow steps are written to a single output file (instead of\r\n # separate files). \"wait\" command is inserted in between workflow parts\r\n # to synchronize workflows.\r\n background_process_list.append('wait\\n\\n')\r\n\r\n # Write the main shell script file\r\n resmng_config = list(input_file_parameters.resource_manager_params)\r\n resmng_config.append('\\n\\n')\r\n resmng_config.append('\\n'.join(background_process_list))\r\n\r\n out_fl_path = os.path.join(input_file_parameters.output_dir, file_main_name + appendix)\r\n try:\r\n out_fl = open(out_fl_path, 'w')\r\n except IOError as emsg:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}\\n with error message:\\n{1}'\r\n .format(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix),\r\n str(emsg)))\r\n out_fl.write('\\n'.join(resmng_config))\r\n out_fl.write('\\n')\r\n out_fl.close()\r\n return [out_fl_path]",
"def write_lsf(workloads, input_file_parameters, command_line_parameters):\r\n workload_index = 0\r\n workload_zfill_amount = len(str(len(workloads)))\r\n workload_file_paths = []\r\n for workload in workloads:\r\n # Each workflow part will have separate file to submit to TORQUE with\r\n # sbatch command. Each file has one or more associated subshell files\r\n # containing contents for each thread.\r\n\r\n # Generate strings describing current workload and thread indexes for\r\n # output file names\r\n workload_index += 1\r\n workload_index_string = str(workload_index).zfill(workload_zfill_amount)\r\n file_main_name = '{0}_LSF_WORKLOAD_{1}'.format(NAME,\r\n workload_index_string)\r\n\r\n # When --fix_run mode is used the output and log files files already\r\n # exist. To prevent overwriting these files with new ones specific\r\n # prefix or appendix strings are added to the new output file names.\r\n appendix = '.sh'\r\n i = 0\r\n if command_line_parameters.fix_run:\r\n mode = 'FIX'\r\n elif command_line_parameters.compress_run == 'compress':\r\n mode = 'COMPRESS'\r\n elif command_line_parameters.compress_run == 'decompress':\r\n mode = 'DECOMPRESS'\r\n else:\r\n mode = None\r\n while mode is not None and os.path.exists(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix)):\r\n i += 1\r\n appendix = '_{0}_{1}.sh'.format(mode, i)\r\n\r\n # Generate subshell files\r\n thread_index = 0\r\n for thread_contents in workload:\r\n # Iterate over output commands of each thread and write necessary\r\n # subshell files for each\r\n out_lines = []\r\n cmds_in_thread = len(thread_contents)\r\n for i in xrange(cmds_in_thread):\r\n # Check if any modules need loading or are they loaded by previous command\r\n skip_module_loading = False\r\n if i > 0:\r\n if thread_contents[i].load_module == thread_contents[i-1].load_module:\r\n skip_module_loading = True\r\n # Check if any modules need unloading or will they be used by following command\r\n skip_module_unloading = False\r\n if i < cmds_in_thread-1:\r\n if thread_contents[i].load_module == thread_contents[i+1].load_module:\r\n skip_module_unloading = True\r\n out_lines += generate_subshell_file_contents(thread_contents[i],\r\n skip_module_loading,\r\n skip_module_unloading)\r\n\r\n # Write subshell file\r\n thread_index_string = str(thread_index)\r\n fl_name = '{0}_WORKLOAD_{1}_subshell_{2}{3}'.format(NAME,\r\n workload_index_string,\r\n thread_index_string,\r\n appendix)\r\n try:\r\n out_fl = open(os.path.join(input_file_parameters.output_dir,\r\n fl_name), 'w')\r\n except:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}'.format(os.path.join(\r\n input_file_parameters.output_dir,\r\n fl_name)))\r\n out_fl.write('\\n'.join(out_lines))\r\n out_fl.write('\\n')\r\n out_fl.close()\r\n thread_index += 1\r\n\r\n # Generate parameter file for the bsub run\r\n resmng_config = []\r\n resmng_config.append('#BSUB-J \"{0}[1-{1}]\"'.format(\r\n input_file_parameters.job_name,\r\n len(workload)))\r\n resmng_config.append('#BSUB-i {0}_WORKLOAD_{1}_subshell_{2}{3}'.format(\r\n NAME,\r\n workload_index_string,\r\n '%I',\r\n appendix))\r\n resmng_config.append('#BSUB-o {0}_WORKLOAD_{1}_subshell_{2}{3}.out'.format(\r\n NAME,\r\n workload_index_string,\r\n '%I',\r\n appendix))\r\n resmng_config += input_file_parameters.resource_manager_params\r\n\r\n out_fl_path = os.path.join(input_file_parameters.output_dir, file_main_name + appendix)\r\n workload_file_paths.append(out_fl_path)\r\n try:\r\n out_fl = open(out_fl_path, 
'w')\r\n\r\n except IOError as emsg:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}\\n with error message:\\n{1}'\r\n .format(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix),\r\n str(emsg)))\r\n out_fl.write('\\n'.join(resmng_config))\r\n out_fl.write('\\n')\r\n out_fl.close()\r\n return workload_file_paths",
"def write_default(workflows, output_dir):\r\n\r\n # Calculate the total number of commands\r\n number_of_commands = 0\r\n for workflow in workflows:\r\n number_of_commands += sum(map(len, workflow))\r\n\r\n # Create command line strings\r\n i = 0\r\n out_lines = ['echo Started executing shell script at:', 'date']\r\n for workflow in workflows:\r\n for workflow_step in workflow:\r\n for cmd in workflow_step:\r\n i += 1\r\n cmd_list = cmd.command_lines\r\n cmd_list = map(clean_command_lines, cmd_list)\r\n out_lines.append('echo Executing command {0}/{1}:'\r\n .format(i, number_of_commands))\r\n for c in cmd_list:\r\n c = c.replace('>', '\\\\>')\r\n c = c.replace('|', '\\\\|')\r\n out_lines.append('echo ' + c)\r\n out_lines.append('date')\r\n\r\n #Load modules\r\n if cmd.load_module:\r\n for module in cmd.load_module:\r\n out_lines.append(module)\r\n\r\n #The command\r\n out_lines += cmd_list\r\n\r\n #Unload modules\r\n if cmd.unload_module:\r\n for module in cmd.unload_module:\r\n out_lines.append(module)\r\n out_lines.append('echo Finished at:')\r\n out_lines.append('date')\r\n\r\n #Open and write command lines\r\n fl_name = '{0}_output_{1}.sh'.format(NAME, START_TIME)\r\n output_file_path = os.path.join(output_dir, fl_name)\r\n try:\r\n out_fl = open(output_file_path, 'w')\r\n except:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}'.format(os.path.join(output_dir,\r\n fl_name)))\r\n out_fl.write('#!/usr/bin/env bash\\n')\r\n out_fl.write('\\n'.join(out_lines))\r\n out_fl.close()\r\n return [output_file_path]",
"def gen_jobs(fpath, num_runs, netid):\n\n run = \"\"\n run += \"import sys\\n\"\n run += \"import subprocess\\n\"\n run += \"cmd_array = (\"\n for i in range(num_runs):\n run += \"r\\\"python test.py %d\\\"\" % i\n run += \",\\n\"\n\n run += \")\\n\"\n run += \"p = subprocess.Popen(cmd_array[int(sys.argv[1])-1], shell=True, stdout=subprocess.PIPE)\\n\"\n run += \"out = p.stdout.read()\"\n# run += \"print cmd_array[int(sys.argv[1])]\"\n\n script_name = \"test\"\n\n if verbose:\n print \"Writing array script: \" + \"run.\" + script_name + \".py\"\n f = open(os.path.join(fpath, \"run.\" + script_name + \".py\"), 'w')\n f.write(\"%s\\n\" % run)\n\n f = open(os.path.join(fpath, \"submit_run.\" + script_name + \".sh\"), 'w')\n submit_run = \"#!/bin/csh\\n\"\n submit_run += \"#$ -N %s\\n\" % (\"job_%d\" % num_runs)\n submit_run += \"#$ -t 1:%d\\n\" % (num_runs)\n submit_run += \"#$ -M %[email protected]\\n\\n\" % (netid)\n# submit_run += \"#$ -q short\"\n# submit_run += \"#$ -r y\"\n submit_run += \"python run.%s.py ${SGE_TASK_ID}\" % (script_name)\n\n if verbose:\n print \"Writing submit shell script: \" + \"submit_run.\" + script_name + \".sh\"\n f.write(\"%s\\n\" % submit_run)",
"def create_slurm_file(\n slurm_filepath: Path, batch_size: int, num_batches: int, time_limit: int\n):\n slurm_string = f\"\"\"#!/usr/bin/bash\n#SBATCH --job-name=pctsp\n#SBATCH --partition=cpu-batch\n#SBATCH --ntasks=10\n#SBATCH --cpus-per-task=1\n#SBATCH --mem-per-cpu=4000\n#SBATCH --time={time_limit}:00:00\n#SBATCH --array=0-{num_batches-1}\n\n## Loop over each batch ##\nstart=$(($SLURM_ARRAY_TASK_ID * {batch_size}))\nsrun --ntasks=1 python scripts/batch_model.py $start {batch_size} \\\n\"\"\"\n slurm_filepath.write_text(slurm_string)",
"def prepare_parafly_slurm_job_script(sBasename_job, sBasename_parafly, sDirectory_job, sEmail, iWalltime_in = None, nNode_in = None, nThread_in=None, sJob_name_in =None, sPython_env_in =None, sQueue_in=None):\n if iWalltime_in is not None:\n iWalltime = iWalltime_in \n else:\n iWalltime = 2\n if nNode_in is not None:\n iNode = nNode_in \n else:\n iNode = 1\n if nThread_in is not None:\n nThread = nThread_in \n else:\n nThread = 40\n \n if sJob_name_in is not None:\n sJob_name = sJob_name_in \n else:\n sJob_name = 'parafly'\n if sPython_env_in is not None:\n sPython_env = sPython_env_in \n else:\n sPython_env = 'base'\n \n if sQueue_in is not None:\n sQueue = sQueue_in \n else:\n sQueue = 'short'\n \n sWalltime =\"{:0d}\".format(iWalltime )\n sNode = \"{:0d}\".format(iNode )\n sThread = \"{:0d}\".format(nThread )\n \n os.chdir(sDirectory_job)\n \n ofs = open(sBasename_job,\"w\") #write mode \n sLine = '#!/bin/bash' + '\\n'\n ofs.write( sLine ) \n sLine = '#SBATCH --account=esmd' + '\\n'\n ofs.write( sLine ) \n\n #sLine = '#SBATCH --begin=now+1minutes' + '\\n'\n #ofs.write( sLine ) \n\n sLine = '#SBATCH --cpus-per-task=1 ' + '\\n'\n ofs.write( sLine ) \n\n sLine = '#SBATCH --dependency=singleton ' + '\\n'\n ofs.write( sLine )\n sLine = '#SBATCH --error=stderr_%j.err' + '\\n'\n ofs.write( sLine ) \n sLine = '#SBATCH --job-name=' + sJob_name + ' # create a name for your job' + '\\n'\n ofs.write( sLine ) \n sLine = '#SBATCH --mail-type=ALL' + '\\n'\n ofs.write( sLine ) \n sLine = '#SBATCH --mail-user=' + sEmail + '\\n'\n ofs.write( sLine ) \n\n sLine = '#SBATCH --nodes=' + sNode + ' # node count' + '\\n'\n ofs.write( sLine ) \n sLine = '#SBATCH --ntasks=' + sThread + ' # total number of tasks' + '\\n'\n ofs.write( sLine ) \n sLine = '#SBATCH --output=stdout_%j.out' + '\\n'\n ofs.write( sLine ) \n\n sLine = '#SBATCH --partition=' + sQueue + '\\n' #can be improved here\n ofs.write( sLine ) \n sLine = '#SBATCH --time=' + sWalltime +':00:00 # total run time limit (HH:MM:SS)' + '\\n'\n ofs.write( sLine ) \n\n sLine = 'module purge' + '\\n'\n ofs.write( sLine ) \n sLine = 'module load parafly/2013' + '\\n'\n ofs.write( sLine ) \n sLine = 'module load anaconda3/2019.03' + '\\n'\n ofs.write( sLine ) \n sLine = 'source /share/apps/anaconda3/2019.03/etc/profile.d/conda.sh' + '\\n'\n ofs.write( sLine ) \n sLine = 'unset PYTHONHOME' + '\\n'\n ofs.write( sLine ) \n sLine = 'conda activate ' + sPython_env + '\\n'\n ofs.write( sLine ) \n\n sLine = 'ParaFly -c ' + sBasename_parafly + ' -CPU ' + sThread + ' -failed_cmds rerun.txt' + '\\n'\n ofs.write( sLine ) \n \n sLine = 'echo \" Job \" ' + '${SLURM_JOBID}' + ' is launched' + '\\n'\n ofs.write( sLine ) \n\n sLine = 'conda deactivate' + '\\n'\n ofs.write( sLine ) \n \n sLine = 'echo \"Finished\"' + '\\n'\n ofs.write( sLine ) \n ofs.close() \n \n return",
"def build_job_scripts(model_list, scenario_list, output_dir, cassandra_config_dir, cassandra_log_dir,\n cassandra_main_script, sbatch_account, sbatch_partition='slurm', sbatch_walltime='01:00:00',\n sbatch_ntasks=3, sbatch_nodes=3, sbatch_jobname='cassie', sbatch_logdir='.', template=None):\n\n # use default configuration template file if user does not give one\n if template is None:\n template = pkg_resources.resource_filename('cassie', 'data/sbatch_template.sh')\n\n # existing tags to replace in the template file\n model_tag = '<model>'\n scenario_tag = '<scenario>'\n account_tag = '<account>'\n partition_tag = '<partition>'\n ntasks_tag = '<ntasks>'\n nodes_tag = '<nodes>'\n time_tag = '<walltime>'\n jobname_tag = '<jobname>'\n logdir_tag = '<logdir>'\n cassandra_configdir_tag = '<cassconfigdir>'\n cassandra_logdir_tag = '<casslogdir>'\n cassandra_script_tag = '<cassmainscript>'\n\n for model in model_list:\n for scenario in scenario_list:\n\n output_file = os.path.join(output_dir, f'run_{model.lower()}_{scenario}.sh')\n\n with open(output_file, 'w') as out:\n with open(template) as get:\n\n f = get.read()\n\n # replace tag names with dynamic content\n fx = f.replace(model_tag, model)\n fx = fx.replace(scenario_tag, scenario)\n\n fx = fx.replace(account_tag, sbatch_account)\n fx = fx.replace(partition_tag, sbatch_partition)\n fx = fx.replace(ntasks_tag, str(sbatch_ntasks))\n fx = fx.replace(nodes_tag, str(sbatch_nodes))\n fx = fx.replace(time_tag, sbatch_walltime)\n fx = fx.replace(jobname_tag, sbatch_jobname)\n fx = fx.replace(logdir_tag, sbatch_logdir)\n\n fx = fx.replace(cassandra_configdir_tag, cassandra_config_dir)\n fx = fx.replace(cassandra_logdir_tag, cassandra_log_dir)\n fx = fx.replace(cassandra_script_tag, cassandra_main_script)\n\n out.write(fx)",
"def write_shell_scripts(airfoils, qsh_template, nsetup, ntype, out_dir):\n for nairfoil, sim_setup in airfoils.iteritems():\n for aoa in sim_setup['aoas']:\n # Create simulation name\n sim_name = create_sim_name(nairfoil, ntype, nsetup, aoa)\n # Create fluent journal file\n with open(qsh_template, 'r') as f:\n qtxt = f.read()\n # Start to replace parameters inside the journal\n qtxt = qtxt.replace('SIMNAME', sim_name)\n qtxt = qtxt.replace('in.jou', sim_name + '.jou')\n qtxt = qtxt.replace('fluent.out', sim_name + '.out')\n # Write new shell script to out_dir\n qout = sim_name + '.qsh'\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n with open(os.path.join(out_dir, qout), 'w') as f:\n f.write(qtxt)\n return True",
"def write_torque(workloads, input_file_parameters, command_line_parameters):\r\n validate_resource_manager_parameters(\r\n input_file_parameters.resource_manager_params,\r\n ['#PBS -k', '#PBS -N', '#PBS -d', '#PBS -e', '#PBS -t'])\r\n\r\n workload_index = 0\r\n workload_zfill_amount = len(str(len(workloads)))\r\n workload_file_paths = []\r\n for workload in workloads:\r\n # Each workflow part will have separate file to submit to TORQUE with\r\n # sbatch command. Each file has one or more associated subshell files\r\n # containing contents for each thread.\r\n\r\n # Generate strings describing current workload and thread indexes for\r\n # output file names\r\n workload_index += 1\r\n workload_index_string = str(workload_index).zfill(workload_zfill_amount)\r\n file_main_name = '{0}_TORQUE_WORKLOAD_{1}'.format(NAME,\r\n workload_index_string)\r\n\r\n # When --fix_run mode is used the output and log files files already\r\n # exist. To prevent overwriting these files with new ones specific\r\n # prefix or appendix strings are added to the new output file names.\r\n appendix = '.sh'\r\n i = 0\r\n if command_line_parameters.fix_run:\r\n mode = 'FIX'\r\n elif command_line_parameters.compress_run == 'compress':\r\n mode = 'COMPRESS'\r\n elif command_line_parameters.compress_run == 'decompress':\r\n mode = 'DECOMPRESS'\r\n else:\r\n mode = None\r\n while mode is not None and os.path.exists(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix)):\r\n i += 1\r\n appendix = '_{0}_{1}.sh'.format(mode, i)\r\n\r\n # Generate subshell files\r\n thread_index = 0\r\n for thread_contents in workload:\r\n # Iterate over output commands of each thread and write necessary\r\n # subshell files for each\r\n out_lines = []\r\n cmds_in_thread = len(thread_contents)\r\n for i in xrange(cmds_in_thread):\r\n # Check if any modules need loading or are they loaded by previous command\r\n skip_module_loading = False\r\n if i > 0:\r\n if thread_contents[i].load_module == thread_contents[i-1].load_module:\r\n skip_module_loading = True\r\n # Check if any modules need unloading or will they be used by following command\r\n skip_module_unloading = False\r\n if i < cmds_in_thread-1:\r\n if thread_contents[i].load_module == thread_contents[i+1].load_module:\r\n skip_module_unloading = True\r\n out_lines += generate_subshell_file_contents(thread_contents[i],\r\n skip_module_loading,\r\n skip_module_unloading)\r\n\r\n\r\n # Write subshell file\r\n thread_index_string = str(thread_index)\r\n fl_name = '{0}_WORKLOAD_{1}_subshell_{2}{3}'.format(NAME,\r\n workload_index_string,\r\n thread_index_string,\r\n appendix)\r\n try:\r\n out_fl = open(os.path.join(input_file_parameters.output_dir,\r\n fl_name), 'w')\r\n except:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}'.format(os.path.join(\r\n input_file_parameters.output_dir,\r\n fl_name)))\r\n out_fl.write('\\n'.join(out_lines))\r\n out_fl.write('\\n')\r\n out_fl.close()\r\n thread_index += 1\r\n\r\n # Create lines for TORQUE input file by generating job-name, output,\r\n # error and array parameters based on user input\r\n\r\n # IF YOU ADD NEW AUTOMATICALLY INFERRED PARAMETERS, REMEMBER TO VALIDATE\r\n # THEM AT THE BEGINNING OF THIS FUNCTION\r\n resmng_config = list(input_file_parameters.resource_manager_params)\r\n resmng_config.append('#PBS -k eo')\r\n resmng_config.append('#PBS -N {0}'.format(input_file_parameters.job_name))\r\n resmng_config.append('#PBS -d {0}'.format(input_file_parameters.output_dir))\r\n 
resmng_config.append('#PBS -e {0}'.format(input_file_parameters.output_dir))\r\n resmng_config.append('#PBS -t {0}-{1}'.format(0, len(workload)-1))\r\n\r\n resmng_config.append('\\n\\n')\r\n subshell_file_path = '{0}_WORKLOAD_{1}_subshell_{2}{3}'.format(NAME,\r\n workload_index_string,\r\n '\"${PBS_ARRAYID}\"',\r\n appendix)\r\n subshell_file_path = os.path.join(input_file_parameters.output_dir,\r\n subshell_file_path)\r\n resmng_config.append('source {0}'.format(subshell_file_path))\r\n\r\n out_fl_path = os.path.join(input_file_parameters.output_dir,file_main_name + appendix)\r\n workload_file_paths.append(out_fl_path)\r\n try:\r\n out_fl = open(out_fl_path, 'w')\r\n except IOError as emsg:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}\\n with error message:\\n{1}'\r\n .format(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix),\r\n str(emsg)))\r\n out_fl.write('\\n'.join(resmng_config))\r\n out_fl.write('\\n')\r\n out_fl.close()\r\n return workload_file_paths",
"def write_pbs(self):\n fout = open(\"runStarCCM.pbs\", \"w\")\n fout.write(\"#PBS -S /bin/csh\\n\")\n fout.write(\"#PBS -l select=\" + str(self.numNodes) + \":ncpus=\" + str(self.numCPUs) + \":mpiprocs=\" + str(self.mpiProcs) + \":model=has,walltime=\" + self.WallTime + \"\\n\\n\")\n fout.write(\"#PBS -W group_list=\" + self.GroupID + \"\\n\")\n fout.write(\"#PBS -j oe\\n\")\n fout.write(\"#PBS -q \" + self.queue + \"\\n\")\n fout.write(\"#PBS -N \" + self.jobName + \"\\n\")\n fout.write(\"#PBS -m e\\n\")\n fout.write(\"#PBS -W block=true\\n\\n\")\n fout.write(\"cd $PBS_O_WORKDIR\\n\")\n\n if self.runVolGrid == 1:\n #fout.write(\"/bin/rm -f \" + self.simMeshFile + \".sim\\n\")\n fout.write(\"/bin/rm -f starccmMeshRun.out\\n\")\n fout.write(\"chmod u+x \" + self.cshBatch1File + \".csh\\n\")\n # do not use >>& because it will fail in some environment\n fout.write(\"./\" + self.cshBatch1File + \".csh -powerOnDemand \" + self.javaBatch1File + \".java >& starccmMeshRun.out\\n\\n\")\n else:\n fout.write(\"echo 'User chooses not to make a mesh run.'\\n\")\n\n if self.runCFD == 1:\n fout.write(\"chmod u+x \" + self.cshBatch2File + \".csh\\n\")\n fout.write(\"/bin/rm -f *.csv *.png starccmFlowRun.out\\n\")\n # do not use >>& because it will fail in some environment\n fout.write(\"./\" + self.cshBatch2File + \".csh -powerOnDemand \" + self.javaBatch2File + \".java \" + self.simMeshFile + \" >& starccmFlowRun.out\\n\\n\")\n fout.write(\"# rename the strange file names\\n\")\n fout.write(\"/bin/mv \\$PWDForceX.csv ForceX.csv\\n\")\n fout.write(\"/bin/mv \\$PWDForceY.csv ForceY.csv\\n\")\n fout.write(\"/bin/mv \\$PWDForceZ.csv ForceZ.csv\\n\")\n fout.write(\"/bin/mv \\$PWDMomentX.csv MomentX.csv\\n\")\n fout.write(\"/bin/mv \\$PWDMomentY.csv MomentY.csv\\n\")\n fout.write(\"/bin/mv \\$PWDMomentZ.csv MomentZ.csv\\n\")\n fout.write(\"/bin/mv \\$PWDResiduals.csv Residuals.csv\\n\\n\")\n fout.write(\"/bin/mv \\$PWDForceX.png ForceX.png\\n\")\n fout.write(\"/bin/mv \\$PWDForceY.png ForceY.png\\n\")\n fout.write(\"/bin/mv \\$PWDForceZ.png ForceZ.png\\n\")\n fout.write(\"/bin/mv \\$PWDMomentX.png MomentX.png\\n\")\n fout.write(\"/bin/mv \\$PWDMomentY.png MomentY.png\\n\")\n fout.write(\"/bin/mv \\$PWDMomentZ.png MomentZ.png\\n\")\n fout.write(\"/bin/mv \\$PWDResiduals.png Residuals.png\\n\")\n fout.write(\"/bin/mv \\$PWDUpperCp.png UpperCp.png\\n\")\n fout.write(\"/bin/mv \\$PWDLowerCp.png LowerCp.png\\n\")\n fout.write(\"/bin/rm -rf null\\n\")\n else:\n fout.write(\"echo 'User chooses not to make a CFD run.'\\n\")\n\n fout.close()",
"def launchJobs(options, cmdargs, errStream=sys.stdin):\n\n if options.queue == LOCAL:\n launchLocalJobs(options,cmdargs,errStream)\n return\n\n logging.debug(\"Launching task array: %r\" % ({'tmpDir':options.tmpDir,'splits':options.splits,'fragName':options.fragBase,'cmd':cmdargs,'sgeOpts':options.sgeOptions,'job':options.jobName,'priority':options.priority,'loglevel':options.verbose,'wait':options.wait, 'type':options.taskType}))\n \n # SGE or SLURM submission prefix\n command = getSubmissionCommandPrefix(options)\n\n # batch_runner command\n command.append(BATCHLAUNCHER)\n command+=[\"--mode\",\"run\",\"--tmp_dir\",options.tmpDir,\"--frag_base\",\n options.fragBase, \"--frag_dir\", options.frag_dir, \"--frag_suffix\", options.fragSuff, \"--loglevel\", str(options.verbose), \"--queue\", options.queue]\n if options.inputFlag is not None:\n command.append('-i=%s' % (options.inputFlag))\n if options.prefixFlag is not None:\n command.append('-p=%s' % (options.prefixFlag))\n if options.threadsFlag is not None:\n command+=['-t',str(options.threadsFlag)]\n if options.outputFlags is not None:\n for flag in options.outputFlags:\n command.append('-o=%s' % (flag))\n if options.taskType is not None:\n command+=['--taskType',options.taskType]\n if options.cwd:\n command.append('--cwd')\n command.append('--')\n command+=cmdargs\n\n # redirect qsub output to std, silence if vebose is 0\n #if options.verbose==0:\n # qsubOuts=open(os.devnull,'w')\n #else:\n # qsubOuts=errStream\n \n # run command\n logging.debug('Launching task array: %s' % (formatCommand(command)))\n try:\n submissionOutput = subprocess.check_output(command)\n try:\n submissionOutput = submissionOutput.decode()\n except:\n pass\n if options.verbose>0:\n errStream.write(\"Submission Output: \" + submissionOutput)\n except subprocess.CalledProcessError as error:\n if options.wait and options.queue != SLURM:\n # when using -sync y, the exit code may come from a task\n # (which cleanup will handle)\n logging.warning(\"qsub returned an error code of: %d\" \n % error.returncode)\n else:\n raise error\n\n # get job id\n try:\n jobid = re.search(r'(\\d+)\\s*$',submissionOutput).group(1)\n options.jobid = jobid\n except:\n if options.queue==SLURM:\n logging.error(\"Cannot parse SLURM job id from '%s'\" % (submissionOutput))\n raise\n\n # SLURM doesn't allow waiting for completion on array jobs, so we hack:\n # use srun to start a dummy job that will wait for our job array\n if options.wait and options.queue==SLURM:\n waitForSlurmArray(options, errStream)",
"def create_job(jobrun, vcf_filenames):\n if jobrun == \"cluster\":\n \"\"\"\n Supports only PBS clusters for now.\n \"\"\"\n for i in vcf_filenames:\n job_name = os.path.basename(i)\n job_print_string = \"#PBS -N %s\\n#PBS -M [email protected]\\n#PBS -m abe\\n#PBS -V\\n#PBS -l nodes=1:ppn=4,pmem=4000mb,walltime=72:00:00\\n#PBS -q fluxod\\n#PBS -A esnitkin_fluxod\\n#PBS -l qos=flux\\n\\n/home/apirani/anaconda/bin/python /nfs/esnitkin/bin_group/scripts/Scripts_v2.0/variants_position_analysis/reason_job.py -filter2_only_snp_vcf_dir %s -filter2_only_snp_vcf_file %s\\n\" % (job_name, args.filter2_only_snp_vcf_dir, i)\n job_file_name = \"%s.pbs\" % (i)\n f1=open(job_file_name, 'w+')\n f1.write(job_print_string)\n f1.close()\n #os.system(\"mv %s/*.pbs %s/temp\" % (args.filter2_only_snp_vcf_dir, args.filter2_only_snp_vcf_dir))\n pbs_dir = args.filter2_only_snp_vcf_dir + \"/*.pbs\"\n pbs_scripts = glob.glob(pbs_dir)\n for i in pbs_scripts:\n print \"Running: qsub %s\" % i\n #os.system(\"qsub %s\" % i)\n\n elif jobrun == \"parallel-local\":\n \"\"\"\n Generate a Command list of each job and run it in parallel on different cores available on local system\n \"\"\"\n command_array = []\n command_file = \"%s/commands_list.sh\" % args.filter2_only_snp_vcf_dir\n f3 = open(command_file, 'w+')\n\n\n for i in vcf_filenames:\n job_name = os.path.basename(i)\n job_print_string = \"#PBS -N %s\\n#PBS -M [email protected]\\n#PBS -m abe\\n#PBS -V\\n#PBS -l nodes=1:ppn=4,pmem=4000mb,walltime=72:00:00\\n#PBS -q fluxod\\n#PBS -A esnitkin_fluxod\\n#PBS -l qos=flux\\n\\n/home/apirani/anaconda/bin/python /nfs/esnitkin/bin_group/scripts/Scripts_v2.0/variants_position_analysis/reason_job.py -filter2_only_snp_vcf_dir %s -filter2_only_snp_vcf_file %s\\n\" % (job_name, args.filter2_only_snp_vcf_dir, i)\n job_file_name = \"%s.pbs\" % (i)\n f1=open(job_file_name, 'w+')\n f1.write(job_print_string)\n f1.close()\n #os.system(\"mv %s/*.pbs %s/temp\" % (args.filter2_only_snp_vcf_dir, args.filter2_only_snp_vcf_dir))\n pbs_dir = args.filter2_only_snp_vcf_dir + \"/*.pbs\"\n pbs_scripts = glob.glob(pbs_dir)\n\n\n for i in pbs_scripts:\n f3.write(\"bash %s\\n\" % i)\n f3.close()\n with open(command_file, 'r') as fpp:\n for lines in fpp:\n lines = lines.strip()\n command_array.append(lines)\n fpp.close()\n print len(command_array)\n if args.numcores:\n num_cores = int(num_cores)\n else:\n num_cores = multiprocessing.cpu_count()\n results = Parallel(n_jobs=num_cores)(delayed(run_command)(command) for command in command_array)\n\n elif jobrun == \"parallel-single-cluster\":\n print \" \"\n else:\n \"\"\"\n Generate a Command list of each job and run it on local system one at a time\n \"\"\"\n command_array = []\n command_file = \"%s/commands_list.sh\" % args.filter2_only_snp_vcf_dir\n os.system(\"bash %s\" % command_file)",
"def write_slurm_runjob(name, ntasks, pmem, walltime, binary):\n\n nnodes = int(np.ceil(float(ntasks) / 32.0))\n\n runjob = open('runjob', 'w')\n runjob.write('#!/bin/bash\\n')\n runjob.write('#SBATCH --job-name={}\\n'.format(name))\n runjob.write('#SBATCH -o out_%j.log\\n')\n runjob.write('#SBATCH -e err_%j.log\\n')\n runjob.write('#SBATCH --qos=hennig-b\\n')\n runjob.write('#SBATCH --nodes={}\\n'.format(nnodes))\n runjob.write('#SBATCH --ntasks={}\\n'.format(ntasks))\n runjob.write('#SBATCH --mem-per-cpu={}\\n'.format(pmem))\n runjob.write('#SBATCH -t {}\\n\\n'.format(walltime))\n runjob.write('cd $SLURM_SUBMIT_DIR\\n\\n')\n runjob.write('module load intel/2016.0.109\\n')\n runjob.write('module load openmpi/1.10.1\\n')\n runjob.write('module load vasp/5.4.1\\n\\n')\n runjob.write('mpirun {} > job.log\\n\\n'.format(binary))\n runjob.write('echo \\'Done.\\'\\n')\n runjob.close()",
"def make_jobscript(i_file, atm_file=None, dt_file=None, dp_file=None,\n fp_file=None, pp_file=None, sd_file=None, dsl_file=None,\n om_file=None, extra_vars=extra_vars,\n lapse_rate=6.0, ys=0.0, ye=1000.0, yts=10, yextra=100,\n mpi_exec=mpi_exec, pism_exec=pism_exec, pism_root=pism_root,\n nodes=1, time='24:00:00', out_dir=None, job_name='unnamed',\n ntasks_per_node=36, **boot_kwargs):\n\n # expand path to PISM root\n pism_root = os.path.abspath(pism_root)\n\n # get input and component model arguments\n input_args = get_input_args(i_file, pism_root=pism_root, **boot_kwargs)\n atm_args = get_atm_args(atm_file=atm_file, lapse_rate=lapse_rate,\n dt_file=dt_file, dp_file=dp_file, fp_file=fp_file,\n pp_file=pp_file, pism_root=pism_root)\n surface_args = get_surface_args(sd_file=sd_file, pism_root=pism_root)\n ocean_args = get_ocean_args(dsl_file=dsl_file, om_file=om_file, pism_root=pism_root)\n\n # format script\n script = template.format(**locals())\n\n # write script to file\n script_path = os.path.join(out_dir, 'job.' + job_name + '.sh')\n with open(script_path, 'w') as f:\n f.write(script)\n\n # return path to job script\n return script_path",
"def scriptGen(self,tmpd='/tmp/jose',libRev='last',submode='qsub',\n redirect=1,PBSoptions=''):\n jobname=self.name\n outdir=self.outd\n qsubdir=scratchdir+'/qsub/'+todayDate() #subdirectory to deposit the script\n if not os.path.exists(qsubdir): pastry('/bin/mkdir -p '+qsubdir)\n script=qsubdir+'/'+jobname+'.sh' #full script file name\n\n if len(jobname) > 15:\n sys.stderr.write('Error: job name '+jobname+' cannot exceed 15 characters')\n return ''\n if not os.path.exists(outdir): os.system('/bin/mkdir -p '+outdir)\n buf=''\n ulimit=int(float(mem_limit)*1024) #maximum resident memory size (Kb) to prevent swapping\n wd=tmpd+'/${PBS_JOBID}'\n #wd=tmpd+'/'+ re.compile('\\W').sub('',self.name) +'_$$' #working directory\n logname=jobname+'.log'\n local_log=wd+'/'+logname\n remote_log=outdir+'/'+logname\n buf= '#!/bin/bash\\n\\n'\n buf+= PBSoptions+'\\n\\n'\n buf+= '#bash function to update library\\n'\n buf+= self.updateNodeLib(libRev)+'\\n\\n'\n buf+= '#bash function to import temporary libs\\n'\n buf+= self.shared_temporal_libraries()+'\\n\\n'\n buf+= '#bash function to clean exit\\n'\n buf+= self.cleanup_exit(submode=submode)+'\\n\\n'\n buf+= 'echo \"'+script+'\"\\n' #write script name withing script body\n buf+= 'hostname\\n' #node where job will be run\n buf+= 'echo $PBS_JOBID\\n'\n buf+= 'ulimit -m '+`ulimit`+' #maximum memory\\n'\n buf+= 'source ~/.bash_profile >/dev/null #environment variables\\n'\n buf+= 'wd='+wd+' #working directory\\n'\n buf+= '/bin/mkdir -p $wd\\n'\n buf+= 'export LOCAL_LOG=\"'+local_log+'\"\\n'\n buf+= '/bin/touch $LOCAL_LOG\\n'\n if submode=='sub' and redirect:\n buf+='exec &> $LOCAL_LOG #redirect STODOUT, STDERR to LOCAL_LOG\\n' \n buf+= 'export REMOTE_LOG=\"'+remote_log+'\"\\n'\n\n but+= '#clean up old log file\\n'\n buf+= 'if [ -f $REMOTE_LOG ]; then\\n' \n buf+= ' /bin/rm -f $REMOTE_LOG\\n'\n buf+= 'fi\\n\\n'\n\n buf+= 'trap \"cleanup_exit 1\" TERM #in case of killing job\\n\\n'\n\n buf+= '#update node code library && import libraries\\n'\n buf+= 'if !('\n buf+= 'updateNodeLib && ' \n buf+= 'shared_temporal_libraries _PREPARE_'\n buf+= ');then\\n'\n buf+= ' cleanup_exit 1\\n'\n buf+= 'fi\\n\\n'\n \n buf+= '/bin/cp '+' '.join(self.inpl)+' $wd #bring input files\\n' \n buf+= 'cd $wd\\n\\n'\n buf+= '#Test command success\\n'\n buf+= 'exs=0 #variable holding script exit status\\n'\n buf+= 'if !('\n buf+= self.exe\n buf+= ');then\\n'\n buf+= ' exs=1\\n'\n buf+= 'fi\\n\\n'\n buf+= '#move even partial results (exs=1)\\n'\n buf+= '/bin/mv '+' '.join(self.outl)+' '+outdir+'\\n'\n buf+= 'cleanup_exit $exs'\n\n open(script,'w').write(buf)\n pastry('chmod u+x '+script)\n\n return script",
"def create_script(sh_file, cmds, max_workers, num_nodes=1):\n output = os.path.dirname(sh_file)\n job_name = os.path.splitext(os.path.basename(sh_file))[0]\n err_file = os.path.join(output,\"{0}.error\".format(job_name))\n complete_file = os.path.join(output, \"{0}.complete\".format(job_name))\n with open(sh_file, 'w') as of:\n of.write(\"#!/bin/bash\\n\")\n of.write(\"#PBS -N {0}\\n\".format(job_name))\n of.write(\"#PBS -l nodes={0}:ppn={1}\\n\".format(num_nodes,max_workers))\n of.write(\"#PBS -l walltime=2:30:00\\n\")\n of.write(\"#PBS -l vmem=8g\\n\")\n of.write(\"#PBS -j eo\\n\")\n of.write(\"#PBS Join_Path={0}\\n\".format(os.path.join(output,\"%s.err\"%job_name)))\n of.write(\"module load samtools/1.9\\n\")\n of.write(\"module load bedtools/2.27.1\\n\")\n of.write(\"{0}\\n\".format(cmds[0]))\n of.write(\"if [ $? -ne 0 ]; then \\n\\ttouch {0};exit 1 \\nfi\\n\".format(err_file))\n of.write(\"{0}\\n\".format(cmds[1]))\n of.write(\"if [ $? -ne 0 ]; then \\n\\ttouch {0}\\nelse\\n\\ttouch {1} \\nfi\\n\".format(err_file, complete_file))\n os.system(\"chmod 755 %s\" % sh_file)",
"def run( **kwargs ):\n\n # combine options using correct preference\n options = dict(PARAMS.items())\n options.update( getCallerLocals().items() )\n options.update( kwargs.items() )\n\n def setupJob( session ):\n\n jt = session.createJobTemplate()\n jt.workingDirectory = os.getcwd()\n jt.jobEnvironment = { 'BASH_ENV' : '~/.bashrc' }\n jt.args = []\n jt.nativeSpecification = \"-V -q %s -p %i -N %s %s\" % \\\n (options.get(\"job_queue\", global_options.cluster_queue ),\n options.get(\"job_priority\", global_options.cluster_priority ),\n \"_\" + re.sub( \"[:]\", \"_\", os.path.basename(options.get(\"outfile\", \"ruffus\" ))),\n options.get(\"job_options\", global_options.cluster_options))\n\n # keep stdout and stderr separate\n jt.joinFiles=False\n\n return jt\n\n shellfile = os.path.join( os.getcwd(), \"shell.log\" )\n \n # run multiple jobs\n if options.get( \"statements\" ):\n\n statement_list = []\n for statement in options.get(\"statements\"): \n options[\"statement\"] = statement\n statement_list.append(buildStatement( **options))\n \n if options.get( \"dryrun\", False ): return\n\n # get session for process - only one is permitted\n pid = os.getpid()\n if pid not in global_sessions: \n\n L.debug( \"creating new drmaa session for pid %i\" % pid )\n global_sessions[pid]=drmaa.Session() \n global_sessions[pid].initialize()\n\n session = global_sessions[pid]\n \n jt = setupJob( session )\n \n jobids, filenames = [], []\n for statement in statement_list:\n # create job script\n tmpfile = tempfile.NamedTemporaryFile( dir = os.getcwd() , delete = False )\n tmpfile.write( \"#!/bin/bash\\n\" ) # -l -O expand_aliases\\n\" )\n tmpfile.write( 'echo \"START--------------------------------\" >> %s \\n' % shellfile )\n tmpfile.write( '''echo 'statement=%s' >> %s\\n''' % (statement, shellfile) )\n tmpfile.write( \"set &>> %s\\n\" % shellfile)\n tmpfile.write( \"module list &>> %s\\n\" % shellfile )\n tmpfile.write( 'echo \"END----------------------------------\" >> %s \\n' % shellfile )\n tmpfile.write( expandStatement(statement) + \"\\n\" )\n tmpfile.close()\n\n # build paths\n job_path = os.path.abspath( tmpfile.name )\n stdout_path = job_path + \".stdout\" \n stderr_path = job_path + \".stderr\" \n\n jt.remoteCommand = job_path\n jt.outputPath=\":\"+ stdout_path\n jt.errorPath=\":\" + stderr_path\n\n os.chmod( job_path, stat.S_IRWXG | stat.S_IRWXU )\n\n jobid = session.runJob(jt)\n jobids.append( jobid )\n filenames.append( (job_path, stdout_path, stderr_path) )\n\n L.debug( \"job has been submitted with jobid %s\" % str(jobid ))\n \n L.debug( \"waiting for %i jobs to finish \" % len(jobids) )\n session.synchronize(jobids, drmaa.Session.TIMEOUT_WAIT_FOREVER, False)\n \n # collect and clean up\n for jobid, statement, paths in zip( jobids, statement_list, filenames) :\n job_path, stdout_path, stderr_path = paths\n retval = session.wait(jobid, drmaa.Session.TIMEOUT_WAIT_FOREVER)\n\n stdout, stderr = getStdoutStderr( stdout_path, stderr_path )\n\n if retval.exitStatus != 0:\n raise PipelineError( \"---------------------------------------\\n\"\n \"Child was terminated by signal %i: \\n\"\n \"The stderr was: \\n%s\\n%s\\n\" \n \"---------------------------------------\\n\" % \\\n (retval.exitStatus, \n \"\".join( stderr),\n statement ) )\n\n os.unlink( job_path )\n \n session.deleteJobTemplate(jt)\n\n # run a single parallel job\n elif (options.get( \"job_queue\" ) or options.get( \"to_cluster\" )) and not global_options.without_cluster:\n\n statement = buildStatement( **options )\n\n if options.get( 
\"dryrun\", False ): return\n\n tmpfile = tempfile.NamedTemporaryFile( dir = os.getcwd() , delete = False )\n tmpfile.write( \"#!/bin/bash\\n\" ) # -l -O expand_aliases\\n\" )\n\n tmpfile.write( 'echo \"START--------------------------------\" >> %s \\n' % shellfile )\n tmpfile.write( 'echo \"statement=%s\" >> %s\\n' % (statement, shellfile) )\n tmpfile.write( \"set &>> %s\\n\" % shellfile)\n tmpfile.write( \"module list &>> %s\\n\" % shellfile )\n tmpfile.write( 'echo \"END----------------------------------\" >> %s \\n' % shellfile )\n tmpfile.write( expandStatement( statement ) + \"\\n\" )\n tmpfile.close()\n\n job_path = os.path.abspath( tmpfile.name )\n stdout_path = job_path + \".stdout\" \n stderr_path = job_path + \".stderr\" \n\n os.chmod( job_path, stat.S_IRWXG | stat.S_IRWXU )\n\n # get session for process - only one is permitted\n pid = os.getpid()\n if pid not in global_sessions:\n L.debug( \"creating new drmaa session for pid %i\" % pid )\n global_sessions[pid]=drmaa.Session() \n global_sessions[pid].initialize()\n\n session = global_sessions[pid]\n\n jt = setupJob( session )\n\n jt.remoteCommand = job_path\n # later: allow redirection of stdout and stderr to files; can even be across hosts?\n jt.outputPath=\":\"+ stdout_path\n jt.errorPath=\":\" + stderr_path\n\n if \"job_array\" in options and options[\"job_array\"] != None:\n # run an array job\n start, end, increment = options.get(\"job_array\" )\n L.debug(\"starting an array job: %i-%i,%i\" % (start, end, increment ))\n # sge works with 1-based, closed intervals\n jobids = session.runBulkJobs( jt, start+1, end, increment )\n L.debug( \"%i array jobs have been submitted as jobid %s\" % (len(jobids), jobids[0]) )\n retval = session.synchronize(jobids, drmaa.Session.TIMEOUT_WAIT_FOREVER, True)\n else:\n jobid = session.runJob(jt)\n L.debug( \"job has been submitted with jobid %s\" % str(jobid ))\n try:\n retval = session.wait(jobid, drmaa.Session.TIMEOUT_WAIT_FOREVER)\n except Exception, msg:\n # ignore message 24 in PBS\n # code 24: drmaa: Job finished but resource usage information and/or termination status could not be provided.\":\n if not msg.message.startswith(\"code 24\"): raise\n retval = None\n\n stdout, stderr = getStdoutStderr( stdout_path, stderr_path )\n\n if \"job_array\" not in options:\n if retval and retval.exitStatus != 0:\n raise PipelineError( \"---------------------------------------\\n\"\n \"Child was terminated by signal %i: \\n\"\n \"The stderr was: \\n%s\\n%s\\n\"\n \"-----------------------------------------\" % \\\n (retval.exitStatus, \n \"\".join( stderr), statement))\n \n session.deleteJobTemplate(jt)\n os.unlink( job_path )\n\n else:\n statement = buildStatement( **options )\n\n if options.get( \"dryrun\", False ): return\n \n if \"<(\" in statement:\n if \"'\" in statement: raise ValueError( \"advanced bash syntax combined with single quotes\" )\n statement = \"\"\"/bin/bash -c '%s'\"\"\" % statement\n\n process = subprocess.Popen( expandStatement( statement ),\n cwd = os.getcwd(), \n shell = True,\n stdin = subprocess.PIPE,\n stdout = subprocess.PIPE,\n stderr = subprocess.PIPE )\n\n # process.stdin.close()\n stdout, stderr = process.communicate()\n\n if process.returncode != 0:\n raise PipelineError( \"---------------------------------------\\n\"\n \"Child was terminated by signal %i: \\n\"\n \"The stderr was: \\n%s\\n%s\\n\"\n \"-----------------------------------------\" % \\\n (-process.returncode, stderr, statement ))",
"def add_to_slurm_queue(collection, exps_list, unobserved=False, post_mortem=False,\n output_to_file=True, output_to_console=False, srun=False,\n debug_server=False):\n\n nexps = len(exps_list)\n exp_chunks = chunk_list(exps_list)\n exp_arrays = batch_chunks(exp_chunks)\n njobs = len(exp_chunks)\n narrays = len(exp_arrays)\n\n logging.info(f\"Starting {nexps} experiment{s_if(nexps)} in \"\n f\"{njobs} Slurm job{s_if(njobs)} in {narrays} Slurm job array{s_if(narrays)}.\")\n\n for exp_array in exp_arrays:\n sbatch_options = exp_array[0][0]['slurm']['sbatch_options']\n job_name = get_exp_name(exp_array[0][0], collection.name)\n set_slurm_job_name(sbatch_options, job_name, exp_array[0][0])\n if srun:\n assert len(exp_array) == 1\n assert len(exp_array[0]) == 1\n seml_arguments = []\n seml_arguments.append(\"--debug\")\n if post_mortem:\n seml_arguments.append(\"--post-mortem\")\n if output_to_console:\n seml_arguments.append(\"--output-to-console\")\n if not output_to_file:\n seml_arguments.append(\"--no-file-output\")\n if debug_server:\n seml_arguments.append(\"--debug-server\")\n start_srun_job(collection, exp_array[0][0], unobserved,\n srun_options=sbatch_options,\n seml_arguments=seml_arguments)\n else:\n if output_to_file:\n output_dir_path = get_output_dir_path(exp_array[0][0])\n else:\n output_dir_path = \"/dev/null\"\n assert not post_mortem\n start_sbatch_job(collection, exp_array, unobserved,\n name=job_name, output_dir_path=output_dir_path,\n sbatch_options=sbatch_options,\n max_simultaneous_jobs=exp_array[0][0]['slurm'].get('max_simultaneous_jobs'),\n debug_server=debug_server)",
"def create_batch_config(slurm_config):\n\n # magic number\n b = \"#!/bin/bash%s\" % sl\n\n #########################\n # auto-generated header #\n #########################\n b += \"######################################################%s\" % sl\n b += \"# WARNING - AUTO GENERATED FILE%s\" % sl\n b += \"# Please don't modify that file manually%s\" % sl\n b += \"######################################################%s\" % sl\n\n ######################\n # node configuration #\n ######################\n # job name\n b += \"#SBATCH --job-name=\\\"%s%d\\\"%s\" % (slurm_config.job_name,\n slurm_config.job_number, sl)\n\n # number of nodes required to execute the job\n b += \"#SBATCH --nodes=%s%s\" % (slurm_config.num_nodes, sl)\n\n # number of cpus per tasks\n b += \"#SBATCH --cpus-per-task=%s%s\" % (slurm_config.num_cpus_per_task, sl)\n\n # number of tasks\n b += \"#SBATCH --ntasks=%s%s\" % (slurm_config.num_tasks_per_node, sl)\n\n # memory required per task in Mbytes\n b += \"#SBATCH --mem=%s%s\" % (slurm_config.memory_mb, sl)\n\n # slurm session time\n b += \"#SBATCH --time=%s%s\" % (slurm_config.session_time, sl)\n\n # job partition\n b += \"#SBATCH --partition=%s%s\" % (slurm_config.partition, sl)\n\n # job account\n b += \"#SBATCH --account=%s%s\" % (slurm_config.project_name, sl)\n\n # On which nodes, this job will be executed\n # This option is used if the required modules are installed on a specific\n # node\n # b += \"#SBATCH --nodelist=%s%s\" % (slurm_config.node_list, sl)\n\n #####################\n # user notification #\n #####################\n if slurm_config.enable_email_notification:\n b += \"#SBATCH --mail-type=ALL%s\" % sl\n b += \"#SBATCH --mail-user=%s%s\" % (slurm_config.user_email, sl)\n\n ##################\n # log generation #\n ##################\n if slurm_config.enable_logs:\n std_out = \"%s/slurm-stdout_%d.log\" % \\\n (slurm_config.log_files_path, slurm_config.job_number)\n std_err = \"%s/slurm-stderr_%d.log\" % \\\n (slurm_config.log_files_path, slurm_config.job_number)\n b += \"#SBATCH --output=%s%s\" % (std_out, sl)\n b += \"#SBATCH --error=%s%s\" % (std_err, dl)\n\n ####################\n # System variables #\n ####################\n # slurm profile\n b += \"# Loading profiles%s\" % sl\n b += \"%s%s\" % (slurm_config.profile, dl)\n\n # job home\n b += \"#JOB_HOME=\\\"%s\\\"%s\" % (slurm_config.execution_path, sl)\n\n # KERBEROS renewal\n b += \"# Renewal of KERBEROS periodically for the length of the job%s\" % sl\n b += \"krenew -b -K 30%s\" % dl\n\n # slurm modules\n b += \"# Loading the modules.%s\" % sl\n b += \"%s%s\" % (slurm_config.modules, dl)\n\n # environmental variables\n b += \"# Setting the environmental variables.%s\" % sl\n b += \"export PATH=%s:$PATH%s\" % (slurm_config.env_path, sl)\n b += \"export LD_LIBRARY_PATH=%s:$LD_LIBRARY_PATH%s\" % \\\n (slurm_config.env_ld_library_path, sl)\n b += \"export PYTHONPATH=%s:$PYTHONPATH%s\" % (slurm_config.env_python_path,\n dl)\n # node list\n b += \"echo \\\"On which node your job has been scheduled :\\\"%s\" % sl\n b += \"echo $SLURM_JOB_NODELIST%s\" % dl\n\n # shell limits\n b += \"echo \\\"Print current shell limits :\\\"%s\" % sl\n b += \"ulimit -a%s\" % dl\n\n # running the serial tasks.\n b += \"echo \\\"Now run your serial tasks ...\\\"%s\" % sl\n b += \"cd %s%s\" % (slurm_config.execution_path, dl)\n ####################################################################\n\n return b",
"def write_merge_script(s,inputs=[]):\n assert len(inputs)>0\n # hadd determines if we are merging main histograms file, or unfolding files\n hadd = True if s.jobtype == \"MRG\" else False\n s.jobfile = os.path.join(s.submitdir, 'merge_wasym.sh' if hadd else 'munfold_wasym.sh')\n s.outROOT = ('root_' if hadd else 'unfold_')+s.tag+\".root\"\n s.outROOTpath = os.path.join('results','ana_wasym',s.outROOT)\n pre = 'merge' if hadd else 'munfold'\n s.outOU = os.path.join(s.submitdir, pre+'_wasym.out.log')\n s.outER = os.path.join(s.submitdir, pre+'_wasym.err.log')\n s.outLOG = os.path.join(s.submitdir, pre+'_wasym.log.log')\n flist = 'wasym.root.list' if hadd else 'wasym.unfold.list'\n s.outputs += [flist]\n f = open(s.jobfile, \"w\")\n print >>f, SH_PRE%(s.fdic[0],s.fdic[1])\n print >>f,'RMODE=merge'\n print >>f,'nexpected=%d'%len(inputs)\n print >>f,'ntot=0'\n print >>f,'rm -f ${ROOTDIR}/%s ; touch ${ROOTDIR}/%s;'%(flist,flist)\n for fin in inputs:\n fname = fin if hadd else '%s.unfold'%fin\n print >>f,'f=\"${RESDIR}/%s.root\"'%fname\n print >>f,'st=`xrd uct3-xrd.mwt2.org existfile $f`'\n print >>f,'if [ \"$st\" == \"The file exists.\" ]; then'\n # xrootd files: reduce cache size, since hadd is stupid and will eat 100% of RAM\n print >>f,'echo ${RESHOST}/$f?cachesz=1000000 >> ${ROOTDIR}/%s'%flist\n print >>f,'((ntot++))'\n print >>f,'else'\n print >>f,'echo ERROR: failed to locate file $f'\n print >>f,'fi'\n print >>f,'if [ \"$ntot\" -eq \"$nexpected\" ]; then echo \"ALL DONE\"; else echo \"ERROR: missing `expr $nexpected - $ntot` files\"; echo exit 202; exit 202; fi'\n print >>f,'if [ \"$ntot\" -eq \"0\" ]; then echo \"ERROR: no files to merge\"; echo exit 203; exit 203; fi'\n print >>f,\"\"\"\n# a special version of hadd that adds files in chunks of 20\nfunction hadd2() {\n local per\n per=30 #20\n fin=$1\n opts=$2\n fout=$3\n shift\n n=`cat $fin | wc -l`\n ngrp=`expr $n / $per`\n nrem=`expr $n % $per`\n if [ \\\"$nrem\\\" == \\\"0\\\" ]; then ngrp=`expr $ngrp - 1`; fi\n for igrp in `seq 0 $ngrp`; do\n\timin=`expr $per \\* $igrp`\n\timax=`expr $per \\* $igrp + $per`\n\tif [ \\\"$imax\\\" -gt \\\"$n\\\" ]; then imax=`expr $per \\* $igrp + $nrem`; fi\n\t# offset by 1\n\timin=`expr $imin + 1`\n\timax=`expr $imax`\n\tidel=`expr $imax - $imin + 1`\n\techo \\\"===== Part $igrp / $ngrp : $imin to $imax\\\"\n\techo hadd ${opts} \\\"${fout}.TMPHADD_${igrp}.root\\\" `cat $fin | head -n $imax | tail -n $idel`\n\thadd ${opts} \\\"${fout}.TMPHADD_${igrp}.root\\\" `cat $fin | head -n $imax | tail -n $idel`\n\tst=$?\n\tif [ \\\"$st\\\" != \\\"0\\\" ]; then\n\t echo \\\"ERROR: merge step $igrp failed. Bailing out...\\\"\n\t return $st\n\tfi\n done\n # remove opts to speed up the last step and prevent creation of additional ntuple cycles;2\n echo hadd ${fout} ${fout}.TMPHADD_*root*\n hadd ${fout} ${fout}.TMPHADD_*root*\n st=$?\n rm -f ${fout}.TMPHADD_*root*\n return $st\n}\n \"\"\"\n if False:\n if hadd:\n print >>f, 'echo hadd -O %s `cat ${ROOTDIR}/%s`'%(s.outROOTpath,flist)\n print >>f, 'hadd -O %s `cat ${ROOTDIR}/%s`'%(s.outROOTpath,flist)\n else:\n print >>f, 'echo hadd -T %s `cat ${ROOTDIR}/%s`'%(s.outROOTpath,flist)\n print >>f, 'hadd -T %s `cat ${ROOTDIR}/%s`'%(s.outROOTpath,flist)\n else:\n print >>f, 'hadd2 ${ROOTDIR}/%s \"%s\" %s'%(flist,\"-O\" if hadd else \"-T\",s.outROOTpath)\n print >>f, \"status=$?\"\n print >>f, SH_POST\n f.close()\n os.system('chmod +x %s'%s.jobfile)\n s.write_submit_script()\n return True",
"def makeJob(kallisto, index, meta, bootstraps, files, single, s=1, l=180): \n cmd = \"%(kallisto)s quant -i %(index)s -o %(meta)s \" % locals()\n for file in files: \n cmd += \" ../%s\" % file \n if single: \n cmd += \" --single -l %(l)i -s %(s)i\" % locals()\n cmd += \" &> %s.log.txt\" % meta\n return cmd",
"def build_slurm_header(pars):\n name = pars.get('name','default')\n job = 'job_'+name\n\n lines = []\n lines.append('#!/bin/bash')\n lines.append('#SBATCH --nodes=%s ### Number of nodes'%pars['nodes'])\n lines.append('#SBATCH --ntasks-per-node=%s ### Number of MPI tasks per node'%pars['ntasks_per_node'])\n lines.append('#SBATCH --cpus-per-task=%s ### Number of HT per task'%pars['cpus_per_task'])\n if pars['gpus_per_node'] is not None:\n lines.append('#SBATCH --gpus-per-node=%s ### Number of GPUS per node'%pars['gpus_per_node'])\n if pars['memory'] is not None:\n lines.append('#SBATCH --mem %s ### Memory per node'%pars['memory'])\n if pars['time'] is not None:\n lines.append('#SBATCH --time %s ### Walltime, format: HH:MM:SS'%pars['time'])\n if pars['partition'] is not None:\n lines.append('#SBATCH --partition %s'%pars['partition'])\n if pars['account'] is not None:\n lines.append('#SBATCH --account %s'%pars['account'])\n if pars['qos'] is not None:\n lines.append('#SBATCH --qos %s'%pars['qos'])\n lines.append('#SBATCH --job-name=%s'%job)\n lines.append('#SBATCH --output=%s.out'%job)\n lines.append('')\n lines.append('export OMP_NUM_THREADS=%s'%pars['omp_num_threads'])\n lines.append('')\n lines.append('echo \"Cluster name $SLURM_CLUSTER_NAME\"')\n lines.append('echo \"Job name $SLURM_JOB_NAME \"')\n lines.append('echo \"Job id $SLURM_JOB_ID\"')\n lines.append('echo \"Job nodelist $SLURM_JOB_NODELIST\"')\n lines.append('echo \"Number of nodes $SLURM_JOB_NUM_NODES\"')\n lines.append('echo \"Number of tasks $SLURM_NTASKS\"')\n lines.append('echo \"Number of tasks per node $SLURM_TASKS_PER_NODE\"')\n lines.append('echo \"Number of threads per task $SLURM_CPUS_PER_TASK\"')\n lines.append('echo \"Number of gpus per node $SLURM_GPUS_PER_NODE\"')\n lines.append('echo \"OMP_NUM_THREADS : $OMP_NUM_THREADS\"')\n lines.append('')\n lines.append('echo \" \"')\n lines.append('echo \"###############End of the header section###############\"')\n lines.append('echo \" \"')\n lines.append('')\n\n return lines",
"def submitSlurmScript(commands_list, outputName = None):\n longString = \";\".join(commands_list)\n print(longString.replace(\";\", \"\\n\"))\n if outputName is not None:\n sCommand = 'sbatch -p short -c 1 -t 0-11:59 --mem=60G [email protected] \\\n --output {outputSlurm} --wrap=\"{commandString}\"'.format(commandString = longString, outputSlurm = outputName)\n else: \n sCommand = 'sbatch -p short -c 1 -t 0-11:59 --mem=60G [email protected] \\\n --wrap=\"{0}\"'.format(longString)\n os.system(sCommand)",
"def FSC2(input_dir, num_reps=50, min_sims=100000, max_ecm=20, calc_CI=False, numcores=1, scratch_mb='200', time_scratch=\"01:50:00\", mem=\"200\", print1=False, overwrite=\"None\", fsc2_path=\"/storage/plzen1/home/holcovam/programs/fsc26_linux64/fsc26\"):\n Data_Files = []\n tpl_files = []\n est_files = []\n CI_Data_Files = []\n shlist = []\n\n if input_dir.endswith(\"/\") is False:\n input_dir += \"/\"\n\n for path in os.listdir(input_dir):\n if os.path.isdir(input_dir + path) and path.startswith(\"FSC2input\"):\n samp_name = path.split(\"_\")[1]\n #folder_name = samp_name\n if samp_name + \"_DSFS.obs\" in os.listdir(input_dir + path):\n for i in range(0, num_reps):\n new_file = open(input_dir + path + \"/\" + samp_name + str(i) + \"_DSFS.obs\", 'w')\n with open(input_dir + path + \"/\" + samp_name + \"_DSFS.obs\") as data_file:\n for line in data_file:\n new_file.write(line)\n new_file.close()\n Data_Files.append(input_dir + path + \"/\" + samp_name + str(i) + \"_DSFS.obs\")\n else:\n print(\"Did not find input data file for: \", samp_name)\n if calc_CI == \"True\":\n num_files = 0\n for file in os.listdir(input_dir + path):\n if file.endswith(\"_DSFS.obs\") and file.split(\"_\")[-2].split(\".\")[-1][0:3] == \"rep\" and file != samp_name + \"_DSFS.obs\":\n for i in range(0, num_reps):\n new_file = open(input_dir + path + \"/\" + samp_name + file.split(\"_\")[-2].split(\".\")[-1].split(\"_\")[0]+ \"_\" + str(i) + \"_DSFS.obs\", 'w')\n with open(input_dir + path + \"/\" + file) as data_file:\n for line in data_file:\n new_file.write(line)\n new_file.close()\n CI_Data_Files.append(input_dir + path + \"/\" + samp_name + file.split(\"_\")[-2].split(\".\")[-1].split(\"_\")[0]+ \"_\" + str(i) + \"_DSFS.obs\")\n num_files += 1\n if len(CI_Data_Files) < 1:\n print(\"Did not find bootstrap replicates for: \", samp_name)\n else:\n print(\"Found \", num_files, \" replicate dsfs files for CI calculation for \", samp_name)\n if path.endswith(\".tpl\"):\n tpl_files.append(path)\n est_files.append(path.split(\".\")[0])\n if len(tpl_files) == 0:\n print(\"Did not find any tpl files!! 
Aborting!!\")\n else:\n if calc_CI == \"True\":\n Data_Files = CI_Data_Files\n for file in Data_Files:\n name = file.split(\"_DSFS\")[0]\n samp_name = name.split(\"/\")[-1]\n folder_name = samp_name [0:11]\n for tpl in tpl_files:\n tpl_name = tpl.split(\".tpl\")[0]\n if os.path.isdir(name + \"_\" + tpl_name) is False or overwrite == \"hard\":\n new_tpl = open(name + \"_\" + tpl_name + \".tpl\", 'w')\n new_data = open(name + \"_\" + tpl_name + \"_DSFS.obs\", 'w')\n\n with open(file, 'r') as data:\n for i, line in enumerate(data):\n if i == 1:\n pop_info = line.strip(\"\\n\").strip(\"\\t\").split(\"\\t\")\n pop_num = int(pop_info[0])\n samp_nums = pop_info[-pop_num:]\n new_data.write(line)\n with open(input_dir + tpl, 'r') as template:\n samp_num_lines = pop_num + 4\n for i, line in enumerate(template):\n if i < samp_num_lines:\n new_tpl.write(line)\n elif i == samp_num_lines:\n for num in samp_nums:\n new_tpl.write(num + \"\\n\")\n elif i >= samp_num_lines + len(samp_nums):\n new_tpl.write(line)\n new_est = open(name + \"_\" + tpl_name + \".est\", 'w')\n try:\n with open(input_dir + tpl_name + \".est\") as est:\n for line in est:\n new_est.write(line)\n except FileNotFoundError:\n print(\"Did not find est file for: \", tpl)\n #folder_name = samp_name ''.join(i for i in s if not i.isdigit())\n shname = name + \"_\" + tpl_name + \".sh\"\n shfile5 = open(shname, 'w')\n shfile5.write('#!/bin/bash -e\\n' +\n '#PBS -N '+samp_name+'\\n' +\n '#PBS -l walltime='+str(time_scratch)+'\\n' +\n '#PBS -l select=1:ncpus='+str(numcores)+':mem='+str(mem)+'mb:scratch_local='+str(scratch_mb)+'mb\\n' +\n '#PBS -m abe\\n' +\n '#PBS -j oe\\n\\n' +\n 'module add python-3.4.1-gcc\\n'+\n 'module add python34-modules-gcc\\n'+\n 'trap \\'clean_scratch\\' TERM EXIT\\n'+\n 'if [ ! -d \"$SCRATCHDIR\" ] ; then echo \"Scratch not created!\" 1>&2; exit 1; fi \\n' +\n 'DATADIR=\"/storage/plzen1/home/holcovam/ScanTools\"\\n' +\n 'cp $DATADIR/'+ input_dir + \"FSC2input_\" + folder_name+ \"/\" + samp_name + \"_\" + tpl_name + '* $SCRATCHDIR || exit 1\\n'+\n 'cp '+fsc2_path+' $SCRATCHDIR || exit 1\\n'+\n 'cd $SCRATCHDIR || exit 2\\n' +\n 'echo data loaded at `date`\\n\\n' +\n 'chmod +x fsc26 \\n' +\n #'ls -l \\n' +\n './fsc26 -t ' + samp_name + \"_\" + tpl_name + '.tpl -e ' + samp_name + \"_\" + tpl_name + '.est -n ' + str(min_sims) + ' -u -d -q -L ' + str(max_ecm) + ' -M \\n' + \n 'rm seed.txt \\n'+\n 'rm fsc26\\n'+\n 'rm *DSFS.obs\\n'+\n 'rm *.sh\\n'+\n 'rm *.tpl \\n'+\n 'rm *.est \\n'+\n #'ls -l \\n' +\n 'cp $SCRATCHDIR/*.par $DATADIR/'+ input_dir + \"FSC2input_\" + folder_name+' || exit 1\\n'+\n 'rm *.par \\n'+\n 'cp -r $SCRATCHDIR/* $DATADIR/'+input_dir+' || export CLEAN_SCRATCH=false\\n'+\n 'printf \"\\\\nFinished\\\\n\\\\n\"\\n')\n shfile5.close()\n shlist.append(shname)\n\n############IF PROBLEM WITH EXCESS OF NONCONVERGED CHAINS, COPY /home/majda/alpine/fastsimcoal2/afterWPSG/scripts/notConverged.py here ###################\n\n else:\n print(\"Output for \" + samp_name + \"_\" + tpl_name + \" already exists. Use hard_overwrite = True to overwrite.\")\n return shlist",
"def _generate_hadoop_shell_script(arg_list, shell_env, working_dir, turi_dist_path, **kwargs):\n script_file = tempfile.NamedTemporaryFile(delete=False)\n logger.debug(\"script file name: \" + script_file.name)\n\n filenames_needed = ['dml_commander_startup',\n 'dml_worker_startup',\n 'libdml_toolkits.so',\n 'libdml_shared.so',\n 'libhdfs.so',\n 'libminipsutil.so',\n 'libc++abi.so.1']\n\n copy_cmd = \"hadoop fs -copyToLocal \" + turi_dist_path + \"/\"\n for i in filenames_needed:\n script_file.write(copy_cmd + DD_BINS_PATH + i + '\\n')\n\n script_file.write(\"chmod 755 ./dml_commander_startup\\n\")\n script_file.write(\"chmod 755 ./dml_worker_startup\\n\")\n script_file.write(\"export LD_LIBRARY_PATH=${JAVA_HOME}/jre/lib/amd64/server:${LD_LIBRARY_PATH}\\n\")\n script_file.write(\"export CLASSPATH=$(hadoop classpath --glob)\\n\")\n for k, v in shell_env.items():\n script_file.write(\"export %s=%s\\n\" % (str(k), str(v)))\n\n script_file.write(\"env\\n\")\n #script_file.write(\"if [ $MY_RANK -eq 0 ]; then\\n\")\n #script_file.write(\" stress --vm-bytes 4g --vm-keep -m 1 --timeout 30\\n\")\n #script_file.write(\"fi\\n\")\n script_file.write(\"if [ $MY_RANK -eq 0 ]; then\\n\")\n script_file.write(\" echo Starting commander\\n\")\n script_file.write(\" ./dml_commander_startup \")\n for arg in arg_list[0]:\n if len(arg) > 7 and arg[0:7] == \"--args=\":\n script_file.write(arg[0:7] + '\"' + arg[7:] + '\" ')\n else:\n script_file.write(arg + \" \")\n script_file.write(\"> >(tee commander.log.stdout) 2> >(tee commander.log.stderr >&2)\")\n script_file.write(\"\\n\")\n script_file.write(\" echo Uploading commander log\\n\")\n script_file.write(\" hadoop fs -put \" + \"./commander.log.stdout \" +\n \"/\".join([working_dir, 'commander.log'])+\".stdout\\n\")\n script_file.write(\" hadoop fs -put \" + \"./commander.log.stderr \" +\n \"/\".join([working_dir, 'commander.log'])+\".stderr\\n\")\n script_file.write(\"else\\n\")\n script_file.write(\" let MY_RANK=$MY_RANK-1\\n\")\n script_file.write(\" echo Starting worker $MY_RANK\\n\")\n script_file.write(\" ./dml_worker_startup \")\n for arg in arg_list[1]:\n script_file.write(arg + \" \")\n script_file.write(\"> >(tee worker.log.stdout) 2> >(tee worker.log.stderr >&2)\")\n script_file.write(\"\\n\")\n script_file.write(\" echo Uploading worker $MY_RANK log\\n\")\n script_file.write(\" hadoop fs -put \" + \"./worker.log.stdout \" +\n \"/\".join([working_dir, \"worker_${MY_RANK}.log\"])+\".stdout\\n\")\n script_file.write(\" hadoop fs -put \" + \"./worker.log.stderr \" +\n \"/\".join([working_dir, \"worker_${MY_RANK}.log\"])+\".stderr\\n\")\n script_file.write(\"fi\\n\")\n script_file.close()\n return script_file.name",
"def start_sbatch_job(collection, exp_array, unobserved=False, name=None,\n output_dir_path=\".\", sbatch_options=None, max_simultaneous_jobs=None,\n debug_server=False):\n import pkg_resources\n\n # Set Slurm job array options\n sbatch_options['array'] = f\"0-{len(exp_array) - 1}\"\n if max_simultaneous_jobs is not None:\n sbatch_options['array'] += f\"%{max_simultaneous_jobs}\"\n\n # Set Slurm output parameter\n if 'output' in sbatch_options:\n raise ConfigError(f\"Can't set sbatch `output` Parameter explicitly. SEML will do that for you.\")\n elif output_dir_path == \"/dev/null\":\n output_file = output_dir_path\n else:\n output_file = f'{output_dir_path}/{name}_%A_%a.out'\n sbatch_options['output'] = output_file\n\n # Construct sbatch options string\n sbatch_options_str = create_slurm_options_string(sbatch_options, False)\n\n # Construct chunked list with all experiment IDs\n expid_strings = [('\"' + ';'.join([str(exp['_id']) for exp in chunk]) + '\"') for chunk in exp_array]\n\n with_sources = ('source_files' in exp_array[0][0]['seml'])\n use_conda_env = ('conda_environment' in exp_array[0][0]['seml']\n and exp_array[0][0]['seml']['conda_environment'] is not None)\n\n # Construct Slurm script\n template = pkg_resources.resource_string(__name__, \"slurm_template.sh\").decode(\"utf-8\")\n prepare_experiment_script = pkg_resources.resource_string(__name__, \"prepare_experiment.py\").decode(\"utf-8\")\n prepare_experiment_script = prepare_experiment_script.replace(\"'\", \"'\\\\''\")\n if 'working_dir' in exp_array[0][0]['seml']:\n working_dir = exp_array[0][0]['seml']['working_dir']\n else:\n working_dir = \"${{SLURM_SUBMIT_DIR}}\"\n\n variables = {\n 'sbatch_options': sbatch_options_str,\n 'working_dir': working_dir,\n 'use_conda_env': str(use_conda_env).lower(),\n 'conda_env': exp_array[0][0]['seml']['conda_environment'] if use_conda_env else \"\",\n 'exp_ids': ' '.join(expid_strings),\n 'with_sources': str(with_sources).lower(),\n 'prepare_experiment_script': prepare_experiment_script,\n 'db_collection_name': collection.name,\n 'sources_argument': \"--stored-sources-dir $tmpdir\" if with_sources else \"\",\n 'verbose': logging.root.level <= logging.VERBOSE,\n 'unobserved': unobserved,\n 'debug_server': debug_server,\n 'tmp_directory': SETTINGS.TMP_DIRECTORY\n }\n setup_command = SETTINGS.SETUP_COMMAND.format(**variables)\n end_command = SETTINGS.END_COMMAND.format(**variables)\n\n script = template.format(\n setup_command=setup_command,\n end_command=end_command,\n **variables,\n )\n\n path = os.path.join(SETTINGS.TMP_DIRECTORY, f'{uuid.uuid4()}.sh')\n with open(path, \"w\") as f:\n f.write(script)\n\n try:\n output = subprocess.run(f'sbatch {path}', shell=True, check=True, capture_output=True).stdout\n except subprocess.CalledProcessError as e:\n logging.error(f\"Could not start Slurm job via sbatch. Here's the sbatch error message:\\n\"\n f\"{e.stderr.decode('utf-8')}\")\n os.remove(path)\n exit(1)\n\n slurm_array_job_id = int(output.split(b' ')[-1])\n for task_id, chunk in enumerate(exp_array):\n for exp in chunk:\n if not unobserved:\n collection.update_one(\n {'_id': exp['_id']},\n {'$set': {\n 'status': States.PENDING[0],\n 'slurm.array_id': slurm_array_job_id,\n 'slurm.task_id': task_id,\n 'slurm.sbatch_options': sbatch_options,\n 'seml.output_file': f\"{output_dir_path}/{name}_{slurm_array_job_id}_{task_id}.out\"}})\n logging.verbose(f\"Started experiment with array job ID {slurm_array_job_id}, task ID {task_id}.\")\n os.remove(path)",
"def createbash(self,executable,**keywords):\n\t\timport os\n\t\timport stat\n\n\t\toutputname = os.path.join(\"Results\",self.outputfile.replace(\".root\",\"_${SGE_TASK_ID}.root\"))\n\t\t# Extract the input files\n\t\tinputfiles = \"\"\n\t\tfor f in self.inputfiles:\n\t\t\tinputfiles += f+\",\"\n\t\tinputfiles = inputfiles[:-1]\n\n\t\tlines = \"#!/bin/bash\\n\"\n\t\tlines += \"\\n# Script created automatically by skimfiles.py utility\\n\"\n\t\tlines += \"\\nmkdir -p Results\\n\"\n\t\tlines += \"export PATH=$PATH:\"+os.path.join(self.basedir,\"bin\")+\":\"+os.path.join(self.pkgpath,\"bin\")+\"\\n\"\n\t\tlines += \"export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:\"+self.libsdir+\"\\n\"\n\t\tlines += \"\\n\"\n\t\tlines += \"EVENTFILE=\"+self.eventsfile+\"\\n\"\n\t\tlines += \"EVENTS=$(cat $EVENTFILE | head -n $SGE_TASK_ID | tail -n 1)\\n\"\n\t\tlines += executable+\" \"+self.cutid+\" -i \"+inputfiles+\" -c \"+self.cutfile+\\\n\t\t\t\t\" -e $EVENTS -o \"+outputname+\"\\n\"\n\t\n\t\tfilename = self.nameID+\".sh\"\n\t\tf = open(filename,\"w\")\n\t\tf.writelines(lines)\n\t\tf.close()\n\t\tos.chmod(filename,stat.S_IRWXU+stat.S_IRGRP+stat.S_IXGRP+stat.S_IXOTH)\n\t\t\n\t\treturn filename",
"def generate(experiment, ifilename, parameterarray):\n import numpy as np\n import os\n # create file in fms_tmp and copy in requisite files\n rsyncstring = \"rsync -a --exclude='climspinup' \\\n'/network/aopp/hera/mad/bakerh/fms_tmp/climspinup/' \\\n'/network/aopp/hera/mad/bakerh/fms_tmp/\" + experiment + \"'\"\n os.system(rsyncstring)\n # separate code to change run_names and write initial files\n runfile = open('/home/bakerh/fms/exp/' + experiment +\n '/run/' + 'runfile', 'w')\n runfile.write('#!/bin/csh -f\\n')\n for i in range(np.ma.size(parameterarray, axis=0)-1):\n ifile = open('/home/bakerh/fms/exp/' + experiment +\n '/run/' + ifilename, 'r')\n lines = ifile.readlines()\n ifile.close()\n ofile = open('/home/bakerh/fms/exp/' + experiment + '/run/' +\n parameterarray[i+1, 0], 'w')\n for line in lines:\n if line.find('label for') != -1:\n ofile.write('set run_name = ' + parameterarray[i+1, 0] + '\\n')\n else:\n ofile.write(line)\n ofile.close()\n os.chmod('/home/bakerh/fms/exp/' + experiment + '/run/' +\n parameterarray[i+1, 0], 33279)\n runfile.write('./' + parameterarray[i+1, 0] + '\\n')\n # copy restart file and create restart text file\n dirtomake = \"mkdir '/network/aopp/hera/mad/bakerh/fms_tmp/\\\n\" + experiment + \"/\" + parameterarray[i+1, 0] + \"'\"\n os.system(dirtomake)\n copyrestart = \"rsync -a '/network/aopp/hera/mad/bakerh/fms_tmp/\\\nclimspinup/climspinup/output/restart/day3600h00.cpio' \\\n'/network/aopp/hera/mad/bakerh/fms_tmp/\\\n\" + experiment + \"/\" + parameterarray[i+1, 0] + \"'\"\n os.system(copyrestart)\n rfile = open('/network/aopp/hera/mad/bakerh/fms_tmp/' + experiment +\n '/' + parameterarray[i+1, 0] + '/reload_commands', 'w')\n rfile.write('set irun = 1\\n\\\nset init_cond = /network/aopp/hera/mad/bakerh/fms_tmp/' +\n experiment + '/' + parameterarray[i+1, 0] +\n '/day3600h00.cpio \\nset ireload = 2')\n rfile.close()\n runfile.close()\n os.chmod('/home/bakerh/fms/exp/' + experiment + '/run/' + 'runfile', 33279)\n # now alter parameters\n for i in range(np.ma.size(parameterarray, axis=0)-1):\n for j in range(np.ma.size(parameterarray, axis=1)-1):\n parameters('/home/bakerh/fms/exp/' + experiment +\n '/run/' + parameterarray[i+1, 0],\n '/home/bakerh/fms/exp/' +\n experiment + '/run/' + parameterarray[i+1, 0],\n parameterarray[0, j+1], parameterarray[i+1, j+1])",
"def write_job_file(job_name, py_file_name='main.py',\n sbatch_path='/Users/omarschall/vanilla-rtrl/job_scripts/',\n scratch_path='/scratch/oem214/vanilla-rtrl/',\n nodes=1, ppn=1, mem=16, n_hours=24):\n\n job_file = os.path.join(sbatch_path, job_name + '.s')\n log_name = os.path.join('log', job_name)\n\n with open(job_file, 'w') as f:\n f.write(\n '#! /bin/bash\\n'\n + '\\n'\n + '#SBATCH --nodes={}\\n'.format(nodes)\n + '#SBATCH --ntasks-per-node=1\\n'\n + '#SBATCH --cpus-per-task={}\\n'.format(ppn)\n + '#SBATCH --mem={}GB\\n'.format(mem)\n + '#SBATCH --time={}:00:00\\n'.format(n_hours)\n + '#SBATCH --job-name={}\\n'.format(job_name[0:16])\n + '#SBATCH --output={}log/{}.o\\n'.format(scratch_path, job_name[0:16])\n + '\\n'\n + 'module purge\\n'\n + 'SAVEPATH={}library/{}\\n'.format(scratch_path, job_name)\n + 'export SAVEPATH\\n'\n + 'module load python3/intel/3.6.3\\n'\n + 'cd /home/oem214/py3.6.3\\n'\n + 'source py3.6.3/bin/activate\\n'\n + 'cd {}\\n'.format(scratch_path)\n + 'pwd > {}.log\\n'.format(log_name)\n + 'date >> {}.log\\n'.format(log_name)\n + 'which python >> {}.log\\n'.format(log_name)\n + 'python {}\\n'.format(py_file_name)\n )\n\n return job_file"
] | [
"0.6855347",
"0.68422127",
"0.6675286",
"0.650287",
"0.645152",
"0.6278125",
"0.6242975",
"0.6226567",
"0.61965185",
"0.61401135",
"0.60027885",
"0.59648293",
"0.5838186",
"0.58326197",
"0.5755615",
"0.5729635",
"0.57208526",
"0.5701956",
"0.56957155",
"0.5687705",
"0.56868994",
"0.5616956",
"0.56149936",
"0.5586537",
"0.55821127",
"0.5569301",
"0.55688465",
"0.5560054",
"0.55533147",
"0.5532846"
] | 0.7628179 | 0 |
Writes the output in TORQUE multiple job submission format. Creates sub shell scripts that contain the workflow for each input file separately. After this, a main shell script containing the TORQUE configuration is created. This script is responsible for starting the sub shells as separate processes. | def write_torque(workloads, input_file_parameters, command_line_parameters):
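    # Returns a list of paths to the generated TORQUE submission files, one per
    # workload; the per-thread subshell files are written next to them in the
    # output directory.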
validate_resource_manager_parameters(
input_file_parameters.resource_manager_params,
['#PBS -k', '#PBS -N', '#PBS -d', '#PBS -e', '#PBS -t'])
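    # The #PBS flags checked above are appended automatically further below
    # (keep-flags, job name, working/error directories and the array range),
    # so they must not appear in the user-supplied resource manager parameters.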
workload_index = 0
workload_zfill_amount = len(str(len(workloads)))
workload_file_paths = []
for workload in workloads:
        # Each workflow part will have a separate file to submit to TORQUE with
        # the qsub command. Each file has one or more associated subshell files
        # containing the contents for each thread.
# Generate strings describing current workload and thread indexes for
# output file names
workload_index += 1
workload_index_string = str(workload_index).zfill(workload_zfill_amount)
file_main_name = '{0}_TORQUE_WORKLOAD_{1}'.format(NAME,
workload_index_string)
        # When --fix_run mode is used the output and log files already exist.
        # To prevent overwriting these files with new ones, specific prefix or
        # appendix strings are added to the new output file names.
appendix = '.sh'
i = 0
if command_line_parameters.fix_run:
mode = 'FIX'
elif command_line_parameters.compress_run == 'compress':
mode = 'COMPRESS'
elif command_line_parameters.compress_run == 'decompress':
mode = 'DECOMPRESS'
else:
mode = None
while mode is not None and os.path.exists(os.path.join(input_file_parameters.output_dir,
file_main_name + appendix)):
i += 1
appendix = '_{0}_{1}.sh'.format(mode, i)
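        # Illustrative example: with --fix_run and existing output the appendix
        # becomes '_FIX_1.sh' (then '_FIX_2.sh' and so on), keeping the files of
        # the earlier run intact.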
# Generate subshell files
thread_index = 0
for thread_contents in workload:
# Iterate over output commands of each thread and write necessary
# subshell files for each
out_lines = []
cmds_in_thread = len(thread_contents)
for i in xrange(cmds_in_thread):
                # Check whether any modules need loading or whether they are already loaded by the previous command
skip_module_loading = False
if i > 0:
if thread_contents[i].load_module == thread_contents[i-1].load_module:
skip_module_loading = True
                # Check whether any modules need unloading or whether they will be used by the following command
skip_module_unloading = False
if i < cmds_in_thread-1:
if thread_contents[i].load_module == thread_contents[i+1].load_module:
skip_module_unloading = True
out_lines += generate_subshell_file_contents(thread_contents[i],
skip_module_loading,
skip_module_unloading)
# Write subshell file
thread_index_string = str(thread_index)
fl_name = '{0}_WORKLOAD_{1}_subshell_{2}{3}'.format(NAME,
workload_index_string,
thread_index_string,
appendix)
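            # Illustrative subshell file name: <NAME>_WORKLOAD_1_subshell_0.sh,
            # where <NAME> is the module-level NAME constant.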
try:
out_fl = open(os.path.join(input_file_parameters.output_dir,
fl_name), 'w')
except:
raise STAPLERerror.STAPLERerror('Unable to create output file:'
'\n{0}'.format(os.path.join(
input_file_parameters.output_dir,
fl_name)))
out_fl.write('\n'.join(out_lines))
out_fl.write('\n')
out_fl.close()
thread_index += 1
# Create lines for TORQUE input file by generating job-name, output,
# error and array parameters based on user input
# IF YOU ADD NEW AUTOMATICALLY INFERRED PARAMETERS, REMEMBER TO VALIDATE
# THEM AT THE BEGINNING OF THIS FUNCTION
resmng_config = list(input_file_parameters.resource_manager_params)
resmng_config.append('#PBS -k eo')
resmng_config.append('#PBS -N {0}'.format(input_file_parameters.job_name))
resmng_config.append('#PBS -d {0}'.format(input_file_parameters.output_dir))
resmng_config.append('#PBS -e {0}'.format(input_file_parameters.output_dir))
resmng_config.append('#PBS -t {0}-{1}'.format(0, len(workload)-1))
resmng_config.append('\n\n')
subshell_file_path = '{0}_WORKLOAD_{1}_subshell_{2}{3}'.format(NAME,
workload_index_string,
'"${PBS_ARRAYID}"',
appendix)
subshell_file_path = os.path.join(input_file_parameters.output_dir,
subshell_file_path)
resmng_config.append('source {0}'.format(subshell_file_path))
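        # The generated main submission file therefore looks roughly like this
        # (illustrative values; user-supplied #PBS parameters come first):
        #
        #   #PBS -k eo
        #   #PBS -N <job_name>
        #   #PBS -d <output_dir>
        #   #PBS -e <output_dir>
        #   #PBS -t 0-<number_of_threads - 1>
        #
        #   source <output_dir>/<NAME>_WORKLOAD_1_subshell_"${PBS_ARRAYID}".sh
        #
        # Each TORQUE array task thus sources the subshell file whose index
        # matches its PBS_ARRAYID, which lines up with the 0-based subshell
        # numbering above.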
out_fl_path = os.path.join(input_file_parameters.output_dir,file_main_name + appendix)
workload_file_paths.append(out_fl_path)
try:
out_fl = open(out_fl_path, 'w')
except IOError as emsg:
raise STAPLERerror.STAPLERerror('Unable to create output file:'
'\n{0}\n with error message:\n{1}'
.format(os.path.join(input_file_parameters.output_dir,
file_main_name + appendix),
str(emsg)))
out_fl.write('\n'.join(resmng_config))
out_fl.write('\n')
out_fl.close()
return workload_file_paths | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write_unix(workloads, input_file_parameters, command_line_parameters):\r\n\r\n workload_index = 0\r\n workload_zfill_amount = len(str(len(workloads)))\r\n background_process_list = []\r\n for workload in workloads:\r\n # Each workflow part will have separate file to submit to TORQUE with\r\n # sbatch command. Each file has one or more associated subshell files\r\n # containing contents for each thread.\r\n\r\n # Generate strings describing current workload and thread indexes for\r\n # output file names\r\n workload_index += 1\r\n workload_index_string = str(workload_index).zfill(workload_zfill_amount)\r\n file_main_name = '{0}_UNIX_WORKLOAD_1'.format(NAME)\r\n\r\n # Add information about current workflow to the main shell script\r\n background_process_list.append('echo \"Running workload part {0}\"'.format(\r\n workload_index))\r\n\r\n # When --fix_run mode is used the output and log files files already\r\n # exist. To prevent overwriting these files with new ones specific\r\n # prefix or appendix strings are added to the new output file names.\r\n appendix = '.sh'\r\n i = 0\r\n if command_line_parameters.fix_run:\r\n mode = 'FIX'\r\n elif command_line_parameters.compress_run == 'compress':\r\n mode = 'COMPRESS'\r\n elif command_line_parameters.compress_run == 'decompress':\r\n mode = 'DECOMPRESS'\r\n else:\r\n mode = None\r\n while mode is 'FIX' and os.path.exists(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix)):\r\n i += 1\r\n appendix = '_{0}_{1}.sh'.format(mode, i)\r\n\r\n if mode in ('COMPRESS', 'DECOMPRESS'):\r\n appendix = '_{0}.sh'.format(mode)\r\n while os.path.exists(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix)):\r\n i += 1\r\n appendix = '_{0}_{1}.sh'.format(mode, i)\r\n\r\n\r\n # Generate subshell files\r\n thread_index = 0\r\n thread_zfill_amount = len(str(len(workload)))\r\n for thread_contents in workload:\r\n # Iterate over output commands of each thread and write necessary\r\n # subshell files for each\r\n out_lines = []\r\n cmds_in_thread = len(thread_contents)\r\n for i in xrange(cmds_in_thread):\r\n # Check if any modules need loading or are they loaded by previous command\r\n skip_module_loading = False\r\n if i > 0:\r\n if thread_contents[i].load_module == thread_contents[i-1].load_module:\r\n skip_module_loading = True\r\n # Check if any modules need unloading or will they be used by following command\r\n skip_module_unloading = False\r\n if i < cmds_in_thread-1:\r\n if thread_contents[i].load_module == thread_contents[i+1].load_module:\r\n skip_module_unloading = True\r\n out_lines += generate_subshell_file_contents(thread_contents[i],\r\n skip_module_loading,\r\n skip_module_unloading)\r\n\r\n\r\n # Write subshell file\r\n thread_index_string = str(thread_index).zfill(thread_zfill_amount)\r\n fl_name = '{0}_WORKLOAD_{1}_subshell_{2}{3}'.format(NAME,\r\n workload_index_string,\r\n thread_index_string,\r\n appendix)\r\n try:\r\n out_fl = open(os.path.join(input_file_parameters.output_dir,\r\n fl_name), 'w')\r\n except:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}'.format(os.path.join(\r\n input_file_parameters.output_dir,\r\n fl_name)))\r\n out_fl.write('\\n'.join(out_lines))\r\n out_fl.write('\\n')\r\n out_fl.close()\r\n # i.e. 
use UNIX source to run input shell script, redirect stdout\r\n # and stderr to an .out file.\r\n background_process_list.append('source {0} >> {0}.out 2>&1 &'.format(\r\n os.path.join(input_file_parameters.output_dir,\r\n fl_name)))\r\n thread_index += 1\r\n\r\n # Workflow steps are written to a single output file (instead of\r\n # separate files). \"wait\" command is inserted in between workflow parts\r\n # to synchronize workflows.\r\n background_process_list.append('wait\\n\\n')\r\n\r\n # Write the main shell script file\r\n resmng_config = list(input_file_parameters.resource_manager_params)\r\n resmng_config.append('\\n\\n')\r\n resmng_config.append('\\n'.join(background_process_list))\r\n\r\n out_fl_path = os.path.join(input_file_parameters.output_dir, file_main_name + appendix)\r\n try:\r\n out_fl = open(out_fl_path, 'w')\r\n except IOError as emsg:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}\\n with error message:\\n{1}'\r\n .format(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix),\r\n str(emsg)))\r\n out_fl.write('\\n'.join(resmng_config))\r\n out_fl.write('\\n')\r\n out_fl.close()\r\n return [out_fl_path]",
"def gen_jobs(fpath, num_runs, netid):\n\n run = \"\"\n run += \"import sys\\n\"\n run += \"import subprocess\\n\"\n run += \"cmd_array = (\"\n for i in range(num_runs):\n run += \"r\\\"python test.py %d\\\"\" % i\n run += \",\\n\"\n\n run += \")\\n\"\n run += \"p = subprocess.Popen(cmd_array[int(sys.argv[1])-1], shell=True, stdout=subprocess.PIPE)\\n\"\n run += \"out = p.stdout.read()\"\n# run += \"print cmd_array[int(sys.argv[1])]\"\n\n script_name = \"test\"\n\n if verbose:\n print \"Writing array script: \" + \"run.\" + script_name + \".py\"\n f = open(os.path.join(fpath, \"run.\" + script_name + \".py\"), 'w')\n f.write(\"%s\\n\" % run)\n\n f = open(os.path.join(fpath, \"submit_run.\" + script_name + \".sh\"), 'w')\n submit_run = \"#!/bin/csh\\n\"\n submit_run += \"#$ -N %s\\n\" % (\"job_%d\" % num_runs)\n submit_run += \"#$ -t 1:%d\\n\" % (num_runs)\n submit_run += \"#$ -M %[email protected]\\n\\n\" % (netid)\n# submit_run += \"#$ -q short\"\n# submit_run += \"#$ -r y\"\n submit_run += \"python run.%s.py ${SGE_TASK_ID}\" % (script_name)\n\n if verbose:\n print \"Writing submit shell script: \" + \"submit_run.\" + script_name + \".sh\"\n f.write(\"%s\\n\" % submit_run)",
"def write_lsf(workloads, input_file_parameters, command_line_parameters):\r\n workload_index = 0\r\n workload_zfill_amount = len(str(len(workloads)))\r\n workload_file_paths = []\r\n for workload in workloads:\r\n # Each workflow part will have separate file to submit to TORQUE with\r\n # sbatch command. Each file has one or more associated subshell files\r\n # containing contents for each thread.\r\n\r\n # Generate strings describing current workload and thread indexes for\r\n # output file names\r\n workload_index += 1\r\n workload_index_string = str(workload_index).zfill(workload_zfill_amount)\r\n file_main_name = '{0}_LSF_WORKLOAD_{1}'.format(NAME,\r\n workload_index_string)\r\n\r\n # When --fix_run mode is used the output and log files files already\r\n # exist. To prevent overwriting these files with new ones specific\r\n # prefix or appendix strings are added to the new output file names.\r\n appendix = '.sh'\r\n i = 0\r\n if command_line_parameters.fix_run:\r\n mode = 'FIX'\r\n elif command_line_parameters.compress_run == 'compress':\r\n mode = 'COMPRESS'\r\n elif command_line_parameters.compress_run == 'decompress':\r\n mode = 'DECOMPRESS'\r\n else:\r\n mode = None\r\n while mode is not None and os.path.exists(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix)):\r\n i += 1\r\n appendix = '_{0}_{1}.sh'.format(mode, i)\r\n\r\n # Generate subshell files\r\n thread_index = 0\r\n for thread_contents in workload:\r\n # Iterate over output commands of each thread and write necessary\r\n # subshell files for each\r\n out_lines = []\r\n cmds_in_thread = len(thread_contents)\r\n for i in xrange(cmds_in_thread):\r\n # Check if any modules need loading or are they loaded by previous command\r\n skip_module_loading = False\r\n if i > 0:\r\n if thread_contents[i].load_module == thread_contents[i-1].load_module:\r\n skip_module_loading = True\r\n # Check if any modules need unloading or will they be used by following command\r\n skip_module_unloading = False\r\n if i < cmds_in_thread-1:\r\n if thread_contents[i].load_module == thread_contents[i+1].load_module:\r\n skip_module_unloading = True\r\n out_lines += generate_subshell_file_contents(thread_contents[i],\r\n skip_module_loading,\r\n skip_module_unloading)\r\n\r\n # Write subshell file\r\n thread_index_string = str(thread_index)\r\n fl_name = '{0}_WORKLOAD_{1}_subshell_{2}{3}'.format(NAME,\r\n workload_index_string,\r\n thread_index_string,\r\n appendix)\r\n try:\r\n out_fl = open(os.path.join(input_file_parameters.output_dir,\r\n fl_name), 'w')\r\n except:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}'.format(os.path.join(\r\n input_file_parameters.output_dir,\r\n fl_name)))\r\n out_fl.write('\\n'.join(out_lines))\r\n out_fl.write('\\n')\r\n out_fl.close()\r\n thread_index += 1\r\n\r\n # Generate parameter file for the bsub run\r\n resmng_config = []\r\n resmng_config.append('#BSUB-J \"{0}[1-{1}]\"'.format(\r\n input_file_parameters.job_name,\r\n len(workload)))\r\n resmng_config.append('#BSUB-i {0}_WORKLOAD_{1}_subshell_{2}{3}'.format(\r\n NAME,\r\n workload_index_string,\r\n '%I',\r\n appendix))\r\n resmng_config.append('#BSUB-o {0}_WORKLOAD_{1}_subshell_{2}{3}.out'.format(\r\n NAME,\r\n workload_index_string,\r\n '%I',\r\n appendix))\r\n resmng_config += input_file_parameters.resource_manager_params\r\n\r\n out_fl_path = os.path.join(input_file_parameters.output_dir, file_main_name + appendix)\r\n workload_file_paths.append(out_fl_path)\r\n try:\r\n out_fl = open(out_fl_path, 
'w')\r\n\r\n except IOError as emsg:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}\\n with error message:\\n{1}'\r\n .format(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix),\r\n str(emsg)))\r\n out_fl.write('\\n'.join(resmng_config))\r\n out_fl.write('\\n')\r\n out_fl.close()\r\n return workload_file_paths",
"def write_sge(workloads, input_file_parameters, command_line_parameters):\r\n validate_resource_manager_parameters(\r\n input_file_parameters.resource_manager_params,\r\n ['# -o', '# -e', '# -t'])\r\n\r\n workload_index = 0\r\n workload_zfill_amount = len(str(len(workloads)))\r\n workload_file_paths = []\r\n for workload in workloads:\r\n # Each workflow part will have separate file to submit to TORQUE with\r\n # sbatch command. Each file has one or more associated subshell files\r\n # containing contents for each thread.\r\n\r\n # Generate strings describing current workload and thread indexes for\r\n # output file names\r\n workload_index += 1\r\n workload_index_string = str(workload_index).zfill(workload_zfill_amount)\r\n file_main_name = '{0}_SGE_WORKLOAD_{1}'.format(NAME,\r\n workload_index_string)\r\n\r\n # When --fix_run mode is used the output and log files files already\r\n # exist. To prevent overwriting these files with new ones specific\r\n # prefix or appendix strings are added to the new output file names.\r\n prefix = ''\r\n appendix = '.sh'\r\n i = 0\r\n if command_line_parameters.fix_run:\r\n mode = 'FIX'\r\n elif command_line_parameters.compress_run == 'compress':\r\n mode = 'COMPRESS'\r\n elif command_line_parameters.compress_run == 'decompress':\r\n mode = 'DECOMPRESS'\r\n else:\r\n mode = None\r\n while mode is not None and os.path.exists(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix)):\r\n i += 1\r\n prefix = '{0}_{1}_'.format(mode, i)\r\n appendix = '_{0}_{1}.sh'.format(mode, i)\r\n\r\n # Generate subshell files\r\n thread_index = 1\r\n for thread_contents in workload:\r\n # Iterate over output commands of each thread and write necessary\r\n # subshell files for each\r\n out_lines = []\r\n cmds_in_thread = len(thread_contents)\r\n for i in xrange(cmds_in_thread):\r\n # Check if any modules need loading or are they loaded by previous command\r\n skip_module_loading = False\r\n if i > 0:\r\n if thread_contents[i].load_module == thread_contents[i-1].load_module:\r\n skip_module_loading = True\r\n # Check if any modules need unloading or will they be used by following command\r\n skip_module_unloading = False\r\n if i < cmds_in_thread-1:\r\n if thread_contents[i].load_module == thread_contents[i+1].load_module:\r\n skip_module_unloading = True\r\n out_lines += generate_subshell_file_contents(thread_contents[i],\r\n skip_module_loading,\r\n skip_module_unloading)\r\n\r\n\r\n # Write subshell file\r\n thread_index_string = str(thread_index)\r\n fl_name = '{0}_WORKLOAD_{1}_subshell_{2}{3}'.format(NAME,\r\n workload_index_string,\r\n thread_index_string,\r\n appendix)\r\n try:\r\n out_fl = open(os.path.join(input_file_parameters.output_dir,\r\n fl_name), 'w')\r\n except:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}'.format(os.path.join(\r\n input_file_parameters.output_dir,\r\n fl_name)))\r\n out_fl.write('\\n'.join(out_lines))\r\n out_fl.write('\\n')\r\n out_fl.close()\r\n thread_index += 1\r\n\r\n # Create lines for SGE input file by generating job-name, output,\r\n # error and array parameters based on user input\r\n\r\n status_file_basename = os.path.join(input_file_parameters.output_dir,\r\n prefix +\r\n input_file_parameters.job_name + '_$TASK_ID')\r\n\r\n # IF YOU ADD NEW AUTOMATICALLY INFERRED PARAMETERS, REMEMBER TO VALIDATE\r\n # THEM AT THE BEGINNING OF THIS FUNCTION\r\n resmng_config = list(input_file_parameters.resource_manager_params)\r\n resmng_config.append('#$ -o 
{0}.out'.format(status_file_basename))\r\n resmng_config.append('#$ -e {0}.err'.format(status_file_basename))\r\n resmng_config.append('#$ -t {0}-{1}'.format(1, len(workload)))\r\n\r\n resmng_config.append('\\n\\n')\r\n subshell_file_path = '{0}_WORKLOAD_{1}_subshell_{2}{3}'.format(NAME,\r\n workload_index_string,\r\n '\"$SGE_TASK_ID\"',\r\n appendix)\r\n subshell_file_path = os.path.join(input_file_parameters.output_dir,\r\n subshell_file_path)\r\n resmng_config.append('source {0}'.format(subshell_file_path))\r\n\r\n out_fl_path = os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix)\r\n workload_file_paths.append(out_fl_path)\r\n try:\r\n out_fl = open(out_fl_path, 'w')\r\n\r\n except IOError as emsg:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}\\n with error message:\\n{1}'\r\n .format(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix),\r\n str(emsg)))\r\n out_fl.write('\\n'.join(resmng_config))\r\n out_fl.write('\\n')\r\n out_fl.close()\r\n return workload_file_paths",
"def write_slurm(workloads, input_file_parameters, command_line_parameters):\r\n workload_index = 0\r\n workload_zfill_amount = len(str(len(workloads)))\r\n workload_file_paths = []\r\n for workload in workloads:\r\n # Each workflow part will have separate file to submit to SLURM with\r\n # sbatch command. Each file has one or more associated subshell files\r\n # containing contents for each thread.\r\n\r\n # Generate strings describing current workload and thread indexes for\r\n # output file names\r\n workload_index += 1\r\n workload_index_string = str(workload_index).zfill(workload_zfill_amount)\r\n file_main_name = '{0}_SBATCH_WORKLOAD_{1}'.format(NAME,\r\n workload_index_string)\r\n\r\n # When --fix_run mode is used the output and log files files already\r\n # exist. To prevent overwriting these files with new ones specific\r\n # prefix or appendix strings are added to the new output file names.\r\n appendix = '.sh'\r\n prefix = ''\r\n i = 0\r\n if command_line_parameters.fix_run:\r\n mode = 'FIX'\r\n elif command_line_parameters.compress_run == 'compress':\r\n mode = 'COMPRESS'\r\n elif command_line_parameters.compress_run == 'decompress':\r\n mode = 'DECOMPRESS'\r\n else:\r\n mode = None\r\n\r\n while mode is not None and os.path.exists(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix)):\r\n i += 1\r\n prefix = '{0}_{1}_'.format(mode, i)\r\n appendix = '_{0}_{1}.sh'.format(mode, i)\r\n\r\n # Generate subshell files\r\n thread_index = 0\r\n for thread_contents in workload:\r\n # Iterate over output commands of each thread and write necessary\r\n # subshell files for each\r\n out_lines = []\r\n cmds_in_thread = len(thread_contents)\r\n for i in xrange(cmds_in_thread):\r\n # Check if any modules need loading or are they loaded by previous command\r\n skip_module_loading = False\r\n if i > 0:\r\n if thread_contents[i].load_module == thread_contents[i-1].load_module:\r\n skip_module_loading = True\r\n # Check if any modules need unloading or will they be used by following command\r\n skip_module_unloading = False\r\n if i < cmds_in_thread-1:\r\n if thread_contents[i].load_module == thread_contents[i+1].load_module:\r\n skip_module_unloading = True\r\n out_lines += generate_subshell_file_contents(thread_contents[i],\r\n skip_module_loading,\r\n skip_module_unloading)\r\n\r\n\r\n # Write subshell file\r\n thread_index += 1\r\n thread_index_string = str(thread_index)\r\n fl_name = '{0}_WORKLOAD_{1}_subshell_{2}{3}'.format(NAME,\r\n workload_index_string,\r\n thread_index_string,\r\n appendix)\r\n try:\r\n out_fl = open(os.path.join(input_file_parameters.output_dir,\r\n fl_name), 'w')\r\n except:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}'.format(os.path.join(\r\n input_file_parameters.output_dir,\r\n fl_name)))\r\n out_fl.write('\\n'.join(out_lines))\r\n out_fl.write('\\n')\r\n out_fl.close()\r\n\r\n # Create lines for SLURM input file by generating job-name, output,\r\n # error and array parameters based on user input\r\n status_file_basename = os.path.join(input_file_parameters.output_dir,\r\n prefix + input_file_parameters.job_name)\r\n resmng_config = list(input_file_parameters.resource_manager_params)\r\n resmng_config.append('#SBATCH --job-name={0}'.format(input_file_parameters.job_name))\r\n resmng_config.append('#SBATCH --output={0}_%A_%a.out'.format(status_file_basename))\r\n resmng_config.append('#SBATCH --error={0}_%A_%a.err'.format(status_file_basename))\r\n resmng_config.append('#SBATCH --array={0}-{1}'.format(1, 
len(workload)))\r\n\r\n resmng_config.append('\\n\\n')\r\n subshell_file_path = '{0}_WORKLOAD_{1}_subshell_{2}{3}'.format(NAME,\r\n workload_index_string,\r\n '\"$SLURM_ARRAY_TASK_ID\"',\r\n appendix)\r\n subshell_file_path = os.path.join(input_file_parameters.output_dir,\r\n subshell_file_path)\r\n resmng_config.append('source {0}'.format(subshell_file_path))\r\n\r\n out_fl_path = os.path.join(input_file_parameters.output_dir,file_main_name + appendix)\r\n workload_file_paths.append(out_fl_path)\r\n try:\r\n out_fl = open(out_fl_path, 'w')\r\n\r\n except IOError as emsg:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}\\n with error message:\\n{1}'\r\n .format(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix),\r\n str(emsg)))\r\n out_fl.write('\\n'.join(resmng_config))\r\n out_fl.write('\\n')\r\n out_fl.close()\r\n return workload_file_paths",
"def build_job_scripts(model_list, scenario_list, output_dir, cassandra_config_dir, cassandra_log_dir,\n cassandra_main_script, sbatch_account, sbatch_partition='slurm', sbatch_walltime='01:00:00',\n sbatch_ntasks=3, sbatch_nodes=3, sbatch_jobname='cassie', sbatch_logdir='.', template=None):\n\n # use default configuration template file if user does not give one\n if template is None:\n template = pkg_resources.resource_filename('cassie', 'data/sbatch_template.sh')\n\n # existing tags to replace in the template file\n model_tag = '<model>'\n scenario_tag = '<scenario>'\n account_tag = '<account>'\n partition_tag = '<partition>'\n ntasks_tag = '<ntasks>'\n nodes_tag = '<nodes>'\n time_tag = '<walltime>'\n jobname_tag = '<jobname>'\n logdir_tag = '<logdir>'\n cassandra_configdir_tag = '<cassconfigdir>'\n cassandra_logdir_tag = '<casslogdir>'\n cassandra_script_tag = '<cassmainscript>'\n\n for model in model_list:\n for scenario in scenario_list:\n\n output_file = os.path.join(output_dir, f'run_{model.lower()}_{scenario}.sh')\n\n with open(output_file, 'w') as out:\n with open(template) as get:\n\n f = get.read()\n\n # replace tag names with dynamic content\n fx = f.replace(model_tag, model)\n fx = fx.replace(scenario_tag, scenario)\n\n fx = fx.replace(account_tag, sbatch_account)\n fx = fx.replace(partition_tag, sbatch_partition)\n fx = fx.replace(ntasks_tag, str(sbatch_ntasks))\n fx = fx.replace(nodes_tag, str(sbatch_nodes))\n fx = fx.replace(time_tag, sbatch_walltime)\n fx = fx.replace(jobname_tag, sbatch_jobname)\n fx = fx.replace(logdir_tag, sbatch_logdir)\n\n fx = fx.replace(cassandra_configdir_tag, cassandra_config_dir)\n fx = fx.replace(cassandra_logdir_tag, cassandra_log_dir)\n fx = fx.replace(cassandra_script_tag, cassandra_main_script)\n\n out.write(fx)",
"def setup_jobs(self):\n transfer_args = [\"analysis_type\", \"perturbation\", \"num_permutations\", \"permutation_test_statistic\", \"loss_function\",\n \"importance_significance_level\", \"window_search_algorithm\", \"window_effect_size_threshold\"]\n jobs = [None] * self.num_jobs\n for idx in range(self.num_jobs):\n # Create and launch condor job\n features_filename = constants.INPUT_FEATURES_FILENAME.format(self.args.output_dir, idx)\n input_files = [features_filename, self.args.model_filename, self.args.model_loader_filename, self.args.data_filename]\n job_dir = f\"{self.args.output_dir}/outputs_{idx}\"\n cmd = f\"python3 -m anamod.core.worker -worker_idx {idx}\"\n for arg in transfer_args:\n if hasattr(self.args, arg):\n cmd += f\" -{arg} {getattr(self.args, arg)}\"\n # Relative file paths for non-shared FS, absolute for shared FS\n for name, path in dict(output_dir=job_dir, features_filename=features_filename, model_filename=self.args.model_filename,\n model_loader_filename=self.args.model_loader_filename, data_filename=self.args.data_filename).items():\n cmd += f\" -{name} {os.path.abspath(path)}\" if self.args.shared_filesystem else f\" -{name} {os.path.basename(path)}\"\n job = CondorJobWrapper(cmd, input_files, job_dir, shared_filesystem=self.args.shared_filesystem,\n memory=f\"{self.args.memory_requirement}GB\", disk=f\"{self.args.disk_requirement}GB\",\n avoid_bad_hosts=self.args.avoid_bad_hosts, retry_arbitrary_failures=self.args.retry_arbitrary_failures,\n cleanup=self.args.cleanup)\n jobs[idx] = job\n return jobs",
"def make_all(i_file, config,\n out_dir, submit=True, pism_root=pism_root, **kwargs):\n\n # make new directory or break if existing\n try:\n os.makedirs(out_dir)\n except OSError:\n print(\"Directory %s exists, skipping it.\" % out_dir)\n return 2\n\n # make config file\n c_path = make_config(config, out_dir=out_dir, pism_root=pism_root)\n\n # make job script chain\n j_list = make_chain(i_file,\n out_dir=out_dir, pism_root=pism_root, **kwargs)\n\n # submit job chain\n if submit is True:\n j_list = submit_chain(j_list)\n\n # no error, return 0\n return 0",
"def create_job(jobrun, vcf_filenames):\n if jobrun == \"cluster\":\n \"\"\"\n Supports only PBS clusters for now.\n \"\"\"\n for i in vcf_filenames:\n job_name = os.path.basename(i)\n job_print_string = \"#PBS -N %s\\n#PBS -M [email protected]\\n#PBS -m abe\\n#PBS -V\\n#PBS -l nodes=1:ppn=4,pmem=4000mb,walltime=72:00:00\\n#PBS -q fluxod\\n#PBS -A esnitkin_fluxod\\n#PBS -l qos=flux\\n\\n/home/apirani/anaconda/bin/python /nfs/esnitkin/bin_group/scripts/Scripts_v2.0/variants_position_analysis/reason_job.py -filter2_only_snp_vcf_dir %s -filter2_only_snp_vcf_file %s\\n\" % (job_name, args.filter2_only_snp_vcf_dir, i)\n job_file_name = \"%s.pbs\" % (i)\n f1=open(job_file_name, 'w+')\n f1.write(job_print_string)\n f1.close()\n #os.system(\"mv %s/*.pbs %s/temp\" % (args.filter2_only_snp_vcf_dir, args.filter2_only_snp_vcf_dir))\n pbs_dir = args.filter2_only_snp_vcf_dir + \"/*.pbs\"\n pbs_scripts = glob.glob(pbs_dir)\n for i in pbs_scripts:\n print \"Running: qsub %s\" % i\n #os.system(\"qsub %s\" % i)\n\n elif jobrun == \"parallel-local\":\n \"\"\"\n Generate a Command list of each job and run it in parallel on different cores available on local system\n \"\"\"\n command_array = []\n command_file = \"%s/commands_list.sh\" % args.filter2_only_snp_vcf_dir\n f3 = open(command_file, 'w+')\n\n\n for i in vcf_filenames:\n job_name = os.path.basename(i)\n job_print_string = \"#PBS -N %s\\n#PBS -M [email protected]\\n#PBS -m abe\\n#PBS -V\\n#PBS -l nodes=1:ppn=4,pmem=4000mb,walltime=72:00:00\\n#PBS -q fluxod\\n#PBS -A esnitkin_fluxod\\n#PBS -l qos=flux\\n\\n/home/apirani/anaconda/bin/python /nfs/esnitkin/bin_group/scripts/Scripts_v2.0/variants_position_analysis/reason_job.py -filter2_only_snp_vcf_dir %s -filter2_only_snp_vcf_file %s\\n\" % (job_name, args.filter2_only_snp_vcf_dir, i)\n job_file_name = \"%s.pbs\" % (i)\n f1=open(job_file_name, 'w+')\n f1.write(job_print_string)\n f1.close()\n #os.system(\"mv %s/*.pbs %s/temp\" % (args.filter2_only_snp_vcf_dir, args.filter2_only_snp_vcf_dir))\n pbs_dir = args.filter2_only_snp_vcf_dir + \"/*.pbs\"\n pbs_scripts = glob.glob(pbs_dir)\n\n\n for i in pbs_scripts:\n f3.write(\"bash %s\\n\" % i)\n f3.close()\n with open(command_file, 'r') as fpp:\n for lines in fpp:\n lines = lines.strip()\n command_array.append(lines)\n fpp.close()\n print len(command_array)\n if args.numcores:\n num_cores = int(num_cores)\n else:\n num_cores = multiprocessing.cpu_count()\n results = Parallel(n_jobs=num_cores)(delayed(run_command)(command) for command in command_array)\n\n elif jobrun == \"parallel-single-cluster\":\n print \" \"\n else:\n \"\"\"\n Generate a Command list of each job and run it on local system one at a time\n \"\"\"\n command_array = []\n command_file = \"%s/commands_list.sh\" % args.filter2_only_snp_vcf_dir\n os.system(\"bash %s\" % command_file)",
"def submit_scripts(self, out):\n program_folder = os.path.join(out, self.out)\n for config in self.configurations:\n config.submit_CaVEMan_scripts(\n program_folder, self.path2exe, self.ref_fai, self.file1, self.file2,\n self.config_file, self.qsub_dir, self.mstep_script, self.merge_script, self.estep_script\n )\n return None",
"def write_default(workflows, output_dir):\r\n\r\n # Calculate the total number of commands\r\n number_of_commands = 0\r\n for workflow in workflows:\r\n number_of_commands += sum(map(len, workflow))\r\n\r\n # Create command line strings\r\n i = 0\r\n out_lines = ['echo Started executing shell script at:', 'date']\r\n for workflow in workflows:\r\n for workflow_step in workflow:\r\n for cmd in workflow_step:\r\n i += 1\r\n cmd_list = cmd.command_lines\r\n cmd_list = map(clean_command_lines, cmd_list)\r\n out_lines.append('echo Executing command {0}/{1}:'\r\n .format(i, number_of_commands))\r\n for c in cmd_list:\r\n c = c.replace('>', '\\\\>')\r\n c = c.replace('|', '\\\\|')\r\n out_lines.append('echo ' + c)\r\n out_lines.append('date')\r\n\r\n #Load modules\r\n if cmd.load_module:\r\n for module in cmd.load_module:\r\n out_lines.append(module)\r\n\r\n #The command\r\n out_lines += cmd_list\r\n\r\n #Unload modules\r\n if cmd.unload_module:\r\n for module in cmd.unload_module:\r\n out_lines.append(module)\r\n out_lines.append('echo Finished at:')\r\n out_lines.append('date')\r\n\r\n #Open and write command lines\r\n fl_name = '{0}_output_{1}.sh'.format(NAME, START_TIME)\r\n output_file_path = os.path.join(output_dir, fl_name)\r\n try:\r\n out_fl = open(output_file_path, 'w')\r\n except:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}'.format(os.path.join(output_dir,\r\n fl_name)))\r\n out_fl.write('#!/usr/bin/env bash\\n')\r\n out_fl.write('\\n'.join(out_lines))\r\n out_fl.close()\r\n return [output_file_path]",
"def _start_torque_workers(self):\n for bundle in self._model.batch_get_bundles(state=State.STAGED, bundle_type='run'):\n resource_args = []\n\n request_cpus = self._compute_request_cpus(bundle)\n if request_cpus:\n resource_args.extend(['-l', 'nodes=1:ppn=%d' % request_cpus])\n\n request_memory = self._compute_request_memory(bundle)\n if request_memory:\n resource_args.extend(['-l', 'mem=%d' % request_memory])\n\n request_queue = bundle.metadata.request_queue or self._default_request_queue\n if request_queue:\n # Either host=<host-name> or <queue-name>, but not tag=<tag>\n m = re.match('host=(.+)', request_queue)\n tagm = re.match('tag=.+', request_queue)\n if m:\n resource_args.extend(['-l', 'host=' + m.group(1)])\n elif not tagm:\n resource_args.extend(['-q', request_queue])\n\n request_priority = bundle.metadata.request_priority or self._default_request_priority\n if request_priority:\n resource_args.extend(['-p', str(request_priority)])\n\n script_args = [\n '--server', self._torque_bundle_service_url,\n '--password-file', self._torque_password_file,\n '--shared-file-system',\n ]\n\n script_env = {\n 'LOG_DIR': self._torque_log_dir,\n 'WORKER_CODE_DIR': self._torque_worker_code_dir,\n # -v doesn't work with spaces, so we have to hack it.\n 'WORKER_ARGS': '|'.join(script_args),\n }\n\n command = self._torque_ssh_command(\n ['qsub',\n '-k', 'n', # do not keep stdout/stderr streams (we redirect them manually to the configured log_dir)\n '-d', '/tmp', # avoid chdir permission problems, worker won't do anything in working directory anyway\n '-v', ','.join([k + '=' + v for k, v in script_env.iteritems()])] +\n resource_args +\n ['-S', '/bin/bash', os.path.join(self._torque_worker_code_dir, 'worker.sh')])\n\n # Throttle Torque commands, sometimes scheduler has trouble keeping up\n elapsed = time.time() - self._last_qsub_time\n if elapsed < self._torque_min_seconds_between_qsub:\n time.sleep(self._torque_min_seconds_between_qsub - elapsed)\n\n try:\n job_handle = subprocess.check_output(command, stderr=subprocess.STDOUT).strip()\n except subprocess.CalledProcessError as e:\n failure_message = 'Failed to launch Torque job: ' + e.output\n logger.info('Failing %s: %s', bundle.uuid, failure_message)\n self._model.update_bundle(\n bundle, {'state': State.FAILED,\n 'metadata': {'failure_message': failure_message}})\n continue\n finally:\n self._last_qsub_time = time.time()\n\n logger.info('Started Torque worker for bundle %s, job handle %s', bundle.uuid, job_handle)\n self._model.set_waiting_for_worker_startup_bundle(bundle, job_handle)",
"def scriptGen(self,tmpd='/tmp/jose',libRev='last',submode='qsub',\n redirect=1,PBSoptions=''):\n jobname=self.name\n outdir=self.outd\n qsubdir=scratchdir+'/qsub/'+todayDate() #subdirectory to deposit the script\n if not os.path.exists(qsubdir): pastry('/bin/mkdir -p '+qsubdir)\n script=qsubdir+'/'+jobname+'.sh' #full script file name\n\n if len(jobname) > 15:\n sys.stderr.write('Error: job name '+jobname+' cannot exceed 15 characters')\n return ''\n if not os.path.exists(outdir): os.system('/bin/mkdir -p '+outdir)\n buf=''\n ulimit=int(float(mem_limit)*1024) #maximum resident memory size (Kb) to prevent swapping\n wd=tmpd+'/${PBS_JOBID}'\n #wd=tmpd+'/'+ re.compile('\\W').sub('',self.name) +'_$$' #working directory\n logname=jobname+'.log'\n local_log=wd+'/'+logname\n remote_log=outdir+'/'+logname\n buf= '#!/bin/bash\\n\\n'\n buf+= PBSoptions+'\\n\\n'\n buf+= '#bash function to update library\\n'\n buf+= self.updateNodeLib(libRev)+'\\n\\n'\n buf+= '#bash function to import temporary libs\\n'\n buf+= self.shared_temporal_libraries()+'\\n\\n'\n buf+= '#bash function to clean exit\\n'\n buf+= self.cleanup_exit(submode=submode)+'\\n\\n'\n buf+= 'echo \"'+script+'\"\\n' #write script name withing script body\n buf+= 'hostname\\n' #node where job will be run\n buf+= 'echo $PBS_JOBID\\n'\n buf+= 'ulimit -m '+`ulimit`+' #maximum memory\\n'\n buf+= 'source ~/.bash_profile >/dev/null #environment variables\\n'\n buf+= 'wd='+wd+' #working directory\\n'\n buf+= '/bin/mkdir -p $wd\\n'\n buf+= 'export LOCAL_LOG=\"'+local_log+'\"\\n'\n buf+= '/bin/touch $LOCAL_LOG\\n'\n if submode=='sub' and redirect:\n buf+='exec &> $LOCAL_LOG #redirect STODOUT, STDERR to LOCAL_LOG\\n' \n buf+= 'export REMOTE_LOG=\"'+remote_log+'\"\\n'\n\n but+= '#clean up old log file\\n'\n buf+= 'if [ -f $REMOTE_LOG ]; then\\n' \n buf+= ' /bin/rm -f $REMOTE_LOG\\n'\n buf+= 'fi\\n\\n'\n\n buf+= 'trap \"cleanup_exit 1\" TERM #in case of killing job\\n\\n'\n\n buf+= '#update node code library && import libraries\\n'\n buf+= 'if !('\n buf+= 'updateNodeLib && ' \n buf+= 'shared_temporal_libraries _PREPARE_'\n buf+= ');then\\n'\n buf+= ' cleanup_exit 1\\n'\n buf+= 'fi\\n\\n'\n \n buf+= '/bin/cp '+' '.join(self.inpl)+' $wd #bring input files\\n' \n buf+= 'cd $wd\\n\\n'\n buf+= '#Test command success\\n'\n buf+= 'exs=0 #variable holding script exit status\\n'\n buf+= 'if !('\n buf+= self.exe\n buf+= ');then\\n'\n buf+= ' exs=1\\n'\n buf+= 'fi\\n\\n'\n buf+= '#move even partial results (exs=1)\\n'\n buf+= '/bin/mv '+' '.join(self.outl)+' '+outdir+'\\n'\n buf+= 'cleanup_exit $exs'\n\n open(script,'w').write(buf)\n pastry('chmod u+x '+script)\n\n return script",
"def run_jobs(num_runs):\n\n if os.environ.get('OS','') == 'Windows_NT':\n p = subprocess.Popen(\"dir /A:-d /B | findstr/r \\\"submit_run.*.sh\\\"\", shell=True, stdout=subprocess.PIPE)\n else:\n p = subprocess.Popen(\"ls -l | grep 'submit_run.*.sh' | awk '{print $9}'\", shell=True, stdout=subprocess.PIPE)# list SGE submit files\n out = p.stdout.read()\n \n if os.environ.get('OS','') == 'Windows_NT':\n fnames = out.rsplit(\"\\r\\n\")\n else:\n fnames = out.rsplit(\"\\n\")\n\n if len(fnames) > 0: del fnames[-1]\n\n # determine whether 'qsub' command is available\n if (is_valid_command('qsub')): # run the commands jobs using qsub\n for fname in fnames:\n p = subprocess.Popen(\"qsub %s\" % fname, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out = p.stderr.read()\n if verbose:\n print out\n print \"Jobs submitted.\"\n else: # run the commands sequentially without using qsub\n print \"Error: 'qsub' is an invalid command.\"\n if os.environ.get('OS','') == 'Windows_NT':\n p = subprocess.Popen(\"dir /A:-d /B | findstr/r \\\"run.*.py\\\"\", shell=True, stdout=subprocess.PIPE)\n else:\n p = subprocess.Popen(\"ls -l | grep 'run.*.py' | awk '{print $9}'\", shell=True, stdout=subprocess.PIPE) # list SGE submit files\n out = p.stdout.read()\n\n if os.environ.get('OS','') == 'Windows_NT':\n fnames = out.rsplit(\"\\r\\n\")\n else:\n fnames = out.rsplit(\"\\n\")\n if len(fnames) > 0: del fnames[-1]\n\n for fname in fnames:\n for i in range(num_runs):\n if verbose:\n print \"Executing command: python %s %d\" % (fname, i)\n p = subprocess.Popen(\"python %s %d\" % (fname, i), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out = p.stderr.read()\n if verbose:\n print out",
"def makeJob(kallisto, index, meta, bootstraps, files, single, s=1, l=180): \n cmd = \"%(kallisto)s quant -i %(index)s -o %(meta)s \" % locals()\n for file in files: \n cmd += \" ../%s\" % file \n if single: \n cmd += \" --single -l %(l)i -s %(s)i\" % locals()\n cmd += \" &> %s.log.txt\" % meta\n return cmd",
"def eddieSubmit(model_list, config,rootDir, verbose=False, resubmit=None, runCode=None):\r\n \r\n outputDir=os.path.join(rootDir,'jobOutput') # directory where output goes. \r\n # try and create it. \r\n try: \r\n os.makedirs(outputDir)\r\n except OSError:\r\n if not os.path.isdir(outputDir):\r\n raise\r\n \r\n sshCmd='ssh login01.ecdf.ed.ac.uk \" cd %s ; '%(os.getcwd()) # need to ssh to a login node to do things to Q's and cd to current dir\r\n #\r\n modelDirFile=os.path.join(rootDir,'tempDirList.txt') # name of file containing list of directories for post processing stage\r\n with open(modelDirFile, 'w') as f:\r\n for m in model_list:\r\n f.write(m.dirPath+','+m.ppExePath()+','+m.ppOutputFile()+'\\n') # write out info for post processing job.\r\n # submit the following.. Need path to postProcess.sh\r\n jobName='PP'+config.name()\r\n ## work out postprocess script path\r\n postProcess=os.path.expandvars('$OPTCLIMTOP/eddie/postProcess.sh')\r\n scriptName=os.path.expandvars('$OPTCLIMTOP/eddie/qsub.sh')\r\n # TODO move to better python syntax for var printing. Think can use named vars in below.\r\n qsub_cmd='qsub -l h_vmem=2G -l h_rt=00:10:00 -V -cwd -e %s -o %s'%(outputDir,outputDir) # std stuff for submission\r\n # means # 2 Gbyte Mem 10 min run, cur env, curr wd, output (error & std) in OutputDir\r\n # deal with runCode\r\n if runCode is not None: qsub_cmd += ' -P %s '%(runCode)\r\n cmd = qsub_cmd+' -t 1:%d -h -N %s '%(len(model_list),jobName)\r\n cmd += postProcess\r\n cmd += \" %s %s \"%(modelDirFile, config.fileName())\r\n if verbose: print \"postProcess task array cmd is \",cmd\r\n # run the post process and get its job id\r\n jid = subprocess.check_output(sshCmd+cmd+'\"', shell=True)\r\n # '\"' and shell=True seem necessary. Would be good to avoid both\r\n postProcessJID=jid.split()[2].split('.')[0] # extract the actual job id.\r\n if verbose: print \"postProcess array job id is %s\"%postProcessJID\r\n # TODO wrap this in a try/except block.\r\n # write the jobid + N into the model -- for later when \r\n # model gets some processing.\r\n for indx in range(len(model_list)):\r\n model_list[indx].jid=postProcessJID+'.%d'%(indx+1)\r\n\r\n # now submit this entire script so that the next iteration in the algorithm.\r\n # can be run\r\n if resubmit is not None:\r\n # submit the next job in the iteration. -hold_jid jid means the post processing job will only run after the\r\n # arry of post processing jobs has ran.\r\n jobName='RE'+config.name()\r\n # TODO move to better python syntax for var printing. Think can use named vars in...\r\n cmd = [qsub_cmd,'-hold_jid %s -N %s %s'%(postProcessJID,jobName, scriptName)]\r\n cmd.extend(resubmit) # add the arguments in including the programme to run..\r\n cmd=' '.join(cmd) # convert to one string.\r\n if verbose: print \"Next iteration cmd is \", cmd\r\n jid = subprocess.check_output(sshCmd+cmd+'\"', shell=True) # submit the script. Good to remove shell=True and '\"'\r\n jid = jid.split()[2] # extract the actual job id.\r\n if verbose: print \"Job ID for next iteration is %s\"%jid\r\n # now submit the models\r\n for m in model_list:\r\n # need to put the post processing job release command in the model somehow. Depends on the model\r\n # but we have a mark and a file. So will modify the file. The model should define this..\r\n # and insert the mark into the file. 
Would I think be easier to keep the line no and goto that.\r\n for line in fileinput.input(m.postProcessFile, inplace=1, backup='.bak2'):\r\n # if m.postProcessFile does not exist then get an error which is what we want!\r\n # fix your model method!\r\n print line[0:-1] # just print the line out.\r\n if m.postProcessMark in line: # got the mark so add some text.\r\n print sshCmd,'qrls ',m.jid,'\"' # this releases the post processing job.\r\n # dealt with modifying main file.\r\n modelSubmitName=m.submit()\r\n if verbose: print \"Submitting \",modelSubmitName\r\n subprocess.check_output(sshCmd+modelSubmitName+'\"',shell=True) # submit the script\r\n\r\n return True",
"def submit_scripts(self, out):\n program_folder = os.path.join(out, self.out)\n for config in self.configurations:\n config.submit_script(program_folder)\n return None",
"def go(self):\n\n self._write_master()\n num_fofs = self.fofs['fofid'].max()\n fof_splits = split.get_splits(num_fofs, self['chunksize'])\n\n njobs=0\n fobj=None\n\n icondor=0\n for isplit,fof_split in enumerate(fof_splits):\n if njobs % self['jobs_per_sub']==0:\n if fobj is not None:\n fobj.close()\n fobj = self._open_condor_script(icondor)\n icondor += 1\n\n self._write_split(fobj, isplit, fof_split)\n\n njobs += 1",
"def build_submission_script(path,\n script_name,\n save_history=True,\n walltime=10,\n allocation='p30653',\n cores=1,\n memory=4):\n\n # define paths\n path = abspath(path)\n job_script_path = join(path, 'scripts', 'submit.sh')\n\n # copy run script to scripts directory\n run_script = abspath(__file__).rsplit('/', maxsplit=2)[0]\n run_script = join(run_script, 'scripts', script_name)\n shutil.copy(run_script, join(path, 'scripts'))\n\n # determine queue\n if walltime <= 4:\n queue = 'short'\n elif walltime <= 48:\n queue = 'normal'\n else:\n queue = 'long'\n\n # declare outer script that reads PATH from file\n job_script = open(job_script_path, 'w')\n job_script.write('#!/bin/bash\\n')\n\n # move to job directory\n job_script.write('cd {:s} \\n\\n'.format(path))\n\n # begin outer script for processing job\n job_script.write('while IFS=$\\'\\\\t\\' read P\\n')\n job_script.write('do\\n')\n job_script.write('b_id=$(echo $(basename ${P}) | cut -f 1 -d \\'.\\')\\n')\n job_script.write(' JOB=`msub - << EOJ\\n\\n')\n\n # =========== begin submission script for individual batch ============\n job_script.write('#! /bin/bash\\n')\n job_script.write('#MSUB -A {:s} \\n'.format(allocation))\n job_script.write('#MSUB -q {:s} \\n'.format(queue))\n job_script.write('#MSUB -l walltime={0:02d}:00:00 \\n'.format(walltime))\n job_script.write('#MSUB -m abe \\n')\n #job_script.write('#MSUB -M [email protected] \\n')\n job_script.write('#MSUB -o ./log/${b_id}/outlog \\n')\n job_script.write('#MSUB -e ./log/${b_id}/errlog \\n')\n job_script.write('#MSUB -N ${b_id} \\n')\n job_script.write('#MSUB -l nodes=1:ppn={:d} \\n'.format(cores))\n job_script.write('#MSUB -l mem={:d}gb \\n\\n'.format(memory))\n\n # load python module and metabolism virtual environment\n job_script.write('module load python/anaconda3.6\\n')\n job_script.write('source activate ~/pythonenvs/growth_env\\n\\n')\n\n # move to job directory\n job_script.write('cd {:s} \\n\\n'.format(path))\n\n # run script\n job_script.write('python ./scripts/{:s}'.format(script_name)+' ${P} ')\n args = (save_history,)\n job_script.write('-s {:d}\\n'.format(*args))\n job_script.write('EOJ\\n')\n job_script.write('`\\n\\n')\n # ============= end submission script for individual batch ============\n\n # print job id\n #job_script.write('echo \"JobID = ${JOB} submitted on `date`\"\\n')\n job_script.write('done < ./batches/index.txt \\n')\n job_script.write('echo \"All batches submitted as of `date`\"\\n')\n job_script.write('exit\\n')\n\n # close the file\n job_script.close()\n\n # change the permissions\n chmod(job_script_path, 0o755)",
"def write_shell_scripts(airfoils, qsh_template, nsetup, ntype, out_dir):\n for nairfoil, sim_setup in airfoils.iteritems():\n for aoa in sim_setup['aoas']:\n # Create simulation name\n sim_name = create_sim_name(nairfoil, ntype, nsetup, aoa)\n # Create fluent journal file\n with open(qsh_template, 'r') as f:\n qtxt = f.read()\n # Start to replace parameters inside the journal\n qtxt = qtxt.replace('SIMNAME', sim_name)\n qtxt = qtxt.replace('in.jou', sim_name + '.jou')\n qtxt = qtxt.replace('fluent.out', sim_name + '.out')\n # Write new shell script to out_dir\n qout = sim_name + '.qsh'\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n with open(os.path.join(out_dir, qout), 'w') as f:\n f.write(qtxt)\n return True",
"def job_workflow(workflow, jobfiles, jwcl=WCL()):\n #pylint: disable=protected-access,expression-not-assigned,lost-exception\n global pool\n global results\n global stop_all\n global jobfiles_global\n global job_track\n global keeprunning\n global donejobs\n global result_lock\n global lock_monitor\n\n infullnames = {}\n with open(workflow, 'r') as workflowfh:\n # for each wrapper execution\n lines = workflowfh.readlines()\n sys.stdout.flush()\n inputs = {}\n # read in all of the lines in dictionaries\n for linecnt, line in enumerate(lines):\n wrapnum = miscutils.fwsplit(line.strip())[0]\n task = parse_wrapper_line(line, linecnt)\n #task['logfile'] = None\n wcl = WCL()\n with open(task['wclfile'], 'r') as wclfh:\n wcl.read(wclfh, filename=task['wclfile'])\n wcl.update(jwcl)\n\n # get fullnames for inputs and outputs\n ins, _ = intgmisc.get_fullnames(wcl, wcl, None)\n del wcl\n # save input filenames to eliminate from junk tarball later\n infullnames[wrapnum] = []\n for isect in ins:\n for ifile in ins[isect]:\n infullnames[wrapnum].append(ifile)\n jobfiles['infullnames'].extend(ifile)\n inputs[wrapnum] = (task, copy.deepcopy(jobfiles), jwcl, ins)\n job_track[task['wrapnum']] = (task['logfile'], jobfiles)\n # get all of the task groupings, they will be run in numerical order\n tasks = jwcl[\"fw_groups\"].keys()\n tasks.sort()\n # loop over each grouping\n manager = mp.Manager()\n for task in tasks:\n results = [] # the results of running each task in the group\n # get the maximum number of parallel processes to run at a time\n nproc = int(jwcl[\"fw_groups\"][task][\"fw_nthread\"])\n procs = miscutils.fwsplit(jwcl[\"fw_groups\"][task][\"wrapnums\"])\n tempproc = []\n # pare down the list to include only those in this run\n for p in procs:\n if p in inputs.keys():\n tempproc.append(p)\n procs = tempproc\n if nproc > 1:\n numjobs = len(procs)\n # set up the thread pool\n pool = mp.Pool(processes=nproc, maxtasksperchild=2)\n outq = manager.Queue()\n errq = manager.Queue()\n with lock_monitor:\n try:\n donejobs = 0\n # update the input files now, so that it only contains those from the current taks(s)\n for inp in procs:\n jobfiles_global['infullnames'].extend(infullnames[inp])\n # attach all the grouped tasks to the pool\n [pool.apply_async(job_thread, args=(inputs[inp] + (outq, errq, True,),), callback=results_checker) for inp in procs]\n pool.close()\n time.sleep(10)\n while donejobs < numjobs and keeprunning:\n count = 0\n while count < 2:\n count = 0\n try:\n msg = outq.get_nowait()\n print msg\n except:\n count += 1\n try:\n errm = errq.get_nowait()\n sys.stderr.write(errm)\n except:\n count += 1\n time.sleep(.1)\n except:\n results.append(1)\n exc_type, exc_value, exc_traceback = sys.exc_info()\n traceback.print_exception(exc_type, exc_value, exc_traceback,\n limit=4, file=sys.stdout)\n\n raise\n\n finally:\n if stop_all and max(results) > 0:\n # wait to give everything time to do the first round of cleanup\n time.sleep(20)\n # get any waiting messages\n for _ in range(1000):\n try:\n msg = outq.get_nowait()\n print msg\n except:\n break\n for _ in range(1000):\n try:\n errm = errq.get_nowait()\n sys.stderr.write(errm)\n except:\n break\n if not result_lock.acquire(False):\n lock_monitor.wait(60)\n else:\n result_lock.release()\n # empty the worker queue so nothing else starts\n terminate(force=True)\n # wait so everything can clean up, otherwise risk a deadlock\n time.sleep(50)\n del pool\n while True:\n try:\n msg = outq.get(timeout=.1)\n print msg\n except:\n break\n\n while True:\n 
try:\n errm = errq.get(timeout=.1)\n sys.stderr.write(errm)\n except:\n break\n # in case the sci code crashed badly\n if not results:\n results.append(1)\n jobfiles = jobfiles_global\n jobfiles['infullnames'] = list(set(jobfiles['infullnames']))\n if stop_all and max(results) > 0:\n return max(results), jobfiles\n # if running in single threaded mode\n else:\n temp_stopall = stop_all\n stop_all = False\n\n donejobs = 0\n for inp in procs:\n try:\n jobfiles_global['infullnames'].extend(infullnames[inp])\n results_checker(job_thread(inputs[inp] + (sys.stdout, sys.stderr, False,)))\n except:\n (extype, exvalue, trback) = sys.exc_info()\n traceback.print_exception(extype, exvalue, trback, file=sys.stdout)\n results = [1]\n jobfiles = jobfiles_global\n if results[-1] != 0:\n return results[-1], jobfiles\n stop_all = temp_stopall\n\n\n return 0, jobfiles",
"def run_multiple(self, num_episodes=5, base_output_name=\"logs/output_command\"):\n for i in range(num_episodes):\n client.reset()\n client.confirmConnection()\n client.enableApiControl(True)\n client.armDisarm(True)\n airsim.time.sleep(1)\n client.takeoffAsync().join()\n output_filename = base_output_name + \"{:02d}\".format(i) + \".txt\"\n self.move(output_filename, self.input_file_name)\n self.clear_logging_arr()",
"def post_build(self, manager):\n if not self.output_files_dir.exists():\n return\n\n output_file_dirs = [\n d for d in self.output_files_dir.rglob(\"*\") if d.is_dir()\n ] + [self.output_files_dir]\n for output_file_dir in output_file_dirs:\n stem = output_file_dir.relative_to(self.output_files_dir)\n api_path = self.api_dir / stem / ALL_JSON\n\n yield self.task(\n name=f\"contents:{stem}\",\n doc=f\"create a Jupyter Contents API response for {stem}\",\n actions=[\n (self.one_contents_path, [output_file_dir, api_path]),\n (self.maybe_timestamp, [api_path]),\n ],\n file_dep=[p for p in output_file_dir.rglob(\"*\") if not p.is_dir()],\n targets=[api_path],\n )",
"def run( **kwargs ):\n\n # combine options using correct preference\n options = dict(PARAMS.items())\n options.update( getCallerLocals().items() )\n options.update( kwargs.items() )\n\n def setupJob( session ):\n\n jt = session.createJobTemplate()\n jt.workingDirectory = os.getcwd()\n jt.jobEnvironment = { 'BASH_ENV' : '~/.bashrc' }\n jt.args = []\n jt.nativeSpecification = \"-V -q %s -p %i -N %s %s\" % \\\n (options.get(\"job_queue\", global_options.cluster_queue ),\n options.get(\"job_priority\", global_options.cluster_priority ),\n \"_\" + re.sub( \"[:]\", \"_\", os.path.basename(options.get(\"outfile\", \"ruffus\" ))),\n options.get(\"job_options\", global_options.cluster_options))\n\n # keep stdout and stderr separate\n jt.joinFiles=False\n\n return jt\n\n shellfile = os.path.join( os.getcwd(), \"shell.log\" )\n \n # run multiple jobs\n if options.get( \"statements\" ):\n\n statement_list = []\n for statement in options.get(\"statements\"): \n options[\"statement\"] = statement\n statement_list.append(buildStatement( **options))\n \n if options.get( \"dryrun\", False ): return\n\n # get session for process - only one is permitted\n pid = os.getpid()\n if pid not in global_sessions: \n\n L.debug( \"creating new drmaa session for pid %i\" % pid )\n global_sessions[pid]=drmaa.Session() \n global_sessions[pid].initialize()\n\n session = global_sessions[pid]\n \n jt = setupJob( session )\n \n jobids, filenames = [], []\n for statement in statement_list:\n # create job script\n tmpfile = tempfile.NamedTemporaryFile( dir = os.getcwd() , delete = False )\n tmpfile.write( \"#!/bin/bash\\n\" ) # -l -O expand_aliases\\n\" )\n tmpfile.write( 'echo \"START--------------------------------\" >> %s \\n' % shellfile )\n tmpfile.write( '''echo 'statement=%s' >> %s\\n''' % (statement, shellfile) )\n tmpfile.write( \"set &>> %s\\n\" % shellfile)\n tmpfile.write( \"module list &>> %s\\n\" % shellfile )\n tmpfile.write( 'echo \"END----------------------------------\" >> %s \\n' % shellfile )\n tmpfile.write( expandStatement(statement) + \"\\n\" )\n tmpfile.close()\n\n # build paths\n job_path = os.path.abspath( tmpfile.name )\n stdout_path = job_path + \".stdout\" \n stderr_path = job_path + \".stderr\" \n\n jt.remoteCommand = job_path\n jt.outputPath=\":\"+ stdout_path\n jt.errorPath=\":\" + stderr_path\n\n os.chmod( job_path, stat.S_IRWXG | stat.S_IRWXU )\n\n jobid = session.runJob(jt)\n jobids.append( jobid )\n filenames.append( (job_path, stdout_path, stderr_path) )\n\n L.debug( \"job has been submitted with jobid %s\" % str(jobid ))\n \n L.debug( \"waiting for %i jobs to finish \" % len(jobids) )\n session.synchronize(jobids, drmaa.Session.TIMEOUT_WAIT_FOREVER, False)\n \n # collect and clean up\n for jobid, statement, paths in zip( jobids, statement_list, filenames) :\n job_path, stdout_path, stderr_path = paths\n retval = session.wait(jobid, drmaa.Session.TIMEOUT_WAIT_FOREVER)\n\n stdout, stderr = getStdoutStderr( stdout_path, stderr_path )\n\n if retval.exitStatus != 0:\n raise PipelineError( \"---------------------------------------\\n\"\n \"Child was terminated by signal %i: \\n\"\n \"The stderr was: \\n%s\\n%s\\n\" \n \"---------------------------------------\\n\" % \\\n (retval.exitStatus, \n \"\".join( stderr),\n statement ) )\n\n os.unlink( job_path )\n \n session.deleteJobTemplate(jt)\n\n # run a single parallel job\n elif (options.get( \"job_queue\" ) or options.get( \"to_cluster\" )) and not global_options.without_cluster:\n\n statement = buildStatement( **options )\n\n if options.get( 
\"dryrun\", False ): return\n\n tmpfile = tempfile.NamedTemporaryFile( dir = os.getcwd() , delete = False )\n tmpfile.write( \"#!/bin/bash\\n\" ) # -l -O expand_aliases\\n\" )\n\n tmpfile.write( 'echo \"START--------------------------------\" >> %s \\n' % shellfile )\n tmpfile.write( 'echo \"statement=%s\" >> %s\\n' % (statement, shellfile) )\n tmpfile.write( \"set &>> %s\\n\" % shellfile)\n tmpfile.write( \"module list &>> %s\\n\" % shellfile )\n tmpfile.write( 'echo \"END----------------------------------\" >> %s \\n' % shellfile )\n tmpfile.write( expandStatement( statement ) + \"\\n\" )\n tmpfile.close()\n\n job_path = os.path.abspath( tmpfile.name )\n stdout_path = job_path + \".stdout\" \n stderr_path = job_path + \".stderr\" \n\n os.chmod( job_path, stat.S_IRWXG | stat.S_IRWXU )\n\n # get session for process - only one is permitted\n pid = os.getpid()\n if pid not in global_sessions:\n L.debug( \"creating new drmaa session for pid %i\" % pid )\n global_sessions[pid]=drmaa.Session() \n global_sessions[pid].initialize()\n\n session = global_sessions[pid]\n\n jt = setupJob( session )\n\n jt.remoteCommand = job_path\n # later: allow redirection of stdout and stderr to files; can even be across hosts?\n jt.outputPath=\":\"+ stdout_path\n jt.errorPath=\":\" + stderr_path\n\n if \"job_array\" in options and options[\"job_array\"] != None:\n # run an array job\n start, end, increment = options.get(\"job_array\" )\n L.debug(\"starting an array job: %i-%i,%i\" % (start, end, increment ))\n # sge works with 1-based, closed intervals\n jobids = session.runBulkJobs( jt, start+1, end, increment )\n L.debug( \"%i array jobs have been submitted as jobid %s\" % (len(jobids), jobids[0]) )\n retval = session.synchronize(jobids, drmaa.Session.TIMEOUT_WAIT_FOREVER, True)\n else:\n jobid = session.runJob(jt)\n L.debug( \"job has been submitted with jobid %s\" % str(jobid ))\n try:\n retval = session.wait(jobid, drmaa.Session.TIMEOUT_WAIT_FOREVER)\n except Exception, msg:\n # ignore message 24 in PBS\n # code 24: drmaa: Job finished but resource usage information and/or termination status could not be provided.\":\n if not msg.message.startswith(\"code 24\"): raise\n retval = None\n\n stdout, stderr = getStdoutStderr( stdout_path, stderr_path )\n\n if \"job_array\" not in options:\n if retval and retval.exitStatus != 0:\n raise PipelineError( \"---------------------------------------\\n\"\n \"Child was terminated by signal %i: \\n\"\n \"The stderr was: \\n%s\\n%s\\n\"\n \"-----------------------------------------\" % \\\n (retval.exitStatus, \n \"\".join( stderr), statement))\n \n session.deleteJobTemplate(jt)\n os.unlink( job_path )\n\n else:\n statement = buildStatement( **options )\n\n if options.get( \"dryrun\", False ): return\n \n if \"<(\" in statement:\n if \"'\" in statement: raise ValueError( \"advanced bash syntax combined with single quotes\" )\n statement = \"\"\"/bin/bash -c '%s'\"\"\" % statement\n\n process = subprocess.Popen( expandStatement( statement ),\n cwd = os.getcwd(), \n shell = True,\n stdin = subprocess.PIPE,\n stdout = subprocess.PIPE,\n stderr = subprocess.PIPE )\n\n # process.stdin.close()\n stdout, stderr = process.communicate()\n\n if process.returncode != 0:\n raise PipelineError( \"---------------------------------------\\n\"\n \"Child was terminated by signal %i: \\n\"\n \"The stderr was: \\n%s\\n%s\\n\"\n \"-----------------------------------------\" % \\\n (-process.returncode, stderr, statement ))",
"def arcSubmit_oneJob(model_list, config,rootDir, verbose=False, resubmit=None, runCode=None):\r\n\t\r\n\t#jobID = []\r\n\tfor model in model_list:\r\n\t\t# put some dummy data in the ouput file\r\n\t\tmodelSubmitName=model.submit()\r\n\t\tif verbose: print \"Submitting \",modelSubmitName\r\n\t\twith cd(model.dirPath):\r\n\t\t\tsubprocess.check_output(modelSubmitName, shell=True) # submit the script\r\n\r\n\treturn True",
"def RunJobs(self, runfile_mapping, server_run_map):\n if self.workflow is None:\n raise RuntimeError(\"Tried to create unnamed workflow!\")\n\n \n # Generate jobs for the first pass over the data\n for run in sorted(runfile_mapping.keys()):\n if self.VERBOSE>0:\n inputfiles=\"/%s/rawdata/volatile/%s/rawdata/Run%06d/hd_rawdata_*.evio\"%(HDRunFileRAIDList.GetRAIDDirFromRun(run,server_run_map),HDJobUtils.GetRunPeriodFromRun(run),run)\n\n # PASS 0\n print \"processing run %d, phase 0 ...\"%(int(run))\n\n # set up command to execute\n if self.nthreads:\n cmd += \" %s/scripts/%s %s %s %06d %03d %d\"%(self.basedir,\"job_wrapper_local.csh\",\"local_calib_pass0.csh\",self.basedir,run,inputfiles,int(self.nthreads))\n else:\n cmd += \" %s/scripts/%s %s %s %06d %03d\"%(self.basedir,\"job_wrapper_local.csh\",\"local_calib_pass0.csh\",self.basedir,run,inputfiles)\n\n # run command\n os.system(cmd)\n\n # PASS 1\n print \"processing run %d, phase 1 ...\"%(int(run))\n\n # set up command to execute\n if self.nthreads:\n cmd += \" %s/scripts/%s %s %s %06d %03d %d\"%(self.basedir,\"job_wrapper_local.csh\",\"local_calib_pass1.csh\",self.basedir,run,inputfiles,int(self.nthreads))\n else:\n cmd += \" %s/scripts/%s %s %s %06d %03d\"%(self.basedir,\"job_wrapper_local.csh\",\"local_calib_pass1.csh\",self.basedir,run,inputfiles)\n\n # run command\n os.system(cmd)",
"def main():\n gh = Github(os.environ['GH_TOKEN'])\n # gh_token = os.environ['GH_TOKEN']\n gc_token_path = os.environ['GC_STORAGE_KEY']\n\n vars_file_path = os.getenv('VARS_FILE', \"\")\n pipeline_id = os.getenv('CI_PIPELINE_ID', 0)\n\n repo = gh.get_repo(\"kubeinit/kubeinit\")\n branches = repo.get_branches()\n\n output = 0\n # Something linke:\n # url = \"https://gitlab.com/kubeinit/kubeinit-ci/pipelines/\"\n url = os.getenv('CI_PIPELINE_URL', \"\")\n print(\"The job results will be published in runtime at: \" + url)\n\n for branch in branches:\n for pr in repo.get_pulls(state='open', sort='created', base=branch.name):\n labels = [item.name for item in pr.labels]\n\n sha = pr.head.sha\n committer_email = repo.get_commit(sha=sha).commit.committer.email\n print(committer_email)\n\n execute = False\n # We assign the executed label to avoid executing this agains the same PR over and over\n # We mark the PR as e2e-executed\n\n for label in labels:\n if re.match(r\".*-.*-.*-.*-.*-.*-.*\", label):\n print('Matching a PR label')\n params = label.split(\"-\")\n distro = params[0]\n driver = params[1]\n master = params[2]\n worker = params[4]\n scenario = params[6]\n execute = True\n remove_label(label, pr, repo)\n break\n\n if execute:\n now = datetime.now()\n now.strftime(\"%m.%d.%Y.%H.%M.%S\")\n job_name = pipeline_id + \"-\" + distro + \"-\" + driver + \"-\" + master + \"-\" + worker + \"-\" + scenario + \"-\" + now.strftime(\"%Y.%m.%d.%H.%M.%S\")\n print(\"Let's run the e2e job, distro %s driver %s \" % (distro, driver))\n print(\"-------------\")\n print(\"-------------\")\n print(\"Running the e2e job for: \" + str(pr.number) + \" \" + pr.title)\n print(\"-------------\")\n print(\"-------------\")\n print(\"-------------\")\n\n # We update the status to show that we are executing the e2e test\n print(\"Current status\")\n print(repo.get_commit(sha=sha).get_statuses())\n repo.get_commit(sha=sha).create_status(state=\"pending\",\n target_url=url + str(pipeline_id),\n description=\"Running...\",\n context=\"%s-%s-%s-controller-%s-compute-%s\" % (distro,\n driver,\n master,\n worker,\n scenario))\n print(\"The pipeline ID is: \" + str(pipeline_id))\n print(\"The clouds.yml path is: \" + str(vars_file_path))\n # We trigger the e2e job\n start_time = time.time()\n try:\n print(\"We call the downstream job configuring its parameters\")\n subprocess.check_call(\"./ci/run_kubeinit.sh %s %s %s %s %s %s %s %s\" % (str(branch.name),\n str(pr.number),\n str(vars_file_path),\n str(distro),\n str(driver),\n str(master),\n str(worker),\n str(scenario)),\n shell=True)\n except Exception as e:\n print('An exception hapened executing Ansible')\n print(e)\n output = 1\n\n try:\n print(\"Render ara data\")\n subprocess.check_call(\"./ci/ara.sh %s\" % (str(job_name) + \"-\" + str(output)), shell=True)\n except Exception as e:\n print('An exception hapened rendering ara data')\n print(e)\n output = 1\n\n print(\"starting the uploader job\")\n # No matter if the job passed or failed we always use go as the suffix\n # upload_error = upload_logs_to_github(str(job_name) + \"-\" + str(output), gh_token)\n upload_error = upload_logs_to_google_cloud(str(job_name) + \"-\" + str(output), gc_token_path)\n render_index(gc_token_path)\n print(\"finishing the uploader job\")\n\n if output == 0:\n state = \"success\"\n else:\n state = \"failure\"\n\n desc = (\"Ended with %s in %s minutes\" % (state, round((time.time() - start_time) / 60, 2)))\n\n print(desc)\n print(state)\n\n if upload_error == 1:\n dest_url = url + 
str(pipeline_id)\n else:\n dest_url = 'https://storage.googleapis.com/kubeinit-ci/jobs/pr/' + str(job_name) + \"-\" + str(output) + '/index.html'\n\n print(\"The destination URL is: \" + dest_url)\n # We update the status with the job result\n repo.get_commit(sha=sha).create_status(state=state,\n target_url=dest_url,\n description=desc,\n context=\"%s-%s-%s-controller-%s-compute-%s\" % (distro,\n driver,\n master,\n worker,\n scenario))\n else:\n print(\"No need to do anything\")\n if execute:\n exit()",
"def make_jobs(commands, job_prefix, queue, jobs_dir=\"jobs/\",\r\n walltime=\"72:00:00\", ncpus=1, nodes=1, keep_output=\"oe\"):\r\n\r\n filenames = []\r\n create_dir(jobs_dir)\r\n for command in commands:\r\n fd, job_name = mkstemp(dir=jobs_dir, prefix=job_prefix + \"_\",\r\n suffix=\".txt\")\r\n close(fd)\r\n out_fh = open(job_name, \"w\")\r\n\r\n out_fh.write(QSUB_TEXT % (walltime, ncpus, nodes, queue, job_prefix,\r\n keep_output, command))\r\n out_fh.close()\r\n filenames.append(job_name)\r\n return filenames",
"def write_pbs(self):\n fout = open(\"runStarCCM.pbs\", \"w\")\n fout.write(\"#PBS -S /bin/csh\\n\")\n fout.write(\"#PBS -l select=\" + str(self.numNodes) + \":ncpus=\" + str(self.numCPUs) + \":mpiprocs=\" + str(self.mpiProcs) + \":model=has,walltime=\" + self.WallTime + \"\\n\\n\")\n fout.write(\"#PBS -W group_list=\" + self.GroupID + \"\\n\")\n fout.write(\"#PBS -j oe\\n\")\n fout.write(\"#PBS -q \" + self.queue + \"\\n\")\n fout.write(\"#PBS -N \" + self.jobName + \"\\n\")\n fout.write(\"#PBS -m e\\n\")\n fout.write(\"#PBS -W block=true\\n\\n\")\n fout.write(\"cd $PBS_O_WORKDIR\\n\")\n\n if self.runVolGrid == 1:\n #fout.write(\"/bin/rm -f \" + self.simMeshFile + \".sim\\n\")\n fout.write(\"/bin/rm -f starccmMeshRun.out\\n\")\n fout.write(\"chmod u+x \" + self.cshBatch1File + \".csh\\n\")\n # do not use >>& because it will fail in some environment\n fout.write(\"./\" + self.cshBatch1File + \".csh -powerOnDemand \" + self.javaBatch1File + \".java >& starccmMeshRun.out\\n\\n\")\n else:\n fout.write(\"echo 'User chooses not to make a mesh run.'\\n\")\n\n if self.runCFD == 1:\n fout.write(\"chmod u+x \" + self.cshBatch2File + \".csh\\n\")\n fout.write(\"/bin/rm -f *.csv *.png starccmFlowRun.out\\n\")\n # do not use >>& because it will fail in some environment\n fout.write(\"./\" + self.cshBatch2File + \".csh -powerOnDemand \" + self.javaBatch2File + \".java \" + self.simMeshFile + \" >& starccmFlowRun.out\\n\\n\")\n fout.write(\"# rename the strange file names\\n\")\n fout.write(\"/bin/mv \\$PWDForceX.csv ForceX.csv\\n\")\n fout.write(\"/bin/mv \\$PWDForceY.csv ForceY.csv\\n\")\n fout.write(\"/bin/mv \\$PWDForceZ.csv ForceZ.csv\\n\")\n fout.write(\"/bin/mv \\$PWDMomentX.csv MomentX.csv\\n\")\n fout.write(\"/bin/mv \\$PWDMomentY.csv MomentY.csv\\n\")\n fout.write(\"/bin/mv \\$PWDMomentZ.csv MomentZ.csv\\n\")\n fout.write(\"/bin/mv \\$PWDResiduals.csv Residuals.csv\\n\\n\")\n fout.write(\"/bin/mv \\$PWDForceX.png ForceX.png\\n\")\n fout.write(\"/bin/mv \\$PWDForceY.png ForceY.png\\n\")\n fout.write(\"/bin/mv \\$PWDForceZ.png ForceZ.png\\n\")\n fout.write(\"/bin/mv \\$PWDMomentX.png MomentX.png\\n\")\n fout.write(\"/bin/mv \\$PWDMomentY.png MomentY.png\\n\")\n fout.write(\"/bin/mv \\$PWDMomentZ.png MomentZ.png\\n\")\n fout.write(\"/bin/mv \\$PWDResiduals.png Residuals.png\\n\")\n fout.write(\"/bin/mv \\$PWDUpperCp.png UpperCp.png\\n\")\n fout.write(\"/bin/mv \\$PWDLowerCp.png LowerCp.png\\n\")\n fout.write(\"/bin/rm -rf null\\n\")\n else:\n fout.write(\"echo 'User chooses not to make a CFD run.'\\n\")\n\n fout.close()",
"def submit(model_list, config, rootDir, verbose=False, resubmit=None, runCode=None, runTime = None):\r\n\r\n\toutputPath=config.postProcessOutput()\r\n\t\r\n\t# iterate over list of models to submit\r\n\tfor model in model_list:\r\n\t\t# put some dummy data in the ouput file\r\n\t\tmodelSubmitName=model.submit()\r\n\t\tif verbose: print \"Submitting \",modelSubmitName\r\n\t\twith cd(model.dirPath):\r\n\t\t\tsubprocess.check_output(modelSubmitName, shell=True) # submit the script\r\n\r\n\r\n# end of submit\r\n\treturn True # submission worked!\r"
] | [
"0.7209775",
"0.6585105",
"0.6554315",
"0.64478266",
"0.6284272",
"0.62066483",
"0.61065704",
"0.60671365",
"0.6054951",
"0.60292995",
"0.5986418",
"0.597275",
"0.59198385",
"0.5856972",
"0.5800811",
"0.577773",
"0.5759242",
"0.57556045",
"0.5738472",
"0.5737264",
"0.5733993",
"0.57285595",
"0.57191616",
"0.56889784",
"0.5673297",
"0.56720227",
"0.56450224",
"0.5622982",
"0.56191146",
"0.56135744"
] | 0.73230857 | 0 |
Writes a parallelized workflow by using the UNIX run-in-background feature (&). Creates sub shell scripts that contain the workflow for each input file separately. After this, the main shell script is written, in which each workflow is started as a background process using the shell & character. Workflow parts are separated by the wait command to synchronize progress between parts. | def write_unix(workloads, input_file_parameters, command_line_parameters):
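    # Illustrative shape of the single main shell script this writer
    # produces (directory and NAME prefix below are placeholders, not
    # literal output):
    #
    #   <resource manager / header parameter lines>
    #
    #   echo "Running workload part 1"
    #   source <out_dir>/<NAME>_WORKLOAD_1_subshell_0.sh >> <out_dir>/<NAME>_WORKLOAD_1_subshell_0.sh.out 2>&1 &
    #   source <out_dir>/<NAME>_WORKLOAD_1_subshell_1.sh >> <out_dir>/<NAME>_WORKLOAD_1_subshell_1.sh.out 2>&1 &
    #   wait
    #
    #   echo "Running workload part 2"
    #   source <out_dir>/<NAME>_WORKLOAD_2_subshell_0.sh >> <out_dir>/<NAME>_WORKLOAD_2_subshell_0.sh.out 2>&1 &
    #   wait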
workload_index = 0
workload_zfill_amount = len(str(len(workloads)))
background_process_list = []
for workload in workloads:
        # In UNIX mode all workflow parts are written into a single main
        # shell script. Each workflow part has one or more associated
        # subshell files containing the commands for each thread.
# Generate strings describing current workload and thread indexes for
# output file names
workload_index += 1
workload_index_string = str(workload_index).zfill(workload_zfill_amount)
file_main_name = '{0}_UNIX_WORKLOAD_1'.format(NAME)
# Add information about current workflow to the main shell script
background_process_list.append('echo "Running workload part {0}"'.format(
workload_index))
        # When --fix_run mode is used the output and log files already
# exist. To prevent overwriting these files with new ones specific
# prefix or appendix strings are added to the new output file names.
appendix = '.sh'
i = 0
if command_line_parameters.fix_run:
mode = 'FIX'
elif command_line_parameters.compress_run == 'compress':
mode = 'COMPRESS'
elif command_line_parameters.compress_run == 'decompress':
mode = 'DECOMPRESS'
else:
mode = None
        while mode == 'FIX' and os.path.exists(os.path.join(input_file_parameters.output_dir,
file_main_name + appendix)):
i += 1
appendix = '_{0}_{1}.sh'.format(mode, i)
if mode in ('COMPRESS', 'DECOMPRESS'):
appendix = '_{0}.sh'.format(mode)
while os.path.exists(os.path.join(input_file_parameters.output_dir,
file_main_name + appendix)):
i += 1
appendix = '_{0}_{1}.sh'.format(mode, i)
# Generate subshell files
thread_index = 0
thread_zfill_amount = len(str(len(workload)))
for thread_contents in workload:
# Iterate over output commands of each thread and write necessary
# subshell files for each
out_lines = []
cmds_in_thread = len(thread_contents)
for i in xrange(cmds_in_thread):
# Check if any modules need loading or are they loaded by previous command
skip_module_loading = False
if i > 0:
if thread_contents[i].load_module == thread_contents[i-1].load_module:
skip_module_loading = True
# Check if any modules need unloading or will they be used by following command
skip_module_unloading = False
if i < cmds_in_thread-1:
if thread_contents[i].load_module == thread_contents[i+1].load_module:
skip_module_unloading = True
out_lines += generate_subshell_file_contents(thread_contents[i],
skip_module_loading,
skip_module_unloading)
# Write subshell file
thread_index_string = str(thread_index).zfill(thread_zfill_amount)
fl_name = '{0}_WORKLOAD_{1}_subshell_{2}{3}'.format(NAME,
workload_index_string,
thread_index_string,
appendix)
try:
out_fl = open(os.path.join(input_file_parameters.output_dir,
fl_name), 'w')
except:
raise STAPLERerror.STAPLERerror('Unable to create output file:'
'\n{0}'.format(os.path.join(
input_file_parameters.output_dir,
fl_name)))
out_fl.write('\n'.join(out_lines))
out_fl.write('\n')
out_fl.close()
            # Use the shell 'source' builtin to run the subshell script in the
            # background, redirecting its stdout and stderr to an .out file.
background_process_list.append('source {0} >> {0}.out 2>&1 &'.format(
os.path.join(input_file_parameters.output_dir,
fl_name)))
thread_index += 1
# Workflow steps are written to a single output file (instead of
# separate files). "wait" command is inserted in between workflow parts
# to synchronize workflows.
background_process_list.append('wait\n\n')
# Write the main shell script file
resmng_config = list(input_file_parameters.resource_manager_params)
resmng_config.append('\n\n')
resmng_config.append('\n'.join(background_process_list))
out_fl_path = os.path.join(input_file_parameters.output_dir, file_main_name + appendix)
try:
out_fl = open(out_fl_path, 'w')
except IOError as emsg:
raise STAPLERerror.STAPLERerror('Unable to create output file:'
'\n{0}\n with error message:\n{1}'
.format(os.path.join(input_file_parameters.output_dir,
file_main_name + appendix),
str(emsg)))
out_fl.write('\n'.join(resmng_config))
out_fl.write('\n')
out_fl.close()
return [out_fl_path] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write_slurm(workloads, input_file_parameters, command_line_parameters):\r\n workload_index = 0\r\n workload_zfill_amount = len(str(len(workloads)))\r\n workload_file_paths = []\r\n for workload in workloads:\r\n # Each workflow part will have separate file to submit to SLURM with\r\n # sbatch command. Each file has one or more associated subshell files\r\n # containing contents for each thread.\r\n\r\n # Generate strings describing current workload and thread indexes for\r\n # output file names\r\n workload_index += 1\r\n workload_index_string = str(workload_index).zfill(workload_zfill_amount)\r\n file_main_name = '{0}_SBATCH_WORKLOAD_{1}'.format(NAME,\r\n workload_index_string)\r\n\r\n # When --fix_run mode is used the output and log files files already\r\n # exist. To prevent overwriting these files with new ones specific\r\n # prefix or appendix strings are added to the new output file names.\r\n appendix = '.sh'\r\n prefix = ''\r\n i = 0\r\n if command_line_parameters.fix_run:\r\n mode = 'FIX'\r\n elif command_line_parameters.compress_run == 'compress':\r\n mode = 'COMPRESS'\r\n elif command_line_parameters.compress_run == 'decompress':\r\n mode = 'DECOMPRESS'\r\n else:\r\n mode = None\r\n\r\n while mode is not None and os.path.exists(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix)):\r\n i += 1\r\n prefix = '{0}_{1}_'.format(mode, i)\r\n appendix = '_{0}_{1}.sh'.format(mode, i)\r\n\r\n # Generate subshell files\r\n thread_index = 0\r\n for thread_contents in workload:\r\n # Iterate over output commands of each thread and write necessary\r\n # subshell files for each\r\n out_lines = []\r\n cmds_in_thread = len(thread_contents)\r\n for i in xrange(cmds_in_thread):\r\n # Check if any modules need loading or are they loaded by previous command\r\n skip_module_loading = False\r\n if i > 0:\r\n if thread_contents[i].load_module == thread_contents[i-1].load_module:\r\n skip_module_loading = True\r\n # Check if any modules need unloading or will they be used by following command\r\n skip_module_unloading = False\r\n if i < cmds_in_thread-1:\r\n if thread_contents[i].load_module == thread_contents[i+1].load_module:\r\n skip_module_unloading = True\r\n out_lines += generate_subshell_file_contents(thread_contents[i],\r\n skip_module_loading,\r\n skip_module_unloading)\r\n\r\n\r\n # Write subshell file\r\n thread_index += 1\r\n thread_index_string = str(thread_index)\r\n fl_name = '{0}_WORKLOAD_{1}_subshell_{2}{3}'.format(NAME,\r\n workload_index_string,\r\n thread_index_string,\r\n appendix)\r\n try:\r\n out_fl = open(os.path.join(input_file_parameters.output_dir,\r\n fl_name), 'w')\r\n except:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}'.format(os.path.join(\r\n input_file_parameters.output_dir,\r\n fl_name)))\r\n out_fl.write('\\n'.join(out_lines))\r\n out_fl.write('\\n')\r\n out_fl.close()\r\n\r\n # Create lines for SLURM input file by generating job-name, output,\r\n # error and array parameters based on user input\r\n status_file_basename = os.path.join(input_file_parameters.output_dir,\r\n prefix + input_file_parameters.job_name)\r\n resmng_config = list(input_file_parameters.resource_manager_params)\r\n resmng_config.append('#SBATCH --job-name={0}'.format(input_file_parameters.job_name))\r\n resmng_config.append('#SBATCH --output={0}_%A_%a.out'.format(status_file_basename))\r\n resmng_config.append('#SBATCH --error={0}_%A_%a.err'.format(status_file_basename))\r\n resmng_config.append('#SBATCH --array={0}-{1}'.format(1, 
len(workload)))\r\n\r\n resmng_config.append('\\n\\n')\r\n subshell_file_path = '{0}_WORKLOAD_{1}_subshell_{2}{3}'.format(NAME,\r\n workload_index_string,\r\n '\"$SLURM_ARRAY_TASK_ID\"',\r\n appendix)\r\n subshell_file_path = os.path.join(input_file_parameters.output_dir,\r\n subshell_file_path)\r\n resmng_config.append('source {0}'.format(subshell_file_path))\r\n\r\n out_fl_path = os.path.join(input_file_parameters.output_dir,file_main_name + appendix)\r\n workload_file_paths.append(out_fl_path)\r\n try:\r\n out_fl = open(out_fl_path, 'w')\r\n\r\n except IOError as emsg:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}\\n with error message:\\n{1}'\r\n .format(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix),\r\n str(emsg)))\r\n out_fl.write('\\n'.join(resmng_config))\r\n out_fl.write('\\n')\r\n out_fl.close()\r\n return workload_file_paths",
"def write_torque(workloads, input_file_parameters, command_line_parameters):\r\n validate_resource_manager_parameters(\r\n input_file_parameters.resource_manager_params,\r\n ['#PBS -k', '#PBS -N', '#PBS -d', '#PBS -e', '#PBS -t'])\r\n\r\n workload_index = 0\r\n workload_zfill_amount = len(str(len(workloads)))\r\n workload_file_paths = []\r\n for workload in workloads:\r\n # Each workflow part will have separate file to submit to TORQUE with\r\n # sbatch command. Each file has one or more associated subshell files\r\n # containing contents for each thread.\r\n\r\n # Generate strings describing current workload and thread indexes for\r\n # output file names\r\n workload_index += 1\r\n workload_index_string = str(workload_index).zfill(workload_zfill_amount)\r\n file_main_name = '{0}_TORQUE_WORKLOAD_{1}'.format(NAME,\r\n workload_index_string)\r\n\r\n # When --fix_run mode is used the output and log files files already\r\n # exist. To prevent overwriting these files with new ones specific\r\n # prefix or appendix strings are added to the new output file names.\r\n appendix = '.sh'\r\n i = 0\r\n if command_line_parameters.fix_run:\r\n mode = 'FIX'\r\n elif command_line_parameters.compress_run == 'compress':\r\n mode = 'COMPRESS'\r\n elif command_line_parameters.compress_run == 'decompress':\r\n mode = 'DECOMPRESS'\r\n else:\r\n mode = None\r\n while mode is not None and os.path.exists(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix)):\r\n i += 1\r\n appendix = '_{0}_{1}.sh'.format(mode, i)\r\n\r\n # Generate subshell files\r\n thread_index = 0\r\n for thread_contents in workload:\r\n # Iterate over output commands of each thread and write necessary\r\n # subshell files for each\r\n out_lines = []\r\n cmds_in_thread = len(thread_contents)\r\n for i in xrange(cmds_in_thread):\r\n # Check if any modules need loading or are they loaded by previous command\r\n skip_module_loading = False\r\n if i > 0:\r\n if thread_contents[i].load_module == thread_contents[i-1].load_module:\r\n skip_module_loading = True\r\n # Check if any modules need unloading or will they be used by following command\r\n skip_module_unloading = False\r\n if i < cmds_in_thread-1:\r\n if thread_contents[i].load_module == thread_contents[i+1].load_module:\r\n skip_module_unloading = True\r\n out_lines += generate_subshell_file_contents(thread_contents[i],\r\n skip_module_loading,\r\n skip_module_unloading)\r\n\r\n\r\n # Write subshell file\r\n thread_index_string = str(thread_index)\r\n fl_name = '{0}_WORKLOAD_{1}_subshell_{2}{3}'.format(NAME,\r\n workload_index_string,\r\n thread_index_string,\r\n appendix)\r\n try:\r\n out_fl = open(os.path.join(input_file_parameters.output_dir,\r\n fl_name), 'w')\r\n except:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}'.format(os.path.join(\r\n input_file_parameters.output_dir,\r\n fl_name)))\r\n out_fl.write('\\n'.join(out_lines))\r\n out_fl.write('\\n')\r\n out_fl.close()\r\n thread_index += 1\r\n\r\n # Create lines for TORQUE input file by generating job-name, output,\r\n # error and array parameters based on user input\r\n\r\n # IF YOU ADD NEW AUTOMATICALLY INFERRED PARAMETERS, REMEMBER TO VALIDATE\r\n # THEM AT THE BEGINNING OF THIS FUNCTION\r\n resmng_config = list(input_file_parameters.resource_manager_params)\r\n resmng_config.append('#PBS -k eo')\r\n resmng_config.append('#PBS -N {0}'.format(input_file_parameters.job_name))\r\n resmng_config.append('#PBS -d {0}'.format(input_file_parameters.output_dir))\r\n 
resmng_config.append('#PBS -e {0}'.format(input_file_parameters.output_dir))\r\n resmng_config.append('#PBS -t {0}-{1}'.format(0, len(workload)-1))\r\n\r\n resmng_config.append('\\n\\n')\r\n subshell_file_path = '{0}_WORKLOAD_{1}_subshell_{2}{3}'.format(NAME,\r\n workload_index_string,\r\n '\"${PBS_ARRAYID}\"',\r\n appendix)\r\n subshell_file_path = os.path.join(input_file_parameters.output_dir,\r\n subshell_file_path)\r\n resmng_config.append('source {0}'.format(subshell_file_path))\r\n\r\n out_fl_path = os.path.join(input_file_parameters.output_dir,file_main_name + appendix)\r\n workload_file_paths.append(out_fl_path)\r\n try:\r\n out_fl = open(out_fl_path, 'w')\r\n except IOError as emsg:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}\\n with error message:\\n{1}'\r\n .format(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix),\r\n str(emsg)))\r\n out_fl.write('\\n'.join(resmng_config))\r\n out_fl.write('\\n')\r\n out_fl.close()\r\n return workload_file_paths",
"def write_lsf(workloads, input_file_parameters, command_line_parameters):\r\n workload_index = 0\r\n workload_zfill_amount = len(str(len(workloads)))\r\n workload_file_paths = []\r\n for workload in workloads:\r\n # Each workflow part will have separate file to submit to TORQUE with\r\n # sbatch command. Each file has one or more associated subshell files\r\n # containing contents for each thread.\r\n\r\n # Generate strings describing current workload and thread indexes for\r\n # output file names\r\n workload_index += 1\r\n workload_index_string = str(workload_index).zfill(workload_zfill_amount)\r\n file_main_name = '{0}_LSF_WORKLOAD_{1}'.format(NAME,\r\n workload_index_string)\r\n\r\n # When --fix_run mode is used the output and log files files already\r\n # exist. To prevent overwriting these files with new ones specific\r\n # prefix or appendix strings are added to the new output file names.\r\n appendix = '.sh'\r\n i = 0\r\n if command_line_parameters.fix_run:\r\n mode = 'FIX'\r\n elif command_line_parameters.compress_run == 'compress':\r\n mode = 'COMPRESS'\r\n elif command_line_parameters.compress_run == 'decompress':\r\n mode = 'DECOMPRESS'\r\n else:\r\n mode = None\r\n while mode is not None and os.path.exists(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix)):\r\n i += 1\r\n appendix = '_{0}_{1}.sh'.format(mode, i)\r\n\r\n # Generate subshell files\r\n thread_index = 0\r\n for thread_contents in workload:\r\n # Iterate over output commands of each thread and write necessary\r\n # subshell files for each\r\n out_lines = []\r\n cmds_in_thread = len(thread_contents)\r\n for i in xrange(cmds_in_thread):\r\n # Check if any modules need loading or are they loaded by previous command\r\n skip_module_loading = False\r\n if i > 0:\r\n if thread_contents[i].load_module == thread_contents[i-1].load_module:\r\n skip_module_loading = True\r\n # Check if any modules need unloading or will they be used by following command\r\n skip_module_unloading = False\r\n if i < cmds_in_thread-1:\r\n if thread_contents[i].load_module == thread_contents[i+1].load_module:\r\n skip_module_unloading = True\r\n out_lines += generate_subshell_file_contents(thread_contents[i],\r\n skip_module_loading,\r\n skip_module_unloading)\r\n\r\n # Write subshell file\r\n thread_index_string = str(thread_index)\r\n fl_name = '{0}_WORKLOAD_{1}_subshell_{2}{3}'.format(NAME,\r\n workload_index_string,\r\n thread_index_string,\r\n appendix)\r\n try:\r\n out_fl = open(os.path.join(input_file_parameters.output_dir,\r\n fl_name), 'w')\r\n except:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}'.format(os.path.join(\r\n input_file_parameters.output_dir,\r\n fl_name)))\r\n out_fl.write('\\n'.join(out_lines))\r\n out_fl.write('\\n')\r\n out_fl.close()\r\n thread_index += 1\r\n\r\n # Generate parameter file for the bsub run\r\n resmng_config = []\r\n resmng_config.append('#BSUB-J \"{0}[1-{1}]\"'.format(\r\n input_file_parameters.job_name,\r\n len(workload)))\r\n resmng_config.append('#BSUB-i {0}_WORKLOAD_{1}_subshell_{2}{3}'.format(\r\n NAME,\r\n workload_index_string,\r\n '%I',\r\n appendix))\r\n resmng_config.append('#BSUB-o {0}_WORKLOAD_{1}_subshell_{2}{3}.out'.format(\r\n NAME,\r\n workload_index_string,\r\n '%I',\r\n appendix))\r\n resmng_config += input_file_parameters.resource_manager_params\r\n\r\n out_fl_path = os.path.join(input_file_parameters.output_dir, file_main_name + appendix)\r\n workload_file_paths.append(out_fl_path)\r\n try:\r\n out_fl = open(out_fl_path, 
'w')\r\n\r\n except IOError as emsg:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}\\n with error message:\\n{1}'\r\n .format(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix),\r\n str(emsg)))\r\n out_fl.write('\\n'.join(resmng_config))\r\n out_fl.write('\\n')\r\n out_fl.close()\r\n return workload_file_paths",
"def job_workflow(workflow, jobfiles, jwcl=WCL()):\n #pylint: disable=protected-access,expression-not-assigned,lost-exception\n global pool\n global results\n global stop_all\n global jobfiles_global\n global job_track\n global keeprunning\n global donejobs\n global result_lock\n global lock_monitor\n\n infullnames = {}\n with open(workflow, 'r') as workflowfh:\n # for each wrapper execution\n lines = workflowfh.readlines()\n sys.stdout.flush()\n inputs = {}\n # read in all of the lines in dictionaries\n for linecnt, line in enumerate(lines):\n wrapnum = miscutils.fwsplit(line.strip())[0]\n task = parse_wrapper_line(line, linecnt)\n #task['logfile'] = None\n wcl = WCL()\n with open(task['wclfile'], 'r') as wclfh:\n wcl.read(wclfh, filename=task['wclfile'])\n wcl.update(jwcl)\n\n # get fullnames for inputs and outputs\n ins, _ = intgmisc.get_fullnames(wcl, wcl, None)\n del wcl\n # save input filenames to eliminate from junk tarball later\n infullnames[wrapnum] = []\n for isect in ins:\n for ifile in ins[isect]:\n infullnames[wrapnum].append(ifile)\n jobfiles['infullnames'].extend(ifile)\n inputs[wrapnum] = (task, copy.deepcopy(jobfiles), jwcl, ins)\n job_track[task['wrapnum']] = (task['logfile'], jobfiles)\n # get all of the task groupings, they will be run in numerical order\n tasks = jwcl[\"fw_groups\"].keys()\n tasks.sort()\n # loop over each grouping\n manager = mp.Manager()\n for task in tasks:\n results = [] # the results of running each task in the group\n # get the maximum number of parallel processes to run at a time\n nproc = int(jwcl[\"fw_groups\"][task][\"fw_nthread\"])\n procs = miscutils.fwsplit(jwcl[\"fw_groups\"][task][\"wrapnums\"])\n tempproc = []\n # pare down the list to include only those in this run\n for p in procs:\n if p in inputs.keys():\n tempproc.append(p)\n procs = tempproc\n if nproc > 1:\n numjobs = len(procs)\n # set up the thread pool\n pool = mp.Pool(processes=nproc, maxtasksperchild=2)\n outq = manager.Queue()\n errq = manager.Queue()\n with lock_monitor:\n try:\n donejobs = 0\n # update the input files now, so that it only contains those from the current taks(s)\n for inp in procs:\n jobfiles_global['infullnames'].extend(infullnames[inp])\n # attach all the grouped tasks to the pool\n [pool.apply_async(job_thread, args=(inputs[inp] + (outq, errq, True,),), callback=results_checker) for inp in procs]\n pool.close()\n time.sleep(10)\n while donejobs < numjobs and keeprunning:\n count = 0\n while count < 2:\n count = 0\n try:\n msg = outq.get_nowait()\n print msg\n except:\n count += 1\n try:\n errm = errq.get_nowait()\n sys.stderr.write(errm)\n except:\n count += 1\n time.sleep(.1)\n except:\n results.append(1)\n exc_type, exc_value, exc_traceback = sys.exc_info()\n traceback.print_exception(exc_type, exc_value, exc_traceback,\n limit=4, file=sys.stdout)\n\n raise\n\n finally:\n if stop_all and max(results) > 0:\n # wait to give everything time to do the first round of cleanup\n time.sleep(20)\n # get any waiting messages\n for _ in range(1000):\n try:\n msg = outq.get_nowait()\n print msg\n except:\n break\n for _ in range(1000):\n try:\n errm = errq.get_nowait()\n sys.stderr.write(errm)\n except:\n break\n if not result_lock.acquire(False):\n lock_monitor.wait(60)\n else:\n result_lock.release()\n # empty the worker queue so nothing else starts\n terminate(force=True)\n # wait so everything can clean up, otherwise risk a deadlock\n time.sleep(50)\n del pool\n while True:\n try:\n msg = outq.get(timeout=.1)\n print msg\n except:\n break\n\n while True:\n 
try:\n errm = errq.get(timeout=.1)\n sys.stderr.write(errm)\n except:\n break\n # in case the sci code crashed badly\n if not results:\n results.append(1)\n jobfiles = jobfiles_global\n jobfiles['infullnames'] = list(set(jobfiles['infullnames']))\n if stop_all and max(results) > 0:\n return max(results), jobfiles\n # if running in single threaded mode\n else:\n temp_stopall = stop_all\n stop_all = False\n\n donejobs = 0\n for inp in procs:\n try:\n jobfiles_global['infullnames'].extend(infullnames[inp])\n results_checker(job_thread(inputs[inp] + (sys.stdout, sys.stderr, False,)))\n except:\n (extype, exvalue, trback) = sys.exc_info()\n traceback.print_exception(extype, exvalue, trback, file=sys.stdout)\n results = [1]\n jobfiles = jobfiles_global\n if results[-1] != 0:\n return results[-1], jobfiles\n stop_all = temp_stopall\n\n\n return 0, jobfiles",
"def write_sge(workloads, input_file_parameters, command_line_parameters):\r\n validate_resource_manager_parameters(\r\n input_file_parameters.resource_manager_params,\r\n ['# -o', '# -e', '# -t'])\r\n\r\n workload_index = 0\r\n workload_zfill_amount = len(str(len(workloads)))\r\n workload_file_paths = []\r\n for workload in workloads:\r\n # Each workflow part will have separate file to submit to TORQUE with\r\n # sbatch command. Each file has one or more associated subshell files\r\n # containing contents for each thread.\r\n\r\n # Generate strings describing current workload and thread indexes for\r\n # output file names\r\n workload_index += 1\r\n workload_index_string = str(workload_index).zfill(workload_zfill_amount)\r\n file_main_name = '{0}_SGE_WORKLOAD_{1}'.format(NAME,\r\n workload_index_string)\r\n\r\n # When --fix_run mode is used the output and log files files already\r\n # exist. To prevent overwriting these files with new ones specific\r\n # prefix or appendix strings are added to the new output file names.\r\n prefix = ''\r\n appendix = '.sh'\r\n i = 0\r\n if command_line_parameters.fix_run:\r\n mode = 'FIX'\r\n elif command_line_parameters.compress_run == 'compress':\r\n mode = 'COMPRESS'\r\n elif command_line_parameters.compress_run == 'decompress':\r\n mode = 'DECOMPRESS'\r\n else:\r\n mode = None\r\n while mode is not None and os.path.exists(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix)):\r\n i += 1\r\n prefix = '{0}_{1}_'.format(mode, i)\r\n appendix = '_{0}_{1}.sh'.format(mode, i)\r\n\r\n # Generate subshell files\r\n thread_index = 1\r\n for thread_contents in workload:\r\n # Iterate over output commands of each thread and write necessary\r\n # subshell files for each\r\n out_lines = []\r\n cmds_in_thread = len(thread_contents)\r\n for i in xrange(cmds_in_thread):\r\n # Check if any modules need loading or are they loaded by previous command\r\n skip_module_loading = False\r\n if i > 0:\r\n if thread_contents[i].load_module == thread_contents[i-1].load_module:\r\n skip_module_loading = True\r\n # Check if any modules need unloading or will they be used by following command\r\n skip_module_unloading = False\r\n if i < cmds_in_thread-1:\r\n if thread_contents[i].load_module == thread_contents[i+1].load_module:\r\n skip_module_unloading = True\r\n out_lines += generate_subshell_file_contents(thread_contents[i],\r\n skip_module_loading,\r\n skip_module_unloading)\r\n\r\n\r\n # Write subshell file\r\n thread_index_string = str(thread_index)\r\n fl_name = '{0}_WORKLOAD_{1}_subshell_{2}{3}'.format(NAME,\r\n workload_index_string,\r\n thread_index_string,\r\n appendix)\r\n try:\r\n out_fl = open(os.path.join(input_file_parameters.output_dir,\r\n fl_name), 'w')\r\n except:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}'.format(os.path.join(\r\n input_file_parameters.output_dir,\r\n fl_name)))\r\n out_fl.write('\\n'.join(out_lines))\r\n out_fl.write('\\n')\r\n out_fl.close()\r\n thread_index += 1\r\n\r\n # Create lines for SGE input file by generating job-name, output,\r\n # error and array parameters based on user input\r\n\r\n status_file_basename = os.path.join(input_file_parameters.output_dir,\r\n prefix +\r\n input_file_parameters.job_name + '_$TASK_ID')\r\n\r\n # IF YOU ADD NEW AUTOMATICALLY INFERRED PARAMETERS, REMEMBER TO VALIDATE\r\n # THEM AT THE BEGINNING OF THIS FUNCTION\r\n resmng_config = list(input_file_parameters.resource_manager_params)\r\n resmng_config.append('#$ -o 
{0}.out'.format(status_file_basename))\r\n resmng_config.append('#$ -e {0}.err'.format(status_file_basename))\r\n resmng_config.append('#$ -t {0}-{1}'.format(1, len(workload)))\r\n\r\n resmng_config.append('\\n\\n')\r\n subshell_file_path = '{0}_WORKLOAD_{1}_subshell_{2}{3}'.format(NAME,\r\n workload_index_string,\r\n '\"$SGE_TASK_ID\"',\r\n appendix)\r\n subshell_file_path = os.path.join(input_file_parameters.output_dir,\r\n subshell_file_path)\r\n resmng_config.append('source {0}'.format(subshell_file_path))\r\n\r\n out_fl_path = os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix)\r\n workload_file_paths.append(out_fl_path)\r\n try:\r\n out_fl = open(out_fl_path, 'w')\r\n\r\n except IOError as emsg:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}\\n with error message:\\n{1}'\r\n .format(os.path.join(input_file_parameters.output_dir,\r\n file_main_name + appendix),\r\n str(emsg)))\r\n out_fl.write('\\n'.join(resmng_config))\r\n out_fl.write('\\n')\r\n out_fl.close()\r\n return workload_file_paths",
"def gen_jobs(fpath, num_runs, netid):\n\n run = \"\"\n run += \"import sys\\n\"\n run += \"import subprocess\\n\"\n run += \"cmd_array = (\"\n for i in range(num_runs):\n run += \"r\\\"python test.py %d\\\"\" % i\n run += \",\\n\"\n\n run += \")\\n\"\n run += \"p = subprocess.Popen(cmd_array[int(sys.argv[1])-1], shell=True, stdout=subprocess.PIPE)\\n\"\n run += \"out = p.stdout.read()\"\n# run += \"print cmd_array[int(sys.argv[1])]\"\n\n script_name = \"test\"\n\n if verbose:\n print \"Writing array script: \" + \"run.\" + script_name + \".py\"\n f = open(os.path.join(fpath, \"run.\" + script_name + \".py\"), 'w')\n f.write(\"%s\\n\" % run)\n\n f = open(os.path.join(fpath, \"submit_run.\" + script_name + \".sh\"), 'w')\n submit_run = \"#!/bin/csh\\n\"\n submit_run += \"#$ -N %s\\n\" % (\"job_%d\" % num_runs)\n submit_run += \"#$ -t 1:%d\\n\" % (num_runs)\n submit_run += \"#$ -M %[email protected]\\n\\n\" % (netid)\n# submit_run += \"#$ -q short\"\n# submit_run += \"#$ -r y\"\n submit_run += \"python run.%s.py ${SGE_TASK_ID}\" % (script_name)\n\n if verbose:\n print \"Writing submit shell script: \" + \"submit_run.\" + script_name + \".sh\"\n f.write(\"%s\\n\" % submit_run)",
"def write_default(workflows, output_dir):\r\n\r\n # Calculate the total number of commands\r\n number_of_commands = 0\r\n for workflow in workflows:\r\n number_of_commands += sum(map(len, workflow))\r\n\r\n # Create command line strings\r\n i = 0\r\n out_lines = ['echo Started executing shell script at:', 'date']\r\n for workflow in workflows:\r\n for workflow_step in workflow:\r\n for cmd in workflow_step:\r\n i += 1\r\n cmd_list = cmd.command_lines\r\n cmd_list = map(clean_command_lines, cmd_list)\r\n out_lines.append('echo Executing command {0}/{1}:'\r\n .format(i, number_of_commands))\r\n for c in cmd_list:\r\n c = c.replace('>', '\\\\>')\r\n c = c.replace('|', '\\\\|')\r\n out_lines.append('echo ' + c)\r\n out_lines.append('date')\r\n\r\n #Load modules\r\n if cmd.load_module:\r\n for module in cmd.load_module:\r\n out_lines.append(module)\r\n\r\n #The command\r\n out_lines += cmd_list\r\n\r\n #Unload modules\r\n if cmd.unload_module:\r\n for module in cmd.unload_module:\r\n out_lines.append(module)\r\n out_lines.append('echo Finished at:')\r\n out_lines.append('date')\r\n\r\n #Open and write command lines\r\n fl_name = '{0}_output_{1}.sh'.format(NAME, START_TIME)\r\n output_file_path = os.path.join(output_dir, fl_name)\r\n try:\r\n out_fl = open(output_file_path, 'w')\r\n except:\r\n raise STAPLERerror.STAPLERerror('Unable to create output file:'\r\n '\\n{0}'.format(os.path.join(output_dir,\r\n fl_name)))\r\n out_fl.write('#!/usr/bin/env bash\\n')\r\n out_fl.write('\\n'.join(out_lines))\r\n out_fl.close()\r\n return [output_file_path]",
"def prepare_parafly_slurm_job_script(sBasename_job, sBasename_parafly, sDirectory_job, sEmail, iWalltime_in = None, nNode_in = None, nThread_in=None, sJob_name_in =None, sPython_env_in =None, sQueue_in=None):\n if iWalltime_in is not None:\n iWalltime = iWalltime_in \n else:\n iWalltime = 2\n if nNode_in is not None:\n iNode = nNode_in \n else:\n iNode = 1\n if nThread_in is not None:\n nThread = nThread_in \n else:\n nThread = 40\n \n if sJob_name_in is not None:\n sJob_name = sJob_name_in \n else:\n sJob_name = 'parafly'\n if sPython_env_in is not None:\n sPython_env = sPython_env_in \n else:\n sPython_env = 'base'\n \n if sQueue_in is not None:\n sQueue = sQueue_in \n else:\n sQueue = 'short'\n \n sWalltime =\"{:0d}\".format(iWalltime )\n sNode = \"{:0d}\".format(iNode )\n sThread = \"{:0d}\".format(nThread )\n \n os.chdir(sDirectory_job)\n \n ofs = open(sBasename_job,\"w\") #write mode \n sLine = '#!/bin/bash' + '\\n'\n ofs.write( sLine ) \n sLine = '#SBATCH --account=esmd' + '\\n'\n ofs.write( sLine ) \n\n #sLine = '#SBATCH --begin=now+1minutes' + '\\n'\n #ofs.write( sLine ) \n\n sLine = '#SBATCH --cpus-per-task=1 ' + '\\n'\n ofs.write( sLine ) \n\n sLine = '#SBATCH --dependency=singleton ' + '\\n'\n ofs.write( sLine )\n sLine = '#SBATCH --error=stderr_%j.err' + '\\n'\n ofs.write( sLine ) \n sLine = '#SBATCH --job-name=' + sJob_name + ' # create a name for your job' + '\\n'\n ofs.write( sLine ) \n sLine = '#SBATCH --mail-type=ALL' + '\\n'\n ofs.write( sLine ) \n sLine = '#SBATCH --mail-user=' + sEmail + '\\n'\n ofs.write( sLine ) \n\n sLine = '#SBATCH --nodes=' + sNode + ' # node count' + '\\n'\n ofs.write( sLine ) \n sLine = '#SBATCH --ntasks=' + sThread + ' # total number of tasks' + '\\n'\n ofs.write( sLine ) \n sLine = '#SBATCH --output=stdout_%j.out' + '\\n'\n ofs.write( sLine ) \n\n sLine = '#SBATCH --partition=' + sQueue + '\\n' #can be improved here\n ofs.write( sLine ) \n sLine = '#SBATCH --time=' + sWalltime +':00:00 # total run time limit (HH:MM:SS)' + '\\n'\n ofs.write( sLine ) \n\n sLine = 'module purge' + '\\n'\n ofs.write( sLine ) \n sLine = 'module load parafly/2013' + '\\n'\n ofs.write( sLine ) \n sLine = 'module load anaconda3/2019.03' + '\\n'\n ofs.write( sLine ) \n sLine = 'source /share/apps/anaconda3/2019.03/etc/profile.d/conda.sh' + '\\n'\n ofs.write( sLine ) \n sLine = 'unset PYTHONHOME' + '\\n'\n ofs.write( sLine ) \n sLine = 'conda activate ' + sPython_env + '\\n'\n ofs.write( sLine ) \n\n sLine = 'ParaFly -c ' + sBasename_parafly + ' -CPU ' + sThread + ' -failed_cmds rerun.txt' + '\\n'\n ofs.write( sLine ) \n \n sLine = 'echo \" Job \" ' + '${SLURM_JOBID}' + ' is launched' + '\\n'\n ofs.write( sLine ) \n\n sLine = 'conda deactivate' + '\\n'\n ofs.write( sLine ) \n \n sLine = 'echo \"Finished\"' + '\\n'\n ofs.write( sLine ) \n ofs.close() \n \n return",
"def run_job(args):\n\n global stop_all\n global jobfiles_global\n global jobwcl\n\n jobwcl = WCL()\n jobfiles = {'infullnames': [args.config, args.workflow],\n 'outfullnames': [],\n 'output_putinfo': {}}\n jobfiles_global = {'infullnames': [args.config, args.workflow],\n 'outfullnames': [],\n 'output_putinfo': {}}\n\n jobstart = time.time()\n with open(args.config, 'r') as wclfh:\n jobwcl.read(wclfh, filename=args.config)\n jobwcl['verify_files'] = miscutils.checkTrue('verify_files', jobwcl, False)\n jobwcl['jobroot'] = os.getcwd()\n jobwcl['job_max_usage'] = 0\n #jobwcl['pre_job_disk_usage'] = pfwutils.diskusage(jobwcl['jobroot'])\n jobwcl['pre_job_disk_usage'] = 0\n\n # Save pointers to archive information for quick lookup\n if jobwcl[pfwdefs.USE_HOME_ARCHIVE_INPUT] != 'never' or \\\n jobwcl[pfwdefs.USE_HOME_ARCHIVE_OUTPUT] != 'never':\n jobwcl['home_archive_info'] = jobwcl[pfwdefs.SW_ARCHIVESECT][jobwcl[pfwdefs.HOME_ARCHIVE]]\n else:\n jobwcl['home_archive_info'] = None\n\n if jobwcl[pfwdefs.USE_TARGET_ARCHIVE_INPUT] != 'never' or \\\n jobwcl[pfwdefs.USE_TARGET_ARCHIVE_OUTPUT] != 'never':\n jobwcl['target_archive_info'] = jobwcl[pfwdefs.SW_ARCHIVESECT][jobwcl[pfwdefs.TARGET_ARCHIVE]]\n else:\n jobwcl['target_archive_info'] = None\n\n # run the tasks (i.e., each wrapper execution)\n stop_all = miscutils.checkTrue('stop_on_fail', jobwcl, True)\n\n try:\n jobfiles['infullnames'] = gather_initial_fullnames()\n jobfiles_global['infullnames'].extend(jobfiles['infullnames'])\n miscutils.coremakedirs('log')\n miscutils.coremakedirs('outputwcl')\n exitcode, jobfiles = job_workflow(args.workflow, jobfiles, jobwcl)\n except Exception:\n (extype, exvalue, trback) = sys.exc_info()\n print '!' * 60\n traceback.print_exception(extype, exvalue, trback, file=sys.stdout)\n exitcode = pfwdefs.PF_EXIT_FAILURE\n print \"Aborting rest of wrapper executions. Continuing to end-of-job tasks\\n\\n\"\n\n try:\n create_junk_tarball(jobwcl, jobfiles, exitcode)\n except:\n print \"Error creating junk tarball\"\n # if should transfer at end of job\n if jobfiles['output_putinfo']:\n print \"\\n\\nCalling file transfer for end of job (%s files)\" % \\\n (len(jobfiles['output_putinfo']))\n\n copy_output_to_archive(jobwcl, jobfiles, jobfiles['output_putinfo'], 'job',\n 'job_output', exitcode)\n else:\n print \"\\n\\n0 files to transfer for end of job\"\n if miscutils.fwdebug_check(1, \"PFWRUNJOB_DEBUG\"):\n miscutils.fwdebug_print(\"len(jobfiles['outfullnames'])=%s\" % \\\n (len(jobfiles['outfullnames'])))\n print \"\\nDESDMTIME: pfwrun_job %0.3f\" % (time.time()-jobstart)\n return exitcode",
"def parallel(files):\n return list(map(join_process, list(map(start_process, files))))",
"def build_submission_script(path,\n script_name,\n save_history=True,\n walltime=10,\n allocation='p30653',\n cores=1,\n memory=4):\n\n # define paths\n path = abspath(path)\n job_script_path = join(path, 'scripts', 'submit.sh')\n\n # copy run script to scripts directory\n run_script = abspath(__file__).rsplit('/', maxsplit=2)[0]\n run_script = join(run_script, 'scripts', script_name)\n shutil.copy(run_script, join(path, 'scripts'))\n\n # determine queue\n if walltime <= 4:\n queue = 'short'\n elif walltime <= 48:\n queue = 'normal'\n else:\n queue = 'long'\n\n # declare outer script that reads PATH from file\n job_script = open(job_script_path, 'w')\n job_script.write('#!/bin/bash\\n')\n\n # move to job directory\n job_script.write('cd {:s} \\n\\n'.format(path))\n\n # begin outer script for processing job\n job_script.write('while IFS=$\\'\\\\t\\' read P\\n')\n job_script.write('do\\n')\n job_script.write('b_id=$(echo $(basename ${P}) | cut -f 1 -d \\'.\\')\\n')\n job_script.write(' JOB=`msub - << EOJ\\n\\n')\n\n # =========== begin submission script for individual batch ============\n job_script.write('#! /bin/bash\\n')\n job_script.write('#MSUB -A {:s} \\n'.format(allocation))\n job_script.write('#MSUB -q {:s} \\n'.format(queue))\n job_script.write('#MSUB -l walltime={0:02d}:00:00 \\n'.format(walltime))\n job_script.write('#MSUB -m abe \\n')\n #job_script.write('#MSUB -M [email protected] \\n')\n job_script.write('#MSUB -o ./log/${b_id}/outlog \\n')\n job_script.write('#MSUB -e ./log/${b_id}/errlog \\n')\n job_script.write('#MSUB -N ${b_id} \\n')\n job_script.write('#MSUB -l nodes=1:ppn={:d} \\n'.format(cores))\n job_script.write('#MSUB -l mem={:d}gb \\n\\n'.format(memory))\n\n # load python module and metabolism virtual environment\n job_script.write('module load python/anaconda3.6\\n')\n job_script.write('source activate ~/pythonenvs/growth_env\\n\\n')\n\n # move to job directory\n job_script.write('cd {:s} \\n\\n'.format(path))\n\n # run script\n job_script.write('python ./scripts/{:s}'.format(script_name)+' ${P} ')\n args = (save_history,)\n job_script.write('-s {:d}\\n'.format(*args))\n job_script.write('EOJ\\n')\n job_script.write('`\\n\\n')\n # ============= end submission script for individual batch ============\n\n # print job id\n #job_script.write('echo \"JobID = ${JOB} submitted on `date`\"\\n')\n job_script.write('done < ./batches/index.txt \\n')\n job_script.write('echo \"All batches submitted as of `date`\"\\n')\n job_script.write('exit\\n')\n\n # close the file\n job_script.close()\n\n # change the permissions\n chmod(job_script_path, 0o755)",
"def main() -> co.Parallel:\n actors = [\"Oprah Winfrey\", \"Kate Mara\", \"Don Cheadle\", \"Dwayne Johnson\"]\n root = co.Parallel(image=_get_image())\n for actor in actors:\n root[actor] = co.Lazy(\n f\"python pipeline.py all_by_actor '{actor}'\"\n )\n return root",
"def _construct_walk_corpus_and_write_multiprocess(filebase,walk_times,headflag_of_index_file = '',\n\t\t\t\t\t\t\t\t\t\t\t\t max_num_workers=cpu_count()):\n\t# allocate walk times to workers\n\tif walk_times <= max_num_workers:\n\t\ttimes_per_worker = [1 for _ in range(walk_times)]\n\telse:\n\t\tdiv, mod = divmod(walk_times, max_num_workers)\n\t\ttimes_per_worker = [div for _ in range(max_num_workers)]\n\t\tfor idx in range(mod):\n\t\t\ttimes_per_worker[idx] = times_per_worker[idx] + 1\n\tassert sum(times_per_worker) == walk_times, 'workers allocating failed: %d != %d' % (\n\tsum(times_per_worker), walk_times)\n\n\tfiles_list = [\"{}.{}\".format(filebase, str(x)) for x in range(len(times_per_worker))]\n\tf = open(filebase, 'w')\n\tf.write('{}\\n'.format(headflag_of_index_file))\n\tf.write('DESCRIPTION: allocate %d workers to concurrently walk %d times.\\n' % (len(times_per_worker), walk_times))\n\tf.write('DESCRIPTION: generate %d files to save walk corpus:\\n' % (len(times_per_worker)))\n\tfor item in files_list:\n\t\tf.write('FILE: {}\\n'.format(item))\n\tf.close()\n\n\tfiles = []\n\targs_list = []\n\tfor index in range(len(times_per_worker)):\n\t\targs_list.append((files_list[index], times_per_worker[index]))\n\n\tlogger.info('Corpus bulid: walking to files (using %d workers for multi-process)...' % len(times_per_worker))\n\ttime_start = time.time()\n\twith ProcessPoolExecutor(max_workers=max_num_workers) as executor:\n\t# # the walker for node2vec is so large that we can not use multi-process, so we use multi-thread instead.\n\t# with ThreadPoolExecutor(max_workers=max_num_workers) as executor:\n\t\tfor file_ in executor.map(_construct_walk_corpus_and_write_singprocess, args_list):\n\t\t\tfiles.append(file_)\n\tassert len(files) == len(files_list), 'ProcessPoolExecutor occured error, %d!=%d' % (len(files), len(files_list))\n\n\tlogger.info('Corpus bulid: walk completed in {}s'.format(time.time() - time_start))\n\treturn files",
"def test_background_process(self):\n first = \"\"\"file://B <- file://A\n sleep 1\n echo A produces B > B\n \"\"\"\n\n pp = ProjectParser()\n pp.set_project(first)\n workflow = pp.parse_extend_and_check_project()\n process = workflow._processes[0]\n\n wr = WorkflowRuner(3)\n wr.init_workers()\n try:\n wr.start_process_in_background(process)\n assert wr.active_workers()\n timeout = time() + 1.5\n while time() < timeout and not wr._completed_processes:\n sleep(0.1)\n assert time() < timeout, \"Process should have stoped now\"\n finally:\n wr.terminate_workers_and_clean_subprocesses()",
"def write_pbs(self):\n fout = open(\"runStarCCM.pbs\", \"w\")\n fout.write(\"#PBS -S /bin/csh\\n\")\n fout.write(\"#PBS -l select=\" + str(self.numNodes) + \":ncpus=\" + str(self.numCPUs) + \":mpiprocs=\" + str(self.mpiProcs) + \":model=has,walltime=\" + self.WallTime + \"\\n\\n\")\n fout.write(\"#PBS -W group_list=\" + self.GroupID + \"\\n\")\n fout.write(\"#PBS -j oe\\n\")\n fout.write(\"#PBS -q \" + self.queue + \"\\n\")\n fout.write(\"#PBS -N \" + self.jobName + \"\\n\")\n fout.write(\"#PBS -m e\\n\")\n fout.write(\"#PBS -W block=true\\n\\n\")\n fout.write(\"cd $PBS_O_WORKDIR\\n\")\n\n if self.runVolGrid == 1:\n #fout.write(\"/bin/rm -f \" + self.simMeshFile + \".sim\\n\")\n fout.write(\"/bin/rm -f starccmMeshRun.out\\n\")\n fout.write(\"chmod u+x \" + self.cshBatch1File + \".csh\\n\")\n # do not use >>& because it will fail in some environment\n fout.write(\"./\" + self.cshBatch1File + \".csh -powerOnDemand \" + self.javaBatch1File + \".java >& starccmMeshRun.out\\n\\n\")\n else:\n fout.write(\"echo 'User chooses not to make a mesh run.'\\n\")\n\n if self.runCFD == 1:\n fout.write(\"chmod u+x \" + self.cshBatch2File + \".csh\\n\")\n fout.write(\"/bin/rm -f *.csv *.png starccmFlowRun.out\\n\")\n # do not use >>& because it will fail in some environment\n fout.write(\"./\" + self.cshBatch2File + \".csh -powerOnDemand \" + self.javaBatch2File + \".java \" + self.simMeshFile + \" >& starccmFlowRun.out\\n\\n\")\n fout.write(\"# rename the strange file names\\n\")\n fout.write(\"/bin/mv \\$PWDForceX.csv ForceX.csv\\n\")\n fout.write(\"/bin/mv \\$PWDForceY.csv ForceY.csv\\n\")\n fout.write(\"/bin/mv \\$PWDForceZ.csv ForceZ.csv\\n\")\n fout.write(\"/bin/mv \\$PWDMomentX.csv MomentX.csv\\n\")\n fout.write(\"/bin/mv \\$PWDMomentY.csv MomentY.csv\\n\")\n fout.write(\"/bin/mv \\$PWDMomentZ.csv MomentZ.csv\\n\")\n fout.write(\"/bin/mv \\$PWDResiduals.csv Residuals.csv\\n\\n\")\n fout.write(\"/bin/mv \\$PWDForceX.png ForceX.png\\n\")\n fout.write(\"/bin/mv \\$PWDForceY.png ForceY.png\\n\")\n fout.write(\"/bin/mv \\$PWDForceZ.png ForceZ.png\\n\")\n fout.write(\"/bin/mv \\$PWDMomentX.png MomentX.png\\n\")\n fout.write(\"/bin/mv \\$PWDMomentY.png MomentY.png\\n\")\n fout.write(\"/bin/mv \\$PWDMomentZ.png MomentZ.png\\n\")\n fout.write(\"/bin/mv \\$PWDResiduals.png Residuals.png\\n\")\n fout.write(\"/bin/mv \\$PWDUpperCp.png UpperCp.png\\n\")\n fout.write(\"/bin/mv \\$PWDLowerCp.png LowerCp.png\\n\")\n fout.write(\"/bin/rm -rf null\\n\")\n else:\n fout.write(\"echo 'User chooses not to make a CFD run.'\\n\")\n\n fout.close()",
"def generateParallelScript(hub, user_name, server_list):\n all_tasks = []\n slot_names = hub['SlotIO'].keys()\n\n for slot_name in slot_names:\n vivado = f'VIV_VER={args.vivado_version} vivado -mode batch -source {slot_name}_synth.tcl'\n \n # broadcast the results\n transfer = []\n for server in server_list:\n transfer.append(f'rsync_with_retry.sh --target-server {server} --user-name {user_name} --dir-to-sync {synth_dir}/{slot_name}/')\n transfer_str = \" && \".join(transfer)\n\n command = f'cd {synth_dir}/{slot_name} && {vivado} && {transfer_str}'\n all_tasks.append(command)\n\n num_job_server = math.ceil(len(all_tasks) / len(server_list) ) \n for i, server in enumerate(server_list):\n local_tasks = all_tasks[i * num_job_server: (i+1) * num_job_server]\n open(f'{synth_dir}/parallel_slot_synth_{server}.txt', 'w').write('\\n'.join(local_tasks))",
"def go(self):\n\n self._write_master()\n num_fofs = self.fofs['fofid'].max()\n fof_splits = split.get_splits(num_fofs, self['chunksize'])\n\n njobs=0\n fobj=None\n\n icondor=0\n for isplit,fof_split in enumerate(fof_splits):\n if njobs % self['jobs_per_sub']==0:\n if fobj is not None:\n fobj.close()\n fobj = self._open_condor_script(icondor)\n icondor += 1\n\n self._write_split(fobj, isplit, fof_split)\n\n njobs += 1",
"def run_workflow(EMBEDDING_BASE_PATH):\n train_tweets_path, val_tweets_path, test_tweets_path, image_dataset = run_pre_workflow()\n\n input_images, train_tweets, val_tweets, test_tweets, glove_embeddings = replica_catalog(train_tweets_path, val_tweets_path, test_tweets_path, image_dataset, EMBEDDING_BASE_PATH)\n\n preprocess_tweets, preprocess_images, train_resnet, hpo_train_resnet, train_bilstm, hpo_train_bilstm, resnet_inference, bilstm_inference, late_fusion = transformation_catalog()\n \n sites_catalog()\n\n pegasus_properties()\n \n wf = Workflow('Crisis_Computing_Workflow')\n\n # --------------------------------------------------- TEXT PIPELINE ------------------------------------------------------ \n\n # Job 1: Preprocess tweets\n preprocessed_train_tweets = File('preprocessed_train_tweets.csv')\n preprocessed_val_tweets = File('preprocessed_val_tweets.csv')\n preprocessed_test_tweets = File('preprocessed_test_tweets.csv')\n \n job_preprocess_tweets = [Job(preprocess_tweets) for i in range(3)]\n job_preprocess_tweets[0].add_inputs(train_tweets)\n job_preprocess_tweets[0].add_outputs(preprocessed_train_tweets)\n job_preprocess_tweets[0].add_args('--filename', 'train_tweets.csv')\n \n job_preprocess_tweets[1].add_inputs(val_tweets)\n job_preprocess_tweets[1].add_outputs(preprocessed_val_tweets)\n job_preprocess_tweets[1].add_args('--filename', 'val_tweets.csv')\n \n job_preprocess_tweets[2].add_inputs(test_tweets)\n job_preprocess_tweets[2].add_outputs(preprocessed_test_tweets)\n job_preprocess_tweets[2].add_args('--filename', 'test_tweets.csv')\n\n\n # Job 2: HPO Bi-LSTM\n bilstm_best_params = File('best_bilstm_hpo_params.txt')\n\n job_hpo_train_bilstm = Job(hpo_train_bilstm)\\\n .add_inputs(glove_embeddings, preprocessed_train_tweets, preprocessed_val_tweets, preprocessed_test_tweets)\\\n .add_outputs(bilstm_best_params)\\\n .add_args('--trials', BILSTM_NUM_TRIALS)\n\n\n # Job 3: Train Bi-LSTM using best parameters from HPO study and output loss and accuracy curves\n trained_bilstm_model = File('bilstm_final_model.h5') \n bilstm_loss_curve = File('Loss_curve_bilstm.png')\n bilstm_accuracy_curve = File('Accuracy_curve_bilstm.png')\n\n\n job_train_bilstm = Job(train_bilstm)\\\n .add_inputs(glove_embeddings, preprocessed_train_tweets, preprocessed_val_tweets, preprocessed_test_tweets, bilstm_best_params)\\\n .add_outputs(bilstm_loss_curve, bilstm_accuracy_curve, trained_bilstm_model)\\\n\n\n # Job 4: Run inference on best Bi-LSTM model to produce output on test dataset along with confusion matrix\n bilstm_train_output_prob = File('bilstm_train_output.csv')\n bilstm_test_output_prob = File('bilstm_test_output.csv')\n bilstm_confusion_matrix = File('bilstm_confusion_matrix.png')\n\n job_bilstm_inference = Job(bilstm_inference)\\\n .add_inputs(preprocessed_train_tweets, preprocessed_val_tweets, preprocessed_test_tweets, trained_bilstm_model)\\\n .add_outputs(bilstm_train_output_prob, bilstm_test_output_prob, bilstm_confusion_matrix)\n\n\n # --------------------------------------------------- IMAGE PIPELINE ------------------------------------------------------ \n\n \n # Job 1: Preprocess images\n prefix = \"resized_\"\n job_preprocess_images = [Job(preprocess_images) for i in range(NUM_WORKERS)]\n resized_images = split_preprocess_jobs(job_preprocess_images, input_images, prefix)\n\n # Job 2: HPO ResNet-50\n resnet_best_params = File('best_resnet_hpo_params.txt')\n\n job_hpo_train_resnet = Job(hpo_train_resnet)\\\n .add_inputs(*resized_images)\\\n .add_args('--trials', 
RESNET_NUM_TRIALS)\\\n .add_outputs(resnet_best_params)\\\n .add_profiles(Namespace.PEGASUS, key=\"maxwalltime\", value=MAXTIMEWALL)\n\n\n # Job 3: Train ResNet-50 using best parameters from HPO study and output loss and accuracy curves\n trained_resnet_model = File('resnet_final_model.pth')\n resnet_loss_curve = File('Loss_curve_resnet.png')\n resnet_accuracy_curve = File('Accuracy_curve_resnet.png')\n\n job_train_resnet = Job(train_resnet)\\\n .add_inputs(*resized_images, resnet_best_params)\\\n .add_outputs(resnet_loss_curve, resnet_accuracy_curve, trained_resnet_model)\\\n .add_profiles(Namespace.PEGASUS, key=\"maxwalltime\", value=MAXTIMEWALL)\n\n\n # Job 4: Run inference on best ResNet-50 model to produce output on test dataset along with confusion matrix\n resnet_train_output_prob = File('resnet_train_output.csv')\n resnet_confusion_matrix = File('resnet_confusion_matrix.png')\n resnet_test_output_prob = File('resnet_test_output.csv') \n\n job_resnet_inference = Job(resnet_inference)\\\n .add_inputs(*resized_images, trained_resnet_model)\\\n .add_outputs(resnet_train_output_prob, resnet_test_output_prob, resnet_confusion_matrix)\n\n \n \n # --------------------------------------------------- LATE FUSION ------------------------------------------------------ \n\n # Job 1: Late Fusion\n confusion_matrix_MPC = File('late_fusion_MPC.png')\n confusion_matrix_LR = File('late_fusion_LR.png')\n confusion_matrix_MLP = File('late_fusion_MLP.png')\n report_MLP = File('late_fusion_MLP.csv')\n report_MPC = File('late_fusion_MPC.csv')\n report_LR = File('late_fusion_LR.csv')\n\n job_late_fusion = Job(late_fusion)\\\n .add_inputs(resnet_train_output_prob, resnet_test_output_prob, bilstm_train_output_prob, bilstm_test_output_prob)\\\n .add_outputs(confusion_matrix_MPC, confusion_matrix_LR, confusion_matrix_MLP, report_MLP, report_MPC, report_LR)\n\n wf.add_jobs(*job_preprocess_tweets, *job_preprocess_images, job_bilstm_inference, job_hpo_train_bilstm, job_train_bilstm, job_hpo_train_resnet, job_train_resnet, job_resnet_inference, job_late_fusion)\n\n try:\n wf.plan(submit=False, sites=[\"donut\"], output_sites=[\"donut\"], dir=\"submit\")\n #wf.wait()\n #wf.statistics()\n except PegasusClientError as e:\n print(e.output)\n \n #plot_workflow_graph(wf)\n \n return",
"def main(workdir):\n dir = os.path.expanduser(workdir)\n \n #read the .dat file\n f = open('{}smi.dat'.format(dir))\n par = imp.load_source('par', '', f)\n \n #make a sdf file for visualization\n output = pybel.Outputfile(\"sdf\", dir + \"species.sdf\",overwrite=True)\n for name in par.smiles:\n smi = par.smiles[name]\n obmol = pybel.readstring(\"smi\",smi)\n output.write(obmol)\n output.close()\n \n #list with the jobs that need to be done\n jobs = []\n \n #iterate the input files\n for name in par.smiles:\n #name = input_file.replace('.inp','') #name of the calculation\n test_dir = dir + name #location where the calculations will be done\n if not os.path.exists(test_dir):\n os.mkdir(test_dir)\n \n #copy the input file to the working directory\n write_input_file(par,name,par.smiles[name],test_dir + '/input.inp')\n job = workdir + name + '/'\n jobs.append(job)\n \n run_threads(jobs, 'eric', max_running = 3)",
"def parallel_generate_particle_distribution(self, max_loop = np.inf, Ncore = 1, outfile=None):\n \n self.pos = np.zeros((self.N_part, 3))\n self.vel = np.zeros((self.N_part, 3))\n \n \n # start running\n nmax = self.N_part / Ncore\n #pool = Pool(processes = Ncore)\n #pool.apply_async(_while_loop,)\n #result = pool.map(_while_loop, args=(self, nmax, max_loop,))\n #print result.get(timeout = 100)\n #p = Process(target=_while_loop, args=(nmax, max_loop,))\n jobs = []\n for i in np.arange(Ncore):\n p = multiprocessing.Process(target=_while_loop, args=(self, nmax, max_loop, \n Ncore, outfile,))\n jobs.append(p)\n p.start()\n \n for p in jobs:\n p.join()\n \n #results = [None]*self.N_part\n #results = [OUTPUT.get() for p in jobs]\n \n #results = np.array(results)\n \n #pos = results[:,0]\n #pos = pos.reshape(self.N_part,3)\n #self.pos = pos\n \n #vel = results[:,1]\n #vel = vel.reshape(self.N_part,3)\n #self.vel = vel\n \n \n #if (not outfile == None):\n # self.write_pd(outfile)\n # combine to a single output\n bash_command = \"cat \"\n for i in np.arange(Ncore) + 1:\n temp_name = outfile + \"_%02i_\"%(i) + \".temp\"\n bash_command = bash_command + temp_name + \" \"\n bash_command = bash_command + \"> \" + outfile\n os.system(bash_command)\n \n # now remove temporary files\n bash_command = \"rm \"\n for i in np.arange(Ncore) + 1:\n temp_name = outfile + \"_%02i_\"%(i) + \".temp\"\n bash_command = bash_command + temp_name + \" \"\n os.system(bash_command)\n \n bash_command = \"sed -i -e '1i#m x y z vx vy vz\\' \" + outfile\n os.system(bash_command)\n self.load_particle_ic(outfile)\n \n return self.pos, self.vel",
"def create_subworkflow_file(self, workflow: Workflow, props: PropertySet):",
"def create_script(sh_file, cmds, max_workers, num_nodes=1):\n output = os.path.dirname(sh_file)\n job_name = os.path.splitext(os.path.basename(sh_file))[0]\n err_file = os.path.join(output,\"{0}.error\".format(job_name))\n complete_file = os.path.join(output, \"{0}.complete\".format(job_name))\n with open(sh_file, 'w') as of:\n of.write(\"#!/bin/bash\\n\")\n of.write(\"#PBS -N {0}\\n\".format(job_name))\n of.write(\"#PBS -l nodes={0}:ppn={1}\\n\".format(num_nodes,max_workers))\n of.write(\"#PBS -l walltime=2:30:00\\n\")\n of.write(\"#PBS -l vmem=8g\\n\")\n of.write(\"#PBS -j eo\\n\")\n of.write(\"#PBS Join_Path={0}\\n\".format(os.path.join(output,\"%s.err\"%job_name)))\n of.write(\"module load samtools/1.9\\n\")\n of.write(\"module load bedtools/2.27.1\\n\")\n of.write(\"{0}\\n\".format(cmds[0]))\n of.write(\"if [ $? -ne 0 ]; then \\n\\ttouch {0};exit 1 \\nfi\\n\".format(err_file))\n of.write(\"{0}\\n\".format(cmds[1]))\n of.write(\"if [ $? -ne 0 ]; then \\n\\ttouch {0}\\nelse\\n\\ttouch {1} \\nfi\\n\".format(err_file, complete_file))\n os.system(\"chmod 755 %s\" % sh_file)",
"def build_job_scripts(model_list, scenario_list, output_dir, cassandra_config_dir, cassandra_log_dir,\n cassandra_main_script, sbatch_account, sbatch_partition='slurm', sbatch_walltime='01:00:00',\n sbatch_ntasks=3, sbatch_nodes=3, sbatch_jobname='cassie', sbatch_logdir='.', template=None):\n\n # use default configuration template file if user does not give one\n if template is None:\n template = pkg_resources.resource_filename('cassie', 'data/sbatch_template.sh')\n\n # existing tags to replace in the template file\n model_tag = '<model>'\n scenario_tag = '<scenario>'\n account_tag = '<account>'\n partition_tag = '<partition>'\n ntasks_tag = '<ntasks>'\n nodes_tag = '<nodes>'\n time_tag = '<walltime>'\n jobname_tag = '<jobname>'\n logdir_tag = '<logdir>'\n cassandra_configdir_tag = '<cassconfigdir>'\n cassandra_logdir_tag = '<casslogdir>'\n cassandra_script_tag = '<cassmainscript>'\n\n for model in model_list:\n for scenario in scenario_list:\n\n output_file = os.path.join(output_dir, f'run_{model.lower()}_{scenario}.sh')\n\n with open(output_file, 'w') as out:\n with open(template) as get:\n\n f = get.read()\n\n # replace tag names with dynamic content\n fx = f.replace(model_tag, model)\n fx = fx.replace(scenario_tag, scenario)\n\n fx = fx.replace(account_tag, sbatch_account)\n fx = fx.replace(partition_tag, sbatch_partition)\n fx = fx.replace(ntasks_tag, str(sbatch_ntasks))\n fx = fx.replace(nodes_tag, str(sbatch_nodes))\n fx = fx.replace(time_tag, sbatch_walltime)\n fx = fx.replace(jobname_tag, sbatch_jobname)\n fx = fx.replace(logdir_tag, sbatch_logdir)\n\n fx = fx.replace(cassandra_configdir_tag, cassandra_config_dir)\n fx = fx.replace(cassandra_logdir_tag, cassandra_log_dir)\n fx = fx.replace(cassandra_script_tag, cassandra_main_script)\n\n out.write(fx)",
"def main():\n init()\n separator_len = 40\n for s in stage_instances:\n print('='*separator_len)\n print(s.name)\n print('-'*separator_len)\n\n s.add_tasks() # Add tasks from previous stage\n s.revive_or_archive() # Revive killed tasks or move them to failed\n s.schedule_jobs() # Schedule new jobs if needed\n s.print_status()\n print('='*separator_len + '\\n')\n render(stage_instances)",
"def __multi_process(args):\n Write.write_car(*args)",
"def write_pbs_runjob(name, nnodes, nprocessors, pmem, walltime, binary):\n runjob = open('runjob', 'w')\n runjob.write('#!/bin/sh\\n')\n runjob.write('#PBS -N {}\\n'.format(name))\n runjob.write('#PBS -o test.out\\n')\n runjob.write('#PBS -e test.err\\n')\n runjob.write('#PBS -r n\\n')\n runjob.write('#PBS -l walltime={}\\n'.format(walltime))\n runjob.write('#PBS -l nodes={}:ppn={}\\n'.format(nnodes, nprocessors))\n runjob.write('#PBS -l pmem={}\\n'.format(pmem))\n runjob.write('#PBS -W group_list=hennig\\n\\n')\n runjob.write('cd $PBS_O_WORKDIR\\n\\n')\n runjob.write('mpirun {} > job.log\\n\\n'.format(binary))\n runjob.write('echo \\'Done.\\'\\n')\n runjob.close()",
"def create_job(jobrun, vcf_filenames):\n if jobrun == \"cluster\":\n \"\"\"\n Supports only PBS clusters for now.\n \"\"\"\n for i in vcf_filenames:\n job_name = os.path.basename(i)\n job_print_string = \"#PBS -N %s\\n#PBS -M [email protected]\\n#PBS -m abe\\n#PBS -V\\n#PBS -l nodes=1:ppn=4,pmem=4000mb,walltime=72:00:00\\n#PBS -q fluxod\\n#PBS -A esnitkin_fluxod\\n#PBS -l qos=flux\\n\\n/home/apirani/anaconda/bin/python /nfs/esnitkin/bin_group/scripts/Scripts_v2.0/variants_position_analysis/reason_job.py -filter2_only_snp_vcf_dir %s -filter2_only_snp_vcf_file %s\\n\" % (job_name, args.filter2_only_snp_vcf_dir, i)\n job_file_name = \"%s.pbs\" % (i)\n f1=open(job_file_name, 'w+')\n f1.write(job_print_string)\n f1.close()\n #os.system(\"mv %s/*.pbs %s/temp\" % (args.filter2_only_snp_vcf_dir, args.filter2_only_snp_vcf_dir))\n pbs_dir = args.filter2_only_snp_vcf_dir + \"/*.pbs\"\n pbs_scripts = glob.glob(pbs_dir)\n for i in pbs_scripts:\n print \"Running: qsub %s\" % i\n #os.system(\"qsub %s\" % i)\n\n elif jobrun == \"parallel-local\":\n \"\"\"\n Generate a Command list of each job and run it in parallel on different cores available on local system\n \"\"\"\n command_array = []\n command_file = \"%s/commands_list.sh\" % args.filter2_only_snp_vcf_dir\n f3 = open(command_file, 'w+')\n\n\n for i in vcf_filenames:\n job_name = os.path.basename(i)\n job_print_string = \"#PBS -N %s\\n#PBS -M [email protected]\\n#PBS -m abe\\n#PBS -V\\n#PBS -l nodes=1:ppn=4,pmem=4000mb,walltime=72:00:00\\n#PBS -q fluxod\\n#PBS -A esnitkin_fluxod\\n#PBS -l qos=flux\\n\\n/home/apirani/anaconda/bin/python /nfs/esnitkin/bin_group/scripts/Scripts_v2.0/variants_position_analysis/reason_job.py -filter2_only_snp_vcf_dir %s -filter2_only_snp_vcf_file %s\\n\" % (job_name, args.filter2_only_snp_vcf_dir, i)\n job_file_name = \"%s.pbs\" % (i)\n f1=open(job_file_name, 'w+')\n f1.write(job_print_string)\n f1.close()\n #os.system(\"mv %s/*.pbs %s/temp\" % (args.filter2_only_snp_vcf_dir, args.filter2_only_snp_vcf_dir))\n pbs_dir = args.filter2_only_snp_vcf_dir + \"/*.pbs\"\n pbs_scripts = glob.glob(pbs_dir)\n\n\n for i in pbs_scripts:\n f3.write(\"bash %s\\n\" % i)\n f3.close()\n with open(command_file, 'r') as fpp:\n for lines in fpp:\n lines = lines.strip()\n command_array.append(lines)\n fpp.close()\n print len(command_array)\n if args.numcores:\n num_cores = int(num_cores)\n else:\n num_cores = multiprocessing.cpu_count()\n results = Parallel(n_jobs=num_cores)(delayed(run_command)(command) for command in command_array)\n\n elif jobrun == \"parallel-single-cluster\":\n print \" \"\n else:\n \"\"\"\n Generate a Command list of each job and run it on local system one at a time\n \"\"\"\n command_array = []\n command_file = \"%s/commands_list.sh\" % args.filter2_only_snp_vcf_dir\n os.system(\"bash %s\" % command_file)",
"def setup_jobs(self):\n transfer_args = [\"analysis_type\", \"perturbation\", \"num_permutations\", \"permutation_test_statistic\", \"loss_function\",\n \"importance_significance_level\", \"window_search_algorithm\", \"window_effect_size_threshold\"]\n jobs = [None] * self.num_jobs\n for idx in range(self.num_jobs):\n # Create and launch condor job\n features_filename = constants.INPUT_FEATURES_FILENAME.format(self.args.output_dir, idx)\n input_files = [features_filename, self.args.model_filename, self.args.model_loader_filename, self.args.data_filename]\n job_dir = f\"{self.args.output_dir}/outputs_{idx}\"\n cmd = f\"python3 -m anamod.core.worker -worker_idx {idx}\"\n for arg in transfer_args:\n if hasattr(self.args, arg):\n cmd += f\" -{arg} {getattr(self.args, arg)}\"\n # Relative file paths for non-shared FS, absolute for shared FS\n for name, path in dict(output_dir=job_dir, features_filename=features_filename, model_filename=self.args.model_filename,\n model_loader_filename=self.args.model_loader_filename, data_filename=self.args.data_filename).items():\n cmd += f\" -{name} {os.path.abspath(path)}\" if self.args.shared_filesystem else f\" -{name} {os.path.basename(path)}\"\n job = CondorJobWrapper(cmd, input_files, job_dir, shared_filesystem=self.args.shared_filesystem,\n memory=f\"{self.args.memory_requirement}GB\", disk=f\"{self.args.disk_requirement}GB\",\n avoid_bad_hosts=self.args.avoid_bad_hosts, retry_arbitrary_failures=self.args.retry_arbitrary_failures,\n cleanup=self.args.cleanup)\n jobs[idx] = job\n return jobs",
"def write_input_files(pst, pst_path=\".\"):\n par = pst.parameter_data.copy()\n par.index = par.index.str.lower()\n par.loc[:, \"parval1_trans\"] = (par.parval1 * par.scale) + par.offset\n pairs = np.array(list(zip(pst.template_files, pst.input_files)))\n num_tpl = len(pairs)\n chunk_len = 50\n num_chunk_floor = num_tpl // chunk_len\n main_chunks = (\n pairs[: num_chunk_floor * chunk_len].reshape([-1, chunk_len, 2]).tolist()\n ) # the list of files broken down into chunks\n remainder = pairs[num_chunk_floor * chunk_len :].tolist() # remaining files\n chunks = main_chunks + [remainder]\n # procs = []\n # for chunk in chunks:\n # # write_to_template(pst.parameter_data.parval1_trans,os.path.join(pst_path,tpl_file),\n # # os.path.join(pst_path,in_file))\n # p = mp.Process(\n # target=_write_chunk_to_template,\n # args=[chunk, pst.parameter_data.parval1_trans, pst_path],\n # )\n # p.start()\n # procs.append(p)\n # for p in procs:\n # p.join()\n pool = mp.Pool(processes=min(mp.cpu_count(), len(chunks), 60))\n x = [\n pool.apply_async(\n _write_chunk_to_template,\n args=(chunk, par.parval1_trans, pst_path),\n )\n for i, chunk in enumerate(chunks)\n ]\n [xx.get() for xx in x]\n pool.close()\n pool.join()",
"def submit(slurm_folder, nord=False):\r\n for files in slurm_folder:\r\n if not nord:\r\n call([\"sbatch\", \"{}\".format(files)])\r\n else:\r\n os.system(\"bsub < {}\".format(files))"
] | [
"0.6447484",
"0.6394443",
"0.62565774",
"0.6216091",
"0.6154703",
"0.6106836",
"0.608601",
"0.5807879",
"0.57455873",
"0.57444465",
"0.5742326",
"0.569049",
"0.5653086",
"0.5634561",
"0.5627084",
"0.56151015",
"0.5591978",
"0.54646945",
"0.5456538",
"0.5420467",
"0.5411606",
"0.5408692",
"0.53748184",
"0.5364944",
"0.53564733",
"0.5312851",
"0.5312058",
"0.5287448",
"0.5285899",
"0.5251726"
] | 0.74553514 | 0 |
test all ssh kwargs are not excluded from kwargs when preparing the SSH opts | def test_ssh_kwargs(test_opts):
opt_key = test_opts[0]
opt_value = test_opts[1]
# Is the kwarg in salt.utils.parsers?
in_parser = test_opts[2]
opts = {
"eauth": "auto",
"username": "test",
"password": "test",
"client": "ssh",
"tgt": "localhost",
"fun": "test.ping",
opt_key: opt_value,
}
client = salt.client.ssh.client.SSHClient(disable_custom_roster=True)
if in_parser:
ssh_kwargs = salt.utils.parsers.SaltSSHOptionParser().defaults
assert opt_key in ssh_kwargs
with patch("salt.roster.get_roster_file", MagicMock(return_value="")), patch(
"salt.client.ssh.shell.gen_key"
), patch("salt.fileserver.Fileserver.update"), patch("salt.utils.thin.gen_thin"):
ssh_obj = client._prep_ssh(**opts)
assert ssh_obj.opts.get(opt_key, None) == opt_value | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _BuildSshOptions(self, batch, ask_key, use_cluster_key,\n strict_host_check, private_key=None, quiet=True,\n port=None):\n options = [\n \"-oEscapeChar=none\",\n \"-oHashKnownHosts=no\",\n \"-oGlobalKnownHostsFile=%s\" % pathutils.SSH_KNOWN_HOSTS_FILE,\n \"-oUserKnownHostsFile=/dev/null\",\n \"-oCheckHostIp=no\",\n ]\n\n if use_cluster_key:\n options.append(\"-oHostKeyAlias=%s\" % self.cluster_name)\n\n if quiet:\n options.append(\"-q\")\n\n if private_key:\n options.append(\"-i%s\" % private_key)\n\n if port:\n options.append(\"-oPort=%d\" % port)\n\n # TODO: Too many boolean options, maybe convert them to more descriptive\n # constants.\n\n # Note: ask_key conflicts with batch mode\n if batch:\n if ask_key:\n raise errors.ProgrammerError(\"SSH call requested conflicting options\")\n\n options.append(\"-oBatchMode=yes\")\n\n if strict_host_check:\n options.append(\"-oStrictHostKeyChecking=yes\")\n else:\n options.append(\"-oStrictHostKeyChecking=no\")\n\n else:\n # non-batch mode\n\n if ask_key:\n options.append(\"-oStrictHostKeyChecking=ask\")\n elif strict_host_check:\n options.append(\"-oStrictHostKeyChecking=yes\")\n else:\n options.append(\"-oStrictHostKeyChecking=no\")\n\n if self.ipv6:\n options.append(\"-6\")\n else:\n options.append(\"-4\")\n\n return options",
"def split_remote_kwargs(cls, kwargs, include=None, skip=None):\n include = make_list(include) if include else []\n skip = make_list(skip) if skip else []\n transfer_kwargs = {\n name: kwargs.pop(name)\n for name in [\"cache\", \"prefer_cache\", \"retries\", \"retry_delay\"] + include\n if name in kwargs and name not in skip\n }\n return transfer_kwargs, kwargs",
"def testExtraArgsSSHTunnel(self):\n fake_ip_addr = \"1.1.1.1\"\n fake_rsa_key_file = \"/tmp/rsa_file\"\n fake_target_vnc_port = 8888\n target_adb_port = 9999\n ssh_user = \"fake_user\"\n fake_port = 12345\n self.Patch(utils, \"PickFreePort\", return_value=fake_port)\n self.Patch(utils, \"_ExecuteCommand\")\n self.Patch(subprocess, \"check_call\", return_value=True)\n extra_args_ssh_tunnel = \"-o command='shell %s %h' -o command1='ls -la'\"\n utils.AutoConnect(ip_addr=fake_ip_addr,\n rsa_key_file=fake_rsa_key_file,\n target_vnc_port=fake_target_vnc_port,\n target_adb_port=target_adb_port,\n ssh_user=ssh_user,\n client_adb_port=fake_port,\n extra_args_ssh_tunnel=extra_args_ssh_tunnel)\n args_list = [\"-i\", \"/tmp/rsa_file\",\n \"-o\", \"UserKnownHostsFile=/dev/null\",\n \"-o\", \"StrictHostKeyChecking=no\",\n \"-L\", \"12345:127.0.0.1:9999\",\n \"-L\", \"12345:127.0.0.1:8888\",\n \"-N\", \"-f\", \"-l\", \"fake_user\", \"1.1.1.1\",\n \"-o\", \"command=shell %s %h\",\n \"-o\", \"command1=ls -la\"]\n first_call_args = utils._ExecuteCommand.call_args_list[0][0]\n self.assertEqual(first_call_args[1], args_list)",
"def test_ssh_cmd_no_user(self):\n self.assertEqual(general.ssh_command(None,'example.com',('ls','-l')).command_line,\n ['ssh','example.com','ls','-l'])",
"def test_checkParameters(self):\n self.failUnlessEqual(self.nice.opts['long'], \"Alpha\")\n self.failUnlessEqual(self.nice.opts['another'], \"Beta\")\n self.failUnlessEqual(self.nice.opts['longonly'], \"noshort\")\n self.failUnlessEqual(self.nice.opts['shortless'], \"Gamma\")",
"def _verify_command_states(cls, kwargs):\n return kwargs",
"def test_parse_kwargs_multirounds(self):\n parser = ParlaiParser(True, False)\n opt = parser.parse_kwargs(\n task='integration_tests', mutators='episode_shuffle', preserve_context=True\n )\n assert opt['preserve_context'] is True\n opt = parser.parse_kwargs(\n task='integration_tests', mutators='episode_shuffle', preserve_context=False\n )\n assert opt['preserve_context'] is False\n\n with self.assertRaises(KeyError):\n parser.parse_kwargs(\n task='integration_tests', mutators='episode_shuffle', fake_option=False\n )\n\n with self.assertRaises(KeyError):\n parser.parse_kwargs(task='integration_tests', fake_option=False)",
"def split_transfer_kwargs(kwargs, skip=None):\n skip = make_list(skip) if skip else []\n transfer_kwargs = {\n name: kwargs.pop(name)\n for name in [\"cache\", \"prefer_cache\", \"retries\", \"retry_delay\"]\n if name in kwargs and name not in skip\n }\n return transfer_kwargs, kwargs",
"def test_args_none():\n args = cli.parse_args([])\n assert not args.copy\n assert not args.paste\n assert args.file is None\n assert not args.debug",
"def test_individual_valid(self, kwargs):\n # defaults\n final_kwargs = {'script': 'echo \"hello world\"', 'title': '', 'debug': False, 'strict': False,\n 'dry_run': False, 'item': None, 'env': {}, 'model': {}, 'variables': {}}\n final_kwargs.update(kwargs)\n\n config = ShellConfig(**final_kwargs)\n for key, value in final_kwargs.items():\n assert_that(key in config.__dict__, equal_to(True))\n assert_that(config.__dict__[key], equal_to(value))",
"def _verify_arguments(self):\n # if self.options.action == \"create\":\n # if self.options.encrypt_payload and not self.options.payload_secret:\n # self.parser.error('A secret must be supplied with --payload-secret option when the --encrypt-payload option is in use.')\n pass",
"def exclude_opts(cls) -> Tuple[str, ...]:\n return \"required\", \"print_config\", \"config\", \"ngpu\"",
"def common_args(revision=None, branch=None, ssh_username=None, ssh_key=None):\n args = []\n if ssh_username or ssh_key:\n opt = ['-e', 'ssh']\n if ssh_username:\n opt[1] += ' -l %s' % ssh_username\n if ssh_key:\n opt[1] += ' -i %s' % ssh_key\n args.extend(opt)\n if revision:\n args.extend(['-r', revision])\n elif branch:\n if hg_ver() >= (1, 6, 0):\n args.extend(['-b', branch])\n return args",
"def _accept_or_ignore_job_kwargs(self, _exclude_errors=(), **kwargs):\n errors = {}\n if kwargs:\n for field_name in kwargs.keys():\n errors[field_name] = [_(\"Field is not allowed on launch.\")]\n return ({}, kwargs, errors)",
"def test_config_opts(sc):\n assert sc.server_name is not None\n assert sc.deployment == Deployment.stg\n assert sc.admins is not None\n assert sc.command_handler is not None\n assert sc.command_handler_work_dir is not None\n assert sc.command_handler_pvc_env_var is not None\n assert sc.command_handler_image_reference is not None\n assert sc.command_handler_k8s_namespace is not None\n assert sc.fas_password is not None\n assert sc.testing_farm_secret is not None\n assert sc.github_requests_log_path is not None\n assert sc.webhook_secret is not None\n assert sc.validate_webhooks is not None\n assert sc.gitlab_token_secret is not None",
"def _validate_kwargs(exclude=[], **kwargs) -> None:\n valid_kwargs = [\n # \"auto_reconnect\",\n \"keep_alive\",\n \"proxy_options\",\n \"websockets\",\n ]\n\n for kwarg in kwargs:\n if (kwarg not in valid_kwargs) or (kwarg in exclude):\n # NOTE: TypeError is the conventional error that is returned when an invalid kwarg is\n # supplied. It feels like it should be a ValueError, but it's not.\n raise TypeError(\"Unsupported keyword argument: '{}'\".format(kwarg))",
"def cleanOptions(options):\r\n daemonize = options.pop('daemonize')\r\n _reload = options.pop('reload')\r\n dev = options.pop('dev')\r\n opts = []\r\n store_true = [\r\n '--nocache', '--global_cache', '--traceback', '--quiet', '--loud'\r\n ]\r\n store_false = []\r\n for key, value in options.iteritems():\r\n key = '--' + key\r\n if (key in store_true and value) or (key in store_false and not value):\r\n opts += [key, ]\r\n elif value:\r\n opts += [key, str(value)]\r\n return daemonize, _reload, opts",
"def test_parse_tgt_no_user(opts):\n host = \"localhost\"\n opts[\"ssh_user\"] = \"ssh-usr\"\n opts[\"tgt\"] = host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n assert not opts.get(\"ssh_cli_tgt\")\n client = ssh.SSH(opts)\n assert client.parse_tgt[\"hostname\"] == host\n assert client.parse_tgt[\"user\"] == opts[\"ssh_user\"]\n assert opts.get(\"ssh_cli_tgt\") == host",
"def get_unpinned_params(opts, params):\n return params - set([p for p, v in opts.__dict__.items() if p in LIKELIHOOD_PINNABLE_PARAMS and v is not None])",
"def test_docker_args_set_multi(self):\n with open(\".scuba.yml\", \"w\") as f:\n f.write(\n r\"\"\"\n image: na\n docker_args: --privileged -v /tmp/:/tmp/\n \"\"\"\n )\n\n config = scuba.config.load_config(\".scuba.yml\")\n assert config.docker_args == [\"--privileged\", \"-v\", \"/tmp/:/tmp/\"]",
"def sufficient_options(self):\n has_token = self.opts.get('token')\n has_project_domain_or_tenant = (self.opts.get('project_id') or\n (self.opts.get('project_name') and\n (self.opts.get('user_domain_name') or\n self.opts.get('user_domain_id'))) or\n (self.opts.get('tenant_id') or\n self.opts.get('tenant_name')))\n has_credential = (self.opts.get('username')\n and has_project_domain_or_tenant\n and self.opts.get('password')\n and self.opts.get('auth_url'))\n missing = not (has_token or has_credential)\n if missing:\n missing_opts = []\n opts = ['token', 'endpoint', 'username', 'password', 'auth_url',\n 'tenant_id', 'tenant_name']\n for opt in opts:\n if not self.opts.get(opt):\n missing_opts.append(opt)\n raise exceptions.AuthPluginOptionsMissing(missing_opts)",
"def test_checkCustoms(self):\n self.failUnlessEqual(self.nice.opts['myflag'], \"PONY!\")\n self.failUnlessEqual(self.nice.opts['myparam'], \"Tofu WITH A PONY!\")",
"def _filter_conn_kwargs(self, conn_kwargs):\n if conn_kwargs is None:\n return None\n if hasattr(self.__redis_mod.connection, \"URL_QUERY_ARGUMENT_PARSERS\"):\n parsers = self.__redis_mod.connection.URL_QUERY_ARGUMENT_PARSERS\n else:\n parsers = self.URL_QUERY_ARGUMENT_PARSERS\n # We don't want to monkey patch the class' dictionary, hence the copy\n all_parsers = self.__class__.EXTRA_ARGUMENT_PARSERS.copy()\n all_parsers.update(parsers)\n return {\n k: all_parsers[k](v) for k, v in conn_kwargs.items() if k in all_parsers\n }",
"def test_docker_args_not_set(self):\n with open(\".scuba.yml\", \"w\") as f:\n f.write(\n r\"\"\"\n image: na\n \"\"\"\n )\n\n config = scuba.config.load_config(\".scuba.yml\")\n assert config.docker_args is None",
"def AddSSHArgs(parser):\n parser.add_argument(\n '--ssh-flag',\n action='append',\n help=\"\"\"\\\n Additional flags to be passed to *ssh(1)*. It is recommended that flags\n be passed using an assignment operator and quotes. Example:\n\n $ {command} example-instance --zone=us-central1-a --ssh-flag=\"-vvv\" --ssh-flag=\"-L 80:localhost:80\"\n\n This flag will replace occurences of ``%USER%'' and ``%TPU%'' with\n their dereferenced values. For example, passing ``80:%TPU%:80`` into\n the flag is equivalent to passing ``80:162.222.181.197:80'' to *ssh(1)*\n if the external IP address of 'example-instance' is 162.222.181.197.\n\n If connecting to the instance's external IP, then %TPU% is replaced\n with that, otherwise it is replaced with the internal IP.\n \"\"\",\n )\n\n parser.add_argument(\n 'user_queued_resource',\n completer=completers.InstancesCompleter,\n metavar='[USER@]QR',\n help=\"\"\"\\\n Specifies the Cloud TPU Queued Resource to send SSH command to.\n\n ``USER'' specifies the username with which to SSH. If omitted, the user\n login name is used.\n\n ``QR'' specifies the name of the Cloud TPU Queued Resource to send SSH command to.\n \"\"\",\n )\n\n parser.add_argument(\n 'ssh_args',\n nargs=argparse.REMAINDER,\n help=\"\"\"\\\n Flags and positionals passed to the underlying ssh implementation.\n \"\"\",\n example=\"\"\"\\\n $ {command} example-instance --zone=us-central1-a -- -vvv -L 80:%TPU%:80\n \"\"\",\n )\n\n parser.add_argument(\n '--node',\n default='0',\n help=\"\"\"\\\n TPU node(s) to connect to. The supported value is a single 0-based\n index of the node(s) in the case of a TPU Pod. When also using the\n `--command` flag, it additionally supports a comma-separated list\n (e.g. '1,4,6'), range (e.g. '1-3'), or special keyword ``all\" to\n run the command concurrently on each of the specified node(s).\n\n Note that when targeting multiple nodes, you should run 'ssh-add'\n with your private key prior to executing the gcloud command. Default:\n 'ssh-add ~/.ssh/google_compute_engine'.\n \"\"\",\n )",
"def _validate_kwargs(self, kwargs):\n pass",
"def __checkArgs(self, kwargs):\n requiredArgs = self.__class__.__requiredArgs + \\\n self.__class__.__singleCompArgs if self.singleComp else\\\n self.__class__.__requiredArgs + self.__class__.__doubleCompArgs\n for arg in requiredArgs:\n if arg not in kwargs:\n raise ValueError(\"Essential keyword argument %s missing\" % arg)\n for (k, v) in kwargs.items():\n assert k in self.__class__.__allowedArgs, \"Invalid Argument %s\" % k",
"def test_backwards_compat_kwargs_duplicate_check(\n kwargs: t.Dict[str, t.Any]\n) -> None:\n with pytest.raises(ValueError) as err:\n pypiserver.backwards_compat_kwargs(kwargs)\n assert \"('redirect_to_fallback', 'disable_fallback')\" in str(err.value)",
"def test_args_without_match(self):\n args = [self.service, self.env, \"--secret_file\", \"test_data/parameters/test.cnf.parameters.json\"]\n with self.assertRaises(ValueError):\n ef_password.handle_args_and_set_context(args)",
"def test_invalid_adapter_opts(self):\n self.oslo_config_dict['heat'] = {\n 'interface': 'public',\n 'valid_interfaces': 'private',\n }\n self.assert_service_disabled(\n 'orchestration',\n \"Encountered an exception attempting to process config for \"\n \"project 'heat' (service type 'orchestration'): interface and \"\n \"valid_interfaces are mutually exclusive.\",\n )"
] | [
"0.61168206",
"0.6079297",
"0.59761137",
"0.5973248",
"0.5916606",
"0.58925116",
"0.57788223",
"0.5721382",
"0.56176704",
"0.55686975",
"0.5542059",
"0.5488901",
"0.54882175",
"0.54571706",
"0.544993",
"0.5444227",
"0.5437077",
"0.54274124",
"0.54254705",
"0.53893155",
"0.53870136",
"0.5371983",
"0.5342214",
"0.5329801",
"0.5326872",
"0.5313785",
"0.5305533",
"0.5287895",
"0.5256743",
"0.52563703"
] | 0.7008141 | 0 |
test expand_target when host is not included in the rosterdata | def test_expand_target_no_host(opts, tmp_path):
host = "127.0.0.1"
user = "test-user@"
opts["tgt"] = user + host
roster = """
localhost: 127.0.0.1
"""
roster_file = str(tmp_path / "test_roster_no_host")
with salt.utils.files.fopen(roster_file, "w") as fp:
salt.utils.yaml.safe_dump(salt.utils.yaml.safe_load(roster), fp)
with patch("salt.utils.network.is_reachable_host", MagicMock(return_value=False)):
client = ssh.SSH(opts)
assert opts["tgt"] == user + host
with patch("salt.roster.get_roster_file", MagicMock(return_value=roster_file)):
client._expand_target()
assert opts["tgt"] == host | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_expand_target_no_user(opts, roster):\n host = \"127.0.0.1\"\n opts[\"tgt\"] = host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n client = ssh.SSH(opts)\n assert opts[\"tgt\"] == host\n\n with patch(\n \"salt.roster.get_roster_file\", MagicMock(return_value=\"/etc/salt/roster\")\n ), patch(\n \"salt.client.ssh.compile_template\",\n MagicMock(return_value=salt.utils.yaml.safe_load(roster)),\n ):\n client._expand_target()\n assert opts[\"tgt\"] == host",
"def test_expand_target_dns(opts, roster):\n host = \"localhost\"\n user = \"test-user@\"\n opts[\"tgt\"] = user + host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n client = ssh.SSH(opts)\n assert opts[\"tgt\"] == user + host\n with patch(\n \"salt.roster.get_roster_file\", MagicMock(return_value=\"/etc/salt/roster\")\n ), patch(\n \"salt.client.ssh.compile_template\",\n MagicMock(return_value=salt.utils.yaml.safe_load(roster)),\n ):\n client._expand_target()\n assert opts[\"tgt\"] == host",
"def test_expand_target_ip_address(opts, roster):\n host = \"127.0.0.1\"\n user = \"test-user@\"\n opts[\"tgt\"] = user + host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n client = ssh.SSH(opts)\n assert opts[\"tgt\"] == user + host\n with patch(\n \"salt.roster.get_roster_file\", MagicMock(return_value=\"/etc/salt/roster\")\n ), patch(\n \"salt.client.ssh.compile_template\",\n MagicMock(return_value=salt.utils.yaml.safe_load(roster)),\n ):\n client._expand_target()\n assert opts[\"tgt\"] == host",
"def test_update_expand_target_dns(opts, roster):\n host = \"localhost\"\n user = \"test-user@\"\n opts[\"tgt\"] = user + host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n client = ssh.SSH(opts)\n assert opts[\"tgt\"] == user + host\n with patch(\n \"salt.roster.get_roster_file\", MagicMock(return_value=\"/etc/salt/roster\")\n ), patch(\n \"salt.client.ssh.compile_template\",\n MagicMock(return_value=salt.utils.yaml.safe_load(roster)),\n ):\n client._expand_target()\n client._update_targets()\n assert opts[\"tgt\"] == host\n assert client.targets[host][\"user\"] == user.split(\"@\")[0]",
"def testExpandedTargets(self):\n self.all_targets = self.blade.analyze_targets()\n\n sys.stdout.flush()\n sys.stderr.flush()\n\n self.assertTrue(self.blade.get_expanded())\n self.assertTrue(self.all_targets)\n\n system_lib = ('#', 'pthread')\n proto_lib_option = (self.target_path, 'rpc_option_proto')\n proto_lib_meta = (self.target_path, 'rpc_meta_info_proto')\n cc_library_poppy = (self.target_path, 'poppy')\n cc_lib_poppy_mock = (self.target_path, 'poppy_mock')\n static_resource = (self.target_path, 'static_resource')\n cc_test = (self.target_path, 'rpc_channel_test')\n swig_library = (self.target_path, 'poppy_client')\n lex_yacc_library = (self.target_path, 'parser')\n cc_plugin = (self.target_path, 'meter_business')\n gen_rule = (self.target_path, 'search_service_echo')\n java_jar = (os.path.join(self.target_path, 'java'),\n 'poppy_java_client')\n cc_binary = (self.target_path, 'echoserver')\n cc_lib_prebuild = (self.target_path, 'poppy_swig_wrap')\n java_jar_prebuild = (os.path.join(self.target_path, 'java', 'lib'),\n 'protobuf-java')\n\n self.assertTrue(cc_library_poppy in self.all_targets.keys())\n\n poppy_deps = self.all_targets.get(cc_library_poppy, {}).get('deps', [])\n poppy_mock_deps = self.all_targets.get(cc_lib_poppy_mock, {}).get('deps', [])\n self.assertTrue(poppy_deps)\n self.assertTrue(poppy_mock_deps)\n\n self.assertTrue(proto_lib_option in poppy_deps)\n self.assertTrue(proto_lib_meta in poppy_deps)\n self.assertTrue(static_resource in poppy_deps)\n self.assertTrue(system_lib in poppy_deps)\n self.assertTrue(cc_library_poppy in poppy_mock_deps)\n self.assertTrue(proto_lib_meta in poppy_mock_deps)\n\n poppy_client_deps = self.all_targets.get(swig_library, {}).get('deps', [])\n self.assertTrue(poppy_client_deps)\n self.assertTrue(cc_library_poppy in poppy_client_deps)\n self.assertTrue(cc_lib_prebuild in poppy_client_deps)\n\n self.assertTrue(java_jar in self.all_targets.keys())\n java_jar_deps = self.all_targets.get(java_jar, {}).get('deps', [])\n self.assertTrue(java_jar_deps)\n\n self.assertTrue(proto_lib_option in java_jar_deps)\n self.assertTrue(proto_lib_meta in java_jar_deps)\n self.assertTrue(java_jar_prebuild in java_jar_deps)\n self.assertTrue(cc_library_poppy not in java_jar_deps)",
"def test_get_host(self):\n pass",
"def test_get_host_access(self):\n pass",
"def testXtargets(self):\n\n self.inv._devices = collections.OrderedDict([\n ('device_a', self.Device()), ('device_b', self.Device()),\n ('device_c', self.Device()), ('bogus', self.Device())])\n\n # Null command with no targets.\n self.assertEqual('XTargets: ',\n self.inv._CmdFilter('xtargets', []))\n\n # Single host.\n self.inv._CmdFilter('targets', ['device_c'])\n self.inv._CmdFilter('xtargets', ['device_a'])\n self.assertEqual(['device_c'], self.inv.device_list)\n self.inv._CmdFilter('xtargets', ['device_c'])\n self.assertEqual([], self.inv.device_list)\n\n # Exclusion list cleared.\n self.inv._CmdFilter('targets', ['device_c'])\n self.inv._CmdFilter('xtargets', ['^'])\n self.assertEqual(['device_c'], self.inv.device_list)\n\n # Exclude all.\n self.inv._CmdFilter('targets', ['device_c,device_a'])\n self.inv._CmdFilter('xtargets', ['^.*'])\n self.assertEqual([], self.inv.device_list)\n\n # Exclude partial.\n self.inv._CmdFilter('targets', ['device_c,device_a'])\n self.inv._CmdFilter('xtargets', ['^.*_c'])\n self.assertEqual(['device_a'], self.inv.device_list)\n\n # Inrementally add suffix to exclude the last one.\n self.inv._CmdFilter('xtargets', ['^.*_a'], True)\n self.assertEqual([], self.inv.device_list)",
"def test_expand(self):\n # Single\n t = URITemplate(\"https://api.github.com/users{/user}\")\n expanded = \"https://api.github.com/users/sigmavirus24\"\n self.assertEqual(t.expand(user=\"sigmavirus24\"), expanded)\n v = t.variables[0]\n self.assertEqual(v.expand({\"user\": None}), {\"/user\": \"\"})\n\n # Multiple\n t = URITemplate(\"https://api.github.com/users{/user}{/repo}\")\n expanded = \"https://api.github.com/users/sigmavirus24/github3.py\"\n self.assertEqual(\n t.expand({\"repo\": \"github3.py\"}, user=\"sigmavirus24\"), expanded\n )",
"def test_striping_patch(self):\n self.create_simple_filesystem(synthetic_host(\"myserver\"))\n hosts = [synthetic_host(\"myserver{0:d}\".format(n)) for n in range(4)] * 2\n # keep hosts in alternating order, but supply them grouped\n objects = [\n {\"kind\": \"OST\", \"filesystem_id\": self.fs.id, \"volume_id\": synthetic_volume_full(host).id}\n for host in sorted(hosts, key=str)\n ]\n response = self.api_client.patch(\"/api/target/\", data={\"deletions\": [], \"objects\": objects})\n self.assertHttpAccepted(response)\n content = json.loads(response.content)\n self.assertEqual(map(str, hosts), list(self._target_hosts(content[\"targets\"])))",
"def test_rebuild_on_host_updated_target(self):\n def fake_get_compute_info(context, host):\n self.assertTrue(context.is_admin)\n self.assertEqual('fake-mini', host)\n cn = objects.ComputeNode(hypervisor_hostname=NODENAME)\n return cn\n\n with test.nested(\n mock.patch.object(self.compute.driver, 'instance_on_disk',\n side_effect=lambda x: True),\n mock.patch.object(self.compute, '_get_compute_info',\n side_effect=fake_get_compute_info)\n ) as (mock_inst, mock_get):\n self._rebuild()\n\n # Should be on destination host\n instance = db.instance_get(self.context, self.inst.id)\n self.assertEqual(instance['host'], self.compute.host)\n self.assertEqual(NODENAME, instance['node'])\n self.assertTrue(mock_inst.called)\n self.assertTrue(mock_get.called)",
"def test_perform_host_action(self):\n pass",
"def test_nres_targets_list(self):\n response = self.client.get(reverse('nres_calibrations:nres_home'))\n self.assertContains(response, self.target.id)",
"def testTargets(self):\n\n self.inv._devices = collections.OrderedDict([\n ('device_a', self.Device()), ('device_b', self.Device()),\n ('device_c', self.Device()), ('bogus', self.Device())])\n\n # Null command with no targets.\n self.assertEqual('Targets: ', self.inv._CmdFilter('targets', []))\n self.assertEqual('XTargets: ', self.inv._CmdFilter('xtargets', []))\n\n # Single host.\n self.inv._CmdFilter('targets', ['device_c'])\n self.assertEqual(['device_c'], self.inv.device_list)\n # Nonexistant host - rejected.\n self.assertRaises(ValueError, self.inv._CmdFilter,\n 'targets', ['nonexistant'])\n self.assertEqual(['device_c'], self.inv.device_list)\n\n # Multiple hosts.\n self.inv._CmdFilter('targets', ['device_c,device_a'])\n self.assertEqual(['device_a', 'device_c'], self.inv.device_list)\n\n # Build target with incremental suffix addition.\n self.inv._CmdFilter('targets', ['device_c'])\n self.inv._CmdFilter('targets', ['device_a'], True)\n self.assertEqual(['device_a', 'device_c'], self.inv.device_list)\n\n self.inv._CmdFilter('targets', ['^'])\n self.inv._CmdFilter('targets', ['device_c,device_a'], True)\n self.assertEqual(['device_a', 'device_c'], self.inv.device_list)\n\n # Null command with targets.\n self.assertEqual('Targets: device_c,device_a',\n self.inv._CmdFilter('targets', []))\n\n # Clean targets.\n # Unlike other filters, blank targets is not a match.\n self.inv._CmdFilter('targets', ['^'])\n self.assertEqual(self.inv.device_list, [])\n self.inv._CmdFilter('targets', ['^$'])\n self.assertEqual(self.inv.device_list, [])",
"def test_update_targets_dns(opts):\n host = \"localhost\"\n user = \"test-user@\"\n opts[\"tgt\"] = user + host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n client = ssh.SSH(opts)\n assert opts[\"tgt\"] == user + host\n client._update_targets()\n assert opts[\"tgt\"] == host\n assert client.targets[host][\"user\"] == user.split(\"@\")[0]",
"def test_target_existence(self):\n self.create_ptr(\n ip_str='128.193.0.2', fqdn='nonexistent.oregonstate.edu',\n ip_type='4')",
"def test_update_targets_ip_address(opts):\n host = \"127.0.0.1\"\n user = \"test-user@\"\n opts[\"tgt\"] = user + host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n client = ssh.SSH(opts)\n assert opts[\"tgt\"] == user + host\n client._update_targets()\n assert opts[\"tgt\"] == host\n assert client.targets[host][\"user\"] == user.split(\"@\")[0]",
"def test_download_host(self):\n pass",
"def test_update_targets_no_user(opts):\n host = \"127.0.0.1\"\n opts[\"tgt\"] = host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n client = ssh.SSH(opts)\n assert opts[\"tgt\"] == host\n client._update_targets()\n assert opts[\"tgt\"] == host",
"def test_rebuild_on_host_updated_target_node_not_found(self):\n def fake_get_compute_info(context, host):\n raise exception.ComputeHostNotFound(host=host)\n with test.nested(\n mock.patch.object(self.compute.driver, 'instance_on_disk',\n side_effect=lambda x: True),\n mock.patch.object(self.compute, '_get_compute_info',\n side_effect=fake_get_compute_info)\n ) as (mock_inst, mock_get):\n self.assertRaises(exception.InstanceFaultRollback,\n self._rebuild, expect_error=True)\n\n # Should be on destination host\n instance = db.instance_get(self.context, self.inst.id)\n self.assertEqual('fake_host_2', instance['host'])\n self.assertEqual('fakenode2', instance['node'])\n mock_inst.assert_not_called()\n mock_get.assert_called_once_with(mock.ANY, self.compute.host)",
"def validate_target(target: str) -> bool:\n try:\n gethostbyname(target)\n except (gaierror, UnicodeError):\n return False\n return True",
"def test_parse_tgt(opts):\n host = \"localhost\"\n user = \"test-user@\"\n opts[\"tgt\"] = user + host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n assert not opts.get(\"ssh_cli_tgt\")\n client = ssh.SSH(opts)\n assert client.parse_tgt[\"hostname\"] == host\n assert client.parse_tgt[\"user\"] == user.split(\"@\")[0]\n assert opts.get(\"ssh_cli_tgt\") == user + host",
"def test_parse_tgt_no_user(opts):\n host = \"localhost\"\n opts[\"ssh_user\"] = \"ssh-usr\"\n opts[\"tgt\"] = host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n assert not opts.get(\"ssh_cli_tgt\")\n client = ssh.SSH(opts)\n assert client.parse_tgt[\"hostname\"] == host\n assert client.parse_tgt[\"user\"] == opts[\"ssh_user\"]\n assert opts.get(\"ssh_cli_tgt\") == host",
"def targets_placeholder(self):",
"def test_replace_host_subnet(self):\n pass",
"def is_gentarget(self, target):\r\n raise NotImplementedError",
"def _set_target_info(self, targets, host_grps, iqn):\n for host_grp in host_grps:\n port = host_grp['portId']\n gid = host_grp['hostGroupNumber']\n storage_iqn = host_grp['iscsiName']\n if self._is_host_iqn_registered_in_target(port, gid, iqn):\n targets['info'][port] = True\n targets['list'].append((port, gid))\n targets['iqns'][(port, gid)] = storage_iqn\n return True\n return False",
"def targets(tgt, tgt_type=\"glob\"):\n\n ssh_known_hosts_file = __opts__.get(\"ssh_known_hosts_file\")\n\n if not os.path.isfile(ssh_known_hosts_file):\n log.error(\"Cannot find SSH known_hosts file\")\n raise OSError(\"Cannot find SSH known_hosts file\")\n if not os.access(ssh_known_hosts_file, os.R_OK):\n log.error(\"Cannot access SSH known_hosts file: %s\", ssh_known_hosts_file)\n raise OSError(\n \"Cannot access SSH known_hosts file: {}\".format(ssh_known_hosts_file)\n )\n\n with salt.utils.files.fopen(ssh_known_hosts_file, \"r\") as hostfile:\n raw = _parse_ssh_known_hosts([line.rstrip() for line in hostfile])\n\n return __utils__[\"roster_matcher.targets\"](raw, tgt, tgt_type, \"ipv4\")",
"def test_target_arg(self, parse_input):\n with pytest.warns(SyntaxWarning, match=\"only accept keyword options\"):\n parse_input(\"name testname\\nversion 1.0\\ntarget example (6)\")",
"def test_dest_node() -> dict:\n return {\"aetitle\": \"pacsanini_testing_server\", \"ip\": \"localhost\", \"port\": 11112}"
] | [
"0.7059753",
"0.6958238",
"0.6752136",
"0.6731314",
"0.5922972",
"0.5872034",
"0.5807112",
"0.57836884",
"0.57331073",
"0.57315934",
"0.57153296",
"0.5710335",
"0.5678303",
"0.56106454",
"0.55797064",
"0.55640423",
"0.5553922",
"0.55535734",
"0.553814",
"0.5440517",
"0.5362934",
"0.5343388",
"0.5299555",
"0.52868956",
"0.52574426",
"0.5229634",
"0.52251786",
"0.52212006",
"0.5175932",
"0.51534855"
] | 0.7415296 | 0 |
test update_targets when host is ip address | def test_update_targets_ip_address(opts):
host = "127.0.0.1"
user = "test-user@"
opts["tgt"] = user + host
with patch("salt.utils.network.is_reachable_host", MagicMock(return_value=False)):
client = ssh.SSH(opts)
assert opts["tgt"] == user + host
client._update_targets()
assert opts["tgt"] == host
assert client.targets[host]["user"] == user.split("@")[0] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_update_targets_dns(opts):\n host = \"localhost\"\n user = \"test-user@\"\n opts[\"tgt\"] = user + host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n client = ssh.SSH(opts)\n assert opts[\"tgt\"] == user + host\n client._update_targets()\n assert opts[\"tgt\"] == host\n assert client.targets[host][\"user\"] == user.split(\"@\")[0]",
"def test_update_targets_no_user(opts):\n host = \"127.0.0.1\"\n opts[\"tgt\"] = host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n client = ssh.SSH(opts)\n assert opts[\"tgt\"] == host\n client._update_targets()\n assert opts[\"tgt\"] == host",
"def test_ipam_ip_addresses_update(self):\n pass",
"def test_update_expand_target_dns(opts, roster):\n host = \"localhost\"\n user = \"test-user@\"\n opts[\"tgt\"] = user + host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n client = ssh.SSH(opts)\n assert opts[\"tgt\"] == user + host\n with patch(\n \"salt.roster.get_roster_file\", MagicMock(return_value=\"/etc/salt/roster\")\n ), patch(\n \"salt.client.ssh.compile_template\",\n MagicMock(return_value=salt.utils.yaml.safe_load(roster)),\n ):\n client._expand_target()\n client._update_targets()\n assert opts[\"tgt\"] == host\n assert client.targets[host][\"user\"] == user.split(\"@\")[0]",
"def test_rebuild_on_host_updated_target(self):\n def fake_get_compute_info(context, host):\n self.assertTrue(context.is_admin)\n self.assertEqual('fake-mini', host)\n cn = objects.ComputeNode(hypervisor_hostname=NODENAME)\n return cn\n\n with test.nested(\n mock.patch.object(self.compute.driver, 'instance_on_disk',\n side_effect=lambda x: True),\n mock.patch.object(self.compute, '_get_compute_info',\n side_effect=fake_get_compute_info)\n ) as (mock_inst, mock_get):\n self._rebuild()\n\n # Should be on destination host\n instance = db.instance_get(self.context, self.inst.id)\n self.assertEqual(instance['host'], self.compute.host)\n self.assertEqual(NODENAME, instance['node'])\n self.assertTrue(mock_inst.called)\n self.assertTrue(mock_get.called)",
"def test_replace_host_subnet(self):\n pass",
"def test_patch_host_subnet(self):\n pass",
"def test_expand_target_ip_address(opts, roster):\n host = \"127.0.0.1\"\n user = \"test-user@\"\n opts[\"tgt\"] = user + host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n client = ssh.SSH(opts)\n assert opts[\"tgt\"] == user + host\n with patch(\n \"salt.roster.get_roster_file\", MagicMock(return_value=\"/etc/salt/roster\")\n ), patch(\n \"salt.client.ssh.compile_template\",\n MagicMock(return_value=salt.utils.yaml.safe_load(roster)),\n ):\n client._expand_target()\n assert opts[\"tgt\"] == host",
"def test_striping_patch(self):\n self.create_simple_filesystem(synthetic_host(\"myserver\"))\n hosts = [synthetic_host(\"myserver{0:d}\".format(n)) for n in range(4)] * 2\n # keep hosts in alternating order, but supply them grouped\n objects = [\n {\"kind\": \"OST\", \"filesystem_id\": self.fs.id, \"volume_id\": synthetic_volume_full(host).id}\n for host in sorted(hosts, key=str)\n ]\n response = self.api_client.patch(\"/api/target/\", data={\"deletions\": [], \"objects\": objects})\n self.assertHttpAccepted(response)\n content = json.loads(response.content)\n self.assertEqual(map(str, hosts), list(self._target_hosts(content[\"targets\"])))",
"def update_targets(self):\n self.actor.update_target_network()\n self.critic.update_target_network()",
"def test_ipam_ip_addresses_partial_update(self):\n pass",
"def test_networking_project_network_update(self):\n pass",
"def test_perform_host_action(self):\n pass",
"def set_target(self, host, port):\r\n pass",
"def _set_target_info(self, targets, host_grps, iqn):\n for host_grp in host_grps:\n port = host_grp['portId']\n gid = host_grp['hostGroupNumber']\n storage_iqn = host_grp['iscsiName']\n if self._is_host_iqn_registered_in_target(port, gid, iqn):\n targets['info'][port] = True\n targets['list'].append((port, gid))\n targets['iqns'][(port, gid)] = storage_iqn\n return True\n return False",
"async def test_update_address(hass):\n config_entry = await setup_axis_integration(hass)\n device = hass.data[AXIS_DOMAIN][config_entry.unique_id]\n assert device.api.config.host == \"1.2.3.4\"\n\n with patch(\n \"homeassistant.components.axis.async_setup_entry\",\n return_value=True,\n ) as mock_setup_entry, respx.mock:\n mock_default_vapix_requests(respx, \"2.3.4.5\")\n await hass.config_entries.flow.async_init(\n AXIS_DOMAIN,\n data={\n \"host\": \"2.3.4.5\",\n \"port\": 80,\n \"name\": \"name\",\n \"properties\": {\"macaddress\": MAC},\n },\n context={\"source\": SOURCE_ZEROCONF},\n )\n await hass.async_block_till_done()\n\n assert device.api.config.host == \"2.3.4.5\"\n assert len(mock_setup_entry.mock_calls) == 1",
"def test_get_source_ip(self):\n pass",
"def test_update_host(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n library = get_library(device, \"libtests.so\")\n a = numpy.empty((4711 * 1024,), dtype=int)\n a_expect = numpy.empty_like(a)\n pattern = int(0xdeadbeefabbaabba)\n a_expect[:] = pattern\n offl_a = stream.bind(a)\n stream.invoke(library.test_set_pattern, offl_a, offl_a.size, pattern)\n offl_a.update_host()\n stream.sync()\n\n self.assertTrue((a == a_expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(a, a_expect))",
"def updateHosts(request):\n\n updater = HostUpdater()\n updater.run()\n return http.HttpResponse(\"Ok\")",
"def test_expand_target_dns(opts, roster):\n host = \"localhost\"\n user = \"test-user@\"\n opts[\"tgt\"] = user + host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n client = ssh.SSH(opts)\n assert opts[\"tgt\"] == user + host\n with patch(\n \"salt.roster.get_roster_file\", MagicMock(return_value=\"/etc/salt/roster\")\n ), patch(\n \"salt.client.ssh.compile_template\",\n MagicMock(return_value=salt.utils.yaml.safe_load(roster)),\n ):\n client._expand_target()\n assert opts[\"tgt\"] == host",
"def setup_targets(self):\n for i in range(self.min_peers):\n self.targets.append(dict(address=0, tolerance=0, connected=False))\n # NOT IMPLEMENTED HERE",
"def test_get_host(self):\n pass",
"def test_ip(response):\n \n # from comeon_core import update\n ip = getIP()\n print(ip)\n #init_db(engine)\n #update()\n assert True",
"def _update(self, host):\n pass",
"def test_port_update_is_host_aware(self):\n with self.network() as network:\n segment = self._test_create_segment(\n network_id=network['network']['id'],\n physical_network='physnet',\n network_type=constants.TYPE_VLAN)\n\n # Map the host to the segment\n self._setup_host_mappings([(segment['segment']['id'], 'fakehost')])\n\n # Create a bound port with no IP address (since there is no subnet)\n response = self._create_port(self.fmt,\n net_id=network['network']['id'],\n tenant_id=network['network']['tenant_id'],\n is_admin=True,\n arg_list=(portbindings.HOST_ID,),\n **{portbindings.HOST_ID: 'fakehost'})\n port = self.deserialize(self.fmt, response)\n\n # Create the subnet and try to update the port to get an IP\n with self.subnet(network=network,\n segment_id=segment['segment']['id']) as subnet:\n self._validate_l2_adjacency(network['network']['id'],\n is_adjacent=False)\n # Try requesting an IP (but the only subnet is on a segment)\n data = {'port': {\n 'fixed_ips': [{'subnet_id': subnet['subnet']['id']}]}}\n port_id = port['port']['id']\n port_req = self.new_update_request('ports', data, port_id)\n response = port_req.get_response(self.api)\n\n # Since port is bound and there is a mapping to segment, it succeeds.\n self.assertEqual(webob.exc.HTTPOk.code, response.status_int)\n self._assert_one_ip_in_subnet(response, subnet['subnet']['cidr'])",
"def addTargets(v):\n if v.spoof:\n print(\" \" + bcolors.WARNING + \"Turn off spoofer first\" + bcolors.ENDC)\n time.sleep(1)\n return\n try:\n target = input(\" Enter IP address of targets separated with spaces: \")\n except KeyboardInterrupt:\n return\n\n target = target.split(\" \")\n\n if len(v.targets) == 0:\n try:\n gw = input(\" Enter IP address of router (leave blank if same subnet): \")\n except KeyboardInterrupt:\n return\n if validIPAddress(gw):\n tmp = spoofer.get_mac(gw)\n if tmp:\n v.targets.append(gw)\n v.macs.append(tmp)\n else:\n print(\" \" + bcolors.WARNING + \"Did not add \" + gw + \" since no mac address found\" + bcolors.ENDC)\n time.sleep(2)\n return\n else:\n gw = getGwIp(target[0])\n if gw:\n tmp = spoofer.get_mac(gw)\n if tmp:\n v.targets.append(gw)\n v.macs.append(tmp)\n else:\n if gw:\n print(\" \" + bcolors.WARNING + \"Did not add \" + gw + \" since no mac address found\" + bcolors.ENDC)\n time.sleep(1)\n return\n\n for x in target:\n if validIPAddress(x):\n tmp = spoofer.get_mac(x)\n if tmp:\n v.targets.append(x)\n v.macs.append(x)\n else:\n print(\" \" + bcolors.WARNING + \"Did not add \" + x + \" since no mac address found\" + bcolors.ENDC)\n time.sleep(1)\n else:\n print(\" \" + bcolors.WARNING + x + \" is not a valid ip address\" + bcolors.ENDC)\n time.sleep(1)\n\n return",
"def update_targets(self, indexes: List[int], new_targets: np.ndarray):\n if self.train:\n self.train_nat[indexes, :] = new_targets\n else:\n self.test_nat[indexes, :] = new_targets",
"def test_client_address_update(self):\n pass",
"def test_ping_from_neighbor(duthosts, enum_rand_one_per_hwsku_frontend_hostname, nbrhosts):\n duthost = duthosts[enum_rand_one_per_hwsku_frontend_hostname]\n cfg_facts = duthost.config_facts(host=duthost.hostname, source=\"running\")[\"ansible_facts\"]\n dut_ports = cfg_facts[\"PORT\"]\n portchannel_itfs = cfg_facts[\"PORTCHANNEL_INTERFACE\"]\n for portchannel, ip_list in list(portchannel_itfs.items()):\n portchannel_members = list(cfg_facts[\"PORTCHANNEL_MEMBER\"][portchannel].keys())\n hostname = dut_ports[portchannel_members[0]]['description'].split(':')[0]\n for nbr_hostname, nbrhost in list(nbrhosts.items()):\n if nbr_hostname != hostname:\n continue\n for ip in ip_list:\n ip = ip.split('/')[0]\n pytest_assert(nbrhost['host'].ping_dest(ip), \"{} ping port channel {} failed\".format(nbr_hostname, ip))",
"def autofixTargets(self, local_ctx):\n pass"
] | [
"0.7464471",
"0.71415",
"0.6910683",
"0.6657661",
"0.64958376",
"0.64322263",
"0.6429889",
"0.6385466",
"0.6237695",
"0.61982393",
"0.61476934",
"0.61162746",
"0.6114164",
"0.610654",
"0.6072472",
"0.6027562",
"0.6008352",
"0.598404",
"0.5966812",
"0.5915929",
"0.5911148",
"0.59039783",
"0.5876275",
"0.5845962",
"0.5791741",
"0.5774339",
"0.57635325",
"0.5760564",
"0.5760127",
"0.57295173"
] | 0.8039261 | 0 |
test update_targets when host is dns | def test_update_targets_dns(opts):
host = "localhost"
user = "test-user@"
opts["tgt"] = user + host
with patch("salt.utils.network.is_reachable_host", MagicMock(return_value=False)):
client = ssh.SSH(opts)
assert opts["tgt"] == user + host
client._update_targets()
assert opts["tgt"] == host
assert client.targets[host]["user"] == user.split("@")[0] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_update_targets_ip_address(opts):\n host = \"127.0.0.1\"\n user = \"test-user@\"\n opts[\"tgt\"] = user + host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n client = ssh.SSH(opts)\n assert opts[\"tgt\"] == user + host\n client._update_targets()\n assert opts[\"tgt\"] == host\n assert client.targets[host][\"user\"] == user.split(\"@\")[0]",
"def test_update_expand_target_dns(opts, roster):\n host = \"localhost\"\n user = \"test-user@\"\n opts[\"tgt\"] = user + host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n client = ssh.SSH(opts)\n assert opts[\"tgt\"] == user + host\n with patch(\n \"salt.roster.get_roster_file\", MagicMock(return_value=\"/etc/salt/roster\")\n ), patch(\n \"salt.client.ssh.compile_template\",\n MagicMock(return_value=salt.utils.yaml.safe_load(roster)),\n ):\n client._expand_target()\n client._update_targets()\n assert opts[\"tgt\"] == host\n assert client.targets[host][\"user\"] == user.split(\"@\")[0]",
"def test_update_targets_no_user(opts):\n host = \"127.0.0.1\"\n opts[\"tgt\"] = host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n client = ssh.SSH(opts)\n assert opts[\"tgt\"] == host\n client._update_targets()\n assert opts[\"tgt\"] == host",
"def test_expand_target_dns(opts, roster):\n host = \"localhost\"\n user = \"test-user@\"\n opts[\"tgt\"] = user + host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n client = ssh.SSH(opts)\n assert opts[\"tgt\"] == user + host\n with patch(\n \"salt.roster.get_roster_file\", MagicMock(return_value=\"/etc/salt/roster\")\n ), patch(\n \"salt.client.ssh.compile_template\",\n MagicMock(return_value=salt.utils.yaml.safe_load(roster)),\n ):\n client._expand_target()\n assert opts[\"tgt\"] == host",
"def test_rebuild_on_host_updated_target(self):\n def fake_get_compute_info(context, host):\n self.assertTrue(context.is_admin)\n self.assertEqual('fake-mini', host)\n cn = objects.ComputeNode(hypervisor_hostname=NODENAME)\n return cn\n\n with test.nested(\n mock.patch.object(self.compute.driver, 'instance_on_disk',\n side_effect=lambda x: True),\n mock.patch.object(self.compute, '_get_compute_info',\n side_effect=fake_get_compute_info)\n ) as (mock_inst, mock_get):\n self._rebuild()\n\n # Should be on destination host\n instance = db.instance_get(self.context, self.inst.id)\n self.assertEqual(instance['host'], self.compute.host)\n self.assertEqual(NODENAME, instance['node'])\n self.assertTrue(mock_inst.called)\n self.assertTrue(mock_get.called)",
"def test_updatednsrecord(kasserver, kasapi):\n kasserver.add_dns_record(\"test.example.com\", \"CNAME\", \"www.example2.com\")\n assert kasapi.requests_contains(\"update_dns_settings\")",
"def test_striping_patch(self):\n self.create_simple_filesystem(synthetic_host(\"myserver\"))\n hosts = [synthetic_host(\"myserver{0:d}\".format(n)) for n in range(4)] * 2\n # keep hosts in alternating order, but supply them grouped\n objects = [\n {\"kind\": \"OST\", \"filesystem_id\": self.fs.id, \"volume_id\": synthetic_volume_full(host).id}\n for host in sorted(hosts, key=str)\n ]\n response = self.api_client.patch(\"/api/target/\", data={\"deletions\": [], \"objects\": objects})\n self.assertHttpAccepted(response)\n content = json.loads(response.content)\n self.assertEqual(map(str, hosts), list(self._target_hosts(content[\"targets\"])))",
"def update_targets(self):\n self.actor.update_target_network()\n self.critic.update_target_network()",
"def updateHosts(request):\n\n updater = HostUpdater()\n updater.run()\n return http.HttpResponse(\"Ok\")",
"async def test_update_address(hass):\n config_entry = await setup_axis_integration(hass)\n device = hass.data[AXIS_DOMAIN][config_entry.unique_id]\n assert device.api.config.host == \"1.2.3.4\"\n\n with patch(\n \"homeassistant.components.axis.async_setup_entry\",\n return_value=True,\n ) as mock_setup_entry, respx.mock:\n mock_default_vapix_requests(respx, \"2.3.4.5\")\n await hass.config_entries.flow.async_init(\n AXIS_DOMAIN,\n data={\n \"host\": \"2.3.4.5\",\n \"port\": 80,\n \"name\": \"name\",\n \"properties\": {\"macaddress\": MAC},\n },\n context={\"source\": SOURCE_ZEROCONF},\n )\n await hass.async_block_till_done()\n\n assert device.api.config.host == \"2.3.4.5\"\n assert len(mock_setup_entry.mock_calls) == 1",
"def update_dns(self):\n\t\tfor url in self.update_urls:\n\n\t\t\t# Adds protocol if address does not contain it\n\t\t\tif 'http://' not in url: url = 'http://' + url\n\n\t\t\trequest = urllib.urlopen(url)\n\t\t\trequest.close()",
"def alias_all(self, host_names, target, raise_on_not_found=True):\n self.set_all(host_names, self.get_one(target, raise_on_not_found))",
"async def test_discovered_by_dhcp_or_integration_discovery_updates_host(\n hass: HomeAssistant, source, data\n) -> None:\n entry = MockConfigEntry(\n domain=DOMAIN,\n unique_id=TEST_SYSTEM_INFO[\"id\"],\n data={CONF_HOST: \"dummy\"},\n )\n entry.add_to_hass(hass)\n\n with _patch_wizlight():\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": source}, data=data\n )\n await hass.async_block_till_done()\n\n assert result[\"type\"] == FlowResultType.ABORT\n assert result[\"reason\"] == \"already_configured\"\n assert entry.data[CONF_HOST] == FAKE_IP",
"def test_expand_target_ip_address(opts, roster):\n host = \"127.0.0.1\"\n user = \"test-user@\"\n opts[\"tgt\"] = user + host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n client = ssh.SSH(opts)\n assert opts[\"tgt\"] == user + host\n with patch(\n \"salt.roster.get_roster_file\", MagicMock(return_value=\"/etc/salt/roster\")\n ), patch(\n \"salt.client.ssh.compile_template\",\n MagicMock(return_value=salt.utils.yaml.safe_load(roster)),\n ):\n client._expand_target()\n assert opts[\"tgt\"] == host",
"def test_udp_query():\n assert dnsck_query(\"8.8.8.8\", \"google.com\", \"a\", 1) == 0",
"def _set_target_info(self, targets, host_grps, iqn):\n for host_grp in host_grps:\n port = host_grp['portId']\n gid = host_grp['hostGroupNumber']\n storage_iqn = host_grp['iscsiName']\n if self._is_host_iqn_registered_in_target(port, gid, iqn):\n targets['info'][port] = True\n targets['list'].append((port, gid))\n targets['iqns'][(port, gid)] = storage_iqn\n return True\n return False",
"def test_hostMapper(self):\n h = self.proxyServices[0]\n self.assertEquals(h.proxyName, 'web')\n self.assertEquals(h.proxyAddresses, [('127.0.0.1', 8080)])\n self.assertEquals(h.groupName, 'prod')\n self.assertEquals(h.hostName, 'host1')\n self.assertEquals(h.hostAddress, ('127.0.0.1', 7001))\n self.assertEquals(h.groupEnabled, True)\n h = self.proxyServices[3]\n self.assertEquals(h.groupName, 'test')\n self.assertEquals(h.groupEnabled, False)\n h = self.proxyServices[-1]\n self.assertEquals(h.proxyName, 'dns')\n self.assertEquals(h.groupEnabled, True)",
"def test_get_host(self):\n pass",
"def test_update_domain_only(self):\n self.test_update()",
"def test_networking_project_network_update(self):\n pass",
"def test_ipam_ip_addresses_update(self):\n pass",
"def test_resolve(self):\n node = create_node(\"somewhere\", \"myservice\", \"env1\")\n node2 = create_node(\"somewhere2\", \"myservice\", \"env2\")\n disco = create_disco()\n disco.onMessage(None, NodeActive(node))\n disco.onMessage(None, NodeActive(node2))\n # Do repeatedly in case round robin is somehow tricking us:\n for i in range(10):\n self.assertEqual(resolve(disco, \"myservice\", \"1.0\", \"env1\").address,\n \"somewhere\")\n for i in range(10):\n self.assertEqual(resolve(disco, \"myservice\", \"1.0\", \"env2\").address,\n \"somewhere2\")",
"def autofixTargets(self, local_ctx):\n pass",
"def test_target_existence(self):\n self.create_ptr(\n ip_str='128.193.0.2', fqdn='nonexistent.oregonstate.edu',\n ip_type='4')",
"def setup_targets(self):\n for i in range(self.min_peers):\n self.targets.append(dict(address=0, tolerance=0, connected=False))\n # NOT IMPLEMENTED HERE",
"def test_perform_host_action(self):\n pass",
"def validate_target(target: str) -> bool:\n try:\n gethostbyname(target)\n except (gaierror, UnicodeError):\n return False\n return True",
"def test_udp_bad_server():\n assert dnsck_query(\"8.8.8.88\", \"google.com\", \"A\", 1) == 1",
"async def test_aiodiscover_finds_new_hosts(hass: HomeAssistant) -> None:\n with patch.object(hass.config_entries.flow, \"async_init\") as mock_init, patch(\n \"homeassistant.components.dhcp.DiscoverHosts.async_discover\",\n return_value=[\n {\n dhcp.DISCOVERY_IP_ADDRESS: \"192.168.210.56\",\n dhcp.DISCOVERY_HOSTNAME: \"connect\",\n dhcp.DISCOVERY_MAC_ADDRESS: \"b8b7f16db533\",\n }\n ],\n ):\n device_tracker_watcher = dhcp.NetworkWatcher(\n hass,\n {},\n [{\"domain\": \"mock-domain\", \"hostname\": \"connect\", \"macaddress\": \"B8B7F1*\"}],\n )\n await device_tracker_watcher.async_start()\n await hass.async_block_till_done()\n await device_tracker_watcher.async_stop()\n await hass.async_block_till_done()\n\n assert len(mock_init.mock_calls) == 1\n assert mock_init.mock_calls[0][1][0] == \"mock-domain\"\n assert mock_init.mock_calls[0][2][\"context\"] == {\n \"source\": config_entries.SOURCE_DHCP\n }\n assert mock_init.mock_calls[0][2][\"data\"] == dhcp.DhcpServiceInfo(\n ip=\"192.168.210.56\",\n hostname=\"connect\",\n macaddress=\"b8b7f16db533\",\n )",
"def test_host_header_set_ok(self):\n requests = [\n \"GET / HTTP/1.1\\r\\nHost: tempesta-tech.com:80\\r\\n\\r\\n\",\n \"GET / HTTP/1.1\\r\\nHost: tempesta-tech.com \\r\\n\\r\\n\",\n \"GET http://tempesta-tech.com/ HTTP/1.1\\r\\nHost: tempesta-tech.com\\r\\n\\r\\n\",\n \"GET http://[email protected]/ HTTP/1.1\\r\\nHost: tempesta-tech.com\\r\\n\\r\\n\",\n (\n \"GET http://[email protected]/ HTTP/1.1\\r\\n\"\n \"Host: tempesta-tech.com\\r\\n\"\n \"Forwarded: host=tempesta-tech.com\\r\\n\"\n \"Forwarded: host=tempesta1-tech.com\\r\\n\\r\\n\"\n ),\n ]\n client = self.base_scenario(\n frang_config=\"http_strict_host_checking true;\", requests=requests\n )\n self.check_response(client, status_code=\"200\", warning_msg=\"frang: \")"
] | [
"0.7475771",
"0.7439883",
"0.6938763",
"0.674091",
"0.6428459",
"0.61800534",
"0.6096695",
"0.60605717",
"0.58983433",
"0.5897955",
"0.58913845",
"0.5874283",
"0.585102",
"0.58426744",
"0.5839361",
"0.5837791",
"0.5811867",
"0.5776639",
"0.5776224",
"0.5753839",
"0.5708274",
"0.5685687",
"0.5675461",
"0.5605087",
"0.56023324",
"0.56009495",
"0.559623",
"0.55954945",
"0.5594483",
"0.558318"
] | 0.8254757 | 0 |
test update_targets when no user defined | def test_update_targets_no_user(opts):
host = "127.0.0.1"
opts["tgt"] = host
with patch("salt.utils.network.is_reachable_host", MagicMock(return_value=False)):
client = ssh.SSH(opts)
assert opts["tgt"] == host
client._update_targets()
assert opts["tgt"] == host | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_update_targets_ip_address(opts):\n host = \"127.0.0.1\"\n user = \"test-user@\"\n opts[\"tgt\"] = user + host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n client = ssh.SSH(opts)\n assert opts[\"tgt\"] == user + host\n client._update_targets()\n assert opts[\"tgt\"] == host\n assert client.targets[host][\"user\"] == user.split(\"@\")[0]",
"def update_target(self):\n pass",
"def test_update_targets_dns(opts):\n host = \"localhost\"\n user = \"test-user@\"\n opts[\"tgt\"] = user + host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n client = ssh.SSH(opts)\n assert opts[\"tgt\"] == user + host\n client._update_targets()\n assert opts[\"tgt\"] == host\n assert client.targets[host][\"user\"] == user.split(\"@\")[0]",
"def update_targets(self):\n self.actor.update_target_network()\n self.critic.update_target_network()",
"def update_global_targets(all_targets, tile_targets):\n\n # loop over each target and check whether it hass been assigned to a fiber.\n for i_target in range(tile_targets.n):\n if(tile_targets.fiber[i_target]!=-1):\n loc = np.where(all_targets.id == tile_targets.id[i_target])\n if(np.size(loc)!=0):\n loc = loc[0]\n all_targets.n_observed[loc] = all_targets.n_observed[loc] + 1\n # TOWRITE: still have to make the update to ASSIGNEDTYPE and ASSIGNEDZ \n else:\n raise ValueError('The target id %d in tile was not found in general target list'%(tile_targets.id[i_target]))\n return",
"def targets_placeholder(self):",
"def test_update_case(self):\n pass",
"def post_process(self, relevant_targets):\r\n pass",
"def update_targets(self, items):\n\n items = list(filter(None, items))\n\n if len(items) > 0:\n self.logger.info(\"Updating {} substrate matches\".format(len(items)))\n self.substrates.update(docs=items)\n else:\n self.logger.info(\"No items to update\")",
"def test_update(self):\n pass",
"def test_update(self):\n pass",
"def test_update(self):\n pass",
"def update_targets(self, indexes: List[int], new_targets: np.ndarray):\n if self.train:\n self.train_nat[indexes, :] = new_targets\n else:\n self.test_nat[indexes, :] = new_targets",
"def execute(self, targets):",
"def update_targets(self, items):\n items = list(filter(None, chain(*items)))\n items = list(filter(None, items))\n\n if len(items) > 0:\n self.logger.info(\"Updating {} thermo documents\".format(len(items)))\n bulk = self.thermo().initialize_ordered_bulk_op()\n\n for m in items:\n m[self.thermo.lu_field] = datetime.utcnow()\n bulk.find({\"material_id\": m[\"material_id\"]}).upsert().replace_one(m)\n bulk.execute()\n else:\n self.logger.info(\"No items to update\")",
"def autofixTargets(self, local_ctx):\n pass",
"def test_update9(self):\n pass",
"def test_update_scenario(self):\n pass",
"def test_update_expand_target_dns(opts, roster):\n host = \"localhost\"\n user = \"test-user@\"\n opts[\"tgt\"] = user + host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n client = ssh.SSH(opts)\n assert opts[\"tgt\"] == user + host\n with patch(\n \"salt.roster.get_roster_file\", MagicMock(return_value=\"/etc/salt/roster\")\n ), patch(\n \"salt.client.ssh.compile_template\",\n MagicMock(return_value=salt.utils.yaml.safe_load(roster)),\n ):\n client._expand_target()\n client._update_targets()\n assert opts[\"tgt\"] == host\n assert client.targets[host][\"user\"] == user.split(\"@\")[0]",
"def test_update_one(self):\n pass",
"def test_update_targetvalues_new(clean_targeting, carrier):\n\n AppNexusCarrier.update_targetvalues()\n\n # both raw and representant are added\n assert 2 == TargetValue.objects.count()\n assert 1 == TargetValue.objects.representants().count()\n assert 1 == TargetValue.objects.represented().count()",
"def test_update_goal(self):\n pass",
"def test_update_rule(self):\n pass",
"def reset(targets):",
"def update_all_targets(self):\n soft_update(self.target_critic, self.critic, self.tau)\n soft_update(self.target_policy, self.policy, self.tau)",
"def _setup_target_updates(model_scope, target_scope, scope, tau, verbose):\n if scope is not None:\n model_scope = scope + '/' + model_scope\n target_scope = scope + '/' + target_scope\n\n return get_target_updates(\n get_trainable_vars(model_scope),\n get_trainable_vars(target_scope),\n tau, verbose)",
"def test_update(self):\n # this is tested graphically, as it is UI\n pass",
"def update_targets(self, items):\n items = list(filter(None, items))\n\n if len(items) > 0:\n self.logger.info(\"Updating {} site-descriptors docs\".format(len(items)))\n self.site_descriptors.update(docs=items)\n else:\n self.logger.info(\"No items to update\")",
"def updated_targets(self, targets, destination_directory):\n\n # Do the arguments have the correct format?\n # Raise 'tuf.FormatError' if there is a mismatch.\n tuf.formats.TARGETFILES_SCHEMA.check_match(targets)\n tuf.formats.PATH_SCHEMA.check_match(destination_directory)\n\n updated_targets = []\n\n for target in targets:\n # Get the target's filepath located in 'destination_directory'.\n # We will compare targets against this file.\n target_filepath = os.path.join(destination_directory, target['filepath'])\n \n # Try one of the algorithm/digest combos for a mismatch. We break\n # as soon as we find a mismatch.\n for algorithm, digest in target['fileinfo']['hashes'].items():\n digest_object = None\n try:\n digest_object = tuf.hash.digest_filename(target_filepath,\n algorithm=algorithm)\n # This exception would occur if the target does not exist locally. \n except IOError:\n updated_targets.append(target)\n break\n # The file does exist locally, check if its hash differs. \n if digest_object.hexdigest() != digest:\n updated_targets.append(target)\n break\n \n return updated_targets",
"def test_SELFUPDATE_TARGET(self):\n # network may be unavailable, but we are not interested anyway,\n # so we ignore the exitcode\n output = self.run_command(\"SELFUPDATE_TARGET=ywangd:dev selfupdate --check\", exitcode=None)\n self.assertIn(\"Target: ywangd:dev\", output)\n self.assertNotIn(\"Target: ywangd:master\", output)"
] | [
"0.6914559",
"0.66063684",
"0.65769297",
"0.65296984",
"0.65164536",
"0.64756906",
"0.6462618",
"0.6382679",
"0.6335945",
"0.633125",
"0.633125",
"0.633125",
"0.6292728",
"0.6273413",
"0.62559766",
"0.62496156",
"0.62388134",
"0.6209271",
"0.6199935",
"0.61500025",
"0.614538",
"0.61446536",
"0.60845226",
"0.60717106",
"0.60605854",
"0.605179",
"0.6048279",
"0.5982225",
"0.59775305",
"0.5956603"
] | 0.7098015 | 0 |
test update_targets and expand_target when host is dns | def test_update_expand_target_dns(opts, roster):
host = "localhost"
user = "test-user@"
opts["tgt"] = user + host
with patch("salt.utils.network.is_reachable_host", MagicMock(return_value=False)):
client = ssh.SSH(opts)
assert opts["tgt"] == user + host
with patch(
"salt.roster.get_roster_file", MagicMock(return_value="/etc/salt/roster")
), patch(
"salt.client.ssh.compile_template",
MagicMock(return_value=salt.utils.yaml.safe_load(roster)),
):
client._expand_target()
client._update_targets()
assert opts["tgt"] == host
assert client.targets[host]["user"] == user.split("@")[0] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_update_targets_dns(opts):\n host = \"localhost\"\n user = \"test-user@\"\n opts[\"tgt\"] = user + host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n client = ssh.SSH(opts)\n assert opts[\"tgt\"] == user + host\n client._update_targets()\n assert opts[\"tgt\"] == host\n assert client.targets[host][\"user\"] == user.split(\"@\")[0]",
"def test_expand_target_dns(opts, roster):\n host = \"localhost\"\n user = \"test-user@\"\n opts[\"tgt\"] = user + host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n client = ssh.SSH(opts)\n assert opts[\"tgt\"] == user + host\n with patch(\n \"salt.roster.get_roster_file\", MagicMock(return_value=\"/etc/salt/roster\")\n ), patch(\n \"salt.client.ssh.compile_template\",\n MagicMock(return_value=salt.utils.yaml.safe_load(roster)),\n ):\n client._expand_target()\n assert opts[\"tgt\"] == host",
"def test_update_targets_ip_address(opts):\n host = \"127.0.0.1\"\n user = \"test-user@\"\n opts[\"tgt\"] = user + host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n client = ssh.SSH(opts)\n assert opts[\"tgt\"] == user + host\n client._update_targets()\n assert opts[\"tgt\"] == host\n assert client.targets[host][\"user\"] == user.split(\"@\")[0]",
"def test_update_targets_no_user(opts):\n host = \"127.0.0.1\"\n opts[\"tgt\"] = host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n client = ssh.SSH(opts)\n assert opts[\"tgt\"] == host\n client._update_targets()\n assert opts[\"tgt\"] == host",
"def test_rebuild_on_host_updated_target(self):\n def fake_get_compute_info(context, host):\n self.assertTrue(context.is_admin)\n self.assertEqual('fake-mini', host)\n cn = objects.ComputeNode(hypervisor_hostname=NODENAME)\n return cn\n\n with test.nested(\n mock.patch.object(self.compute.driver, 'instance_on_disk',\n side_effect=lambda x: True),\n mock.patch.object(self.compute, '_get_compute_info',\n side_effect=fake_get_compute_info)\n ) as (mock_inst, mock_get):\n self._rebuild()\n\n # Should be on destination host\n instance = db.instance_get(self.context, self.inst.id)\n self.assertEqual(instance['host'], self.compute.host)\n self.assertEqual(NODENAME, instance['node'])\n self.assertTrue(mock_inst.called)\n self.assertTrue(mock_get.called)",
"def test_expand_target_ip_address(opts, roster):\n host = \"127.0.0.1\"\n user = \"test-user@\"\n opts[\"tgt\"] = user + host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n client = ssh.SSH(opts)\n assert opts[\"tgt\"] == user + host\n with patch(\n \"salt.roster.get_roster_file\", MagicMock(return_value=\"/etc/salt/roster\")\n ), patch(\n \"salt.client.ssh.compile_template\",\n MagicMock(return_value=salt.utils.yaml.safe_load(roster)),\n ):\n client._expand_target()\n assert opts[\"tgt\"] == host",
"def test_expand_target_no_host(opts, tmp_path):\n host = \"127.0.0.1\"\n user = \"test-user@\"\n opts[\"tgt\"] = user + host\n\n roster = \"\"\"\n localhost: 127.0.0.1\n \"\"\"\n roster_file = str(tmp_path / \"test_roster_no_host\")\n with salt.utils.files.fopen(roster_file, \"w\") as fp:\n salt.utils.yaml.safe_dump(salt.utils.yaml.safe_load(roster), fp)\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n client = ssh.SSH(opts)\n assert opts[\"tgt\"] == user + host\n with patch(\"salt.roster.get_roster_file\", MagicMock(return_value=roster_file)):\n client._expand_target()\n assert opts[\"tgt\"] == host",
"def test_striping_patch(self):\n self.create_simple_filesystem(synthetic_host(\"myserver\"))\n hosts = [synthetic_host(\"myserver{0:d}\".format(n)) for n in range(4)] * 2\n # keep hosts in alternating order, but supply them grouped\n objects = [\n {\"kind\": \"OST\", \"filesystem_id\": self.fs.id, \"volume_id\": synthetic_volume_full(host).id}\n for host in sorted(hosts, key=str)\n ]\n response = self.api_client.patch(\"/api/target/\", data={\"deletions\": [], \"objects\": objects})\n self.assertHttpAccepted(response)\n content = json.loads(response.content)\n self.assertEqual(map(str, hosts), list(self._target_hosts(content[\"targets\"])))",
"def alias_all(self, host_names, target, raise_on_not_found=True):\n self.set_all(host_names, self.get_one(target, raise_on_not_found))",
"def _set_target_info(self, targets, host_grps, iqn):\n for host_grp in host_grps:\n port = host_grp['portId']\n gid = host_grp['hostGroupNumber']\n storage_iqn = host_grp['iscsiName']\n if self._is_host_iqn_registered_in_target(port, gid, iqn):\n targets['info'][port] = True\n targets['list'].append((port, gid))\n targets['iqns'][(port, gid)] = storage_iqn\n return True\n return False",
"def test_resolve(self):\n node = create_node(\"somewhere\", \"myservice\", \"env1\")\n node2 = create_node(\"somewhere2\", \"myservice\", \"env2\")\n disco = create_disco()\n disco.onMessage(None, NodeActive(node))\n disco.onMessage(None, NodeActive(node2))\n # Do repeatedly in case round robin is somehow tricking us:\n for i in range(10):\n self.assertEqual(resolve(disco, \"myservice\", \"1.0\", \"env1\").address,\n \"somewhere\")\n for i in range(10):\n self.assertEqual(resolve(disco, \"myservice\", \"1.0\", \"env2\").address,\n \"somewhere2\")",
"def autofixTargets(self, local_ctx):\n pass",
"def test_get_host(self):\n pass",
"def test_perform_host_action(self):\n pass",
"def test_expand_target_no_user(opts, roster):\n host = \"127.0.0.1\"\n opts[\"tgt\"] = host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n client = ssh.SSH(opts)\n assert opts[\"tgt\"] == host\n\n with patch(\n \"salt.roster.get_roster_file\", MagicMock(return_value=\"/etc/salt/roster\")\n ), patch(\n \"salt.client.ssh.compile_template\",\n MagicMock(return_value=salt.utils.yaml.safe_load(roster)),\n ):\n client._expand_target()\n assert opts[\"tgt\"] == host",
"def testExpandedTargets(self):\n self.all_targets = self.blade.analyze_targets()\n\n sys.stdout.flush()\n sys.stderr.flush()\n\n self.assertTrue(self.blade.get_expanded())\n self.assertTrue(self.all_targets)\n\n system_lib = ('#', 'pthread')\n proto_lib_option = (self.target_path, 'rpc_option_proto')\n proto_lib_meta = (self.target_path, 'rpc_meta_info_proto')\n cc_library_poppy = (self.target_path, 'poppy')\n cc_lib_poppy_mock = (self.target_path, 'poppy_mock')\n static_resource = (self.target_path, 'static_resource')\n cc_test = (self.target_path, 'rpc_channel_test')\n swig_library = (self.target_path, 'poppy_client')\n lex_yacc_library = (self.target_path, 'parser')\n cc_plugin = (self.target_path, 'meter_business')\n gen_rule = (self.target_path, 'search_service_echo')\n java_jar = (os.path.join(self.target_path, 'java'),\n 'poppy_java_client')\n cc_binary = (self.target_path, 'echoserver')\n cc_lib_prebuild = (self.target_path, 'poppy_swig_wrap')\n java_jar_prebuild = (os.path.join(self.target_path, 'java', 'lib'),\n 'protobuf-java')\n\n self.assertTrue(cc_library_poppy in self.all_targets.keys())\n\n poppy_deps = self.all_targets.get(cc_library_poppy, {}).get('deps', [])\n poppy_mock_deps = self.all_targets.get(cc_lib_poppy_mock, {}).get('deps', [])\n self.assertTrue(poppy_deps)\n self.assertTrue(poppy_mock_deps)\n\n self.assertTrue(proto_lib_option in poppy_deps)\n self.assertTrue(proto_lib_meta in poppy_deps)\n self.assertTrue(static_resource in poppy_deps)\n self.assertTrue(system_lib in poppy_deps)\n self.assertTrue(cc_library_poppy in poppy_mock_deps)\n self.assertTrue(proto_lib_meta in poppy_mock_deps)\n\n poppy_client_deps = self.all_targets.get(swig_library, {}).get('deps', [])\n self.assertTrue(poppy_client_deps)\n self.assertTrue(cc_library_poppy in poppy_client_deps)\n self.assertTrue(cc_lib_prebuild in poppy_client_deps)\n\n self.assertTrue(java_jar in self.all_targets.keys())\n java_jar_deps = self.all_targets.get(java_jar, {}).get('deps', [])\n self.assertTrue(java_jar_deps)\n\n self.assertTrue(proto_lib_option in java_jar_deps)\n self.assertTrue(proto_lib_meta in java_jar_deps)\n self.assertTrue(java_jar_prebuild in java_jar_deps)\n self.assertTrue(cc_library_poppy not in java_jar_deps)",
"def test_rebuild_on_host_updated_target_node_not_found(self):\n def fake_get_compute_info(context, host):\n raise exception.ComputeHostNotFound(host=host)\n with test.nested(\n mock.patch.object(self.compute.driver, 'instance_on_disk',\n side_effect=lambda x: True),\n mock.patch.object(self.compute, '_get_compute_info',\n side_effect=fake_get_compute_info)\n ) as (mock_inst, mock_get):\n self.assertRaises(exception.InstanceFaultRollback,\n self._rebuild, expect_error=True)\n\n # Should be on destination host\n instance = db.instance_get(self.context, self.inst.id)\n self.assertEqual('fake_host_2', instance['host'])\n self.assertEqual('fakenode2', instance['node'])\n mock_inst.assert_not_called()\n mock_get.assert_called_once_with(mock.ANY, self.compute.host)",
"def test_answerless(self):\n servers = {\n ('1.1.2.3', 53): {\n ('example.com', A): {\n },\n },\n }\n resolver = self._getResolver(servers)\n d = resolver.lookupAddress('example.com')\n return self.assertFailure(d, ResolverError)",
"def update_targets(self):\n self.actor.update_target_network()\n self.critic.update_target_network()",
"def test_hostMapper(self):\n h = self.proxyServices[0]\n self.assertEquals(h.proxyName, 'web')\n self.assertEquals(h.proxyAddresses, [('127.0.0.1', 8080)])\n self.assertEquals(h.groupName, 'prod')\n self.assertEquals(h.hostName, 'host1')\n self.assertEquals(h.hostAddress, ('127.0.0.1', 7001))\n self.assertEquals(h.groupEnabled, True)\n h = self.proxyServices[3]\n self.assertEquals(h.groupName, 'test')\n self.assertEquals(h.groupEnabled, False)\n h = self.proxyServices[-1]\n self.assertEquals(h.proxyName, 'dns')\n self.assertEquals(h.groupEnabled, True)",
"def test_target_existence(self):\n self.create_ptr(\n ip_str='128.193.0.2', fqdn='nonexistent.oregonstate.edu',\n ip_type='4')",
"async def test_update_address(hass):\n config_entry = await setup_axis_integration(hass)\n device = hass.data[AXIS_DOMAIN][config_entry.unique_id]\n assert device.api.config.host == \"1.2.3.4\"\n\n with patch(\n \"homeassistant.components.axis.async_setup_entry\",\n return_value=True,\n ) as mock_setup_entry, respx.mock:\n mock_default_vapix_requests(respx, \"2.3.4.5\")\n await hass.config_entries.flow.async_init(\n AXIS_DOMAIN,\n data={\n \"host\": \"2.3.4.5\",\n \"port\": 80,\n \"name\": \"name\",\n \"properties\": {\"macaddress\": MAC},\n },\n context={\"source\": SOURCE_ZEROCONF},\n )\n await hass.async_block_till_done()\n\n assert device.api.config.host == \"2.3.4.5\"\n assert len(mock_setup_entry.mock_calls) == 1",
"def set_discover_targets(discover: bool) -> dict:\n return {\"method\": \"Target.setDiscoverTargets\", \"params\": {\"discover\": discover}}",
"async def test_discovered_by_dhcp_or_integration_discovery_updates_host(\n hass: HomeAssistant, source, data\n) -> None:\n entry = MockConfigEntry(\n domain=DOMAIN,\n unique_id=TEST_SYSTEM_INFO[\"id\"],\n data={CONF_HOST: \"dummy\"},\n )\n entry.add_to_hass(hass)\n\n with _patch_wizlight():\n result = await hass.config_entries.flow.async_init(\n DOMAIN, context={\"source\": source}, data=data\n )\n await hass.async_block_till_done()\n\n assert result[\"type\"] == FlowResultType.ABORT\n assert result[\"reason\"] == \"already_configured\"\n assert entry.data[CONF_HOST] == FAKE_IP",
"def setup_targets(self):\n for i in range(self.min_peers):\n self.targets.append(dict(address=0, tolerance=0, connected=False))\n # NOT IMPLEMENTED HERE",
"def _set_target_info_by_name(self, targets, port, target_name, iqn):\n host_iqn_registered_in_target = (\n self._get_host_iqn_registered_in_target_by_name(\n port, target_name, iqn))\n if host_iqn_registered_in_target:\n gid = host_iqn_registered_in_target['hostGroupNumber']\n storage_iqn = self.client.get_host_grp(port, gid)['iscsiName']\n targets['info'][port] = True\n targets['list'].append((port, gid))\n targets['iqns'][(port, gid)] = storage_iqn\n return True\n return False",
"def test_updatednsrecord(kasserver, kasapi):\n kasserver.add_dns_record(\"test.example.com\", \"CNAME\", \"www.example2.com\")\n assert kasapi.requests_contains(\"update_dns_settings\")",
"def test_host_header_set_ok(self):\n requests = [\n \"GET / HTTP/1.1\\r\\nHost: tempesta-tech.com:80\\r\\n\\r\\n\",\n \"GET / HTTP/1.1\\r\\nHost: tempesta-tech.com \\r\\n\\r\\n\",\n \"GET http://tempesta-tech.com/ HTTP/1.1\\r\\nHost: tempesta-tech.com\\r\\n\\r\\n\",\n \"GET http://[email protected]/ HTTP/1.1\\r\\nHost: tempesta-tech.com\\r\\n\\r\\n\",\n (\n \"GET http://[email protected]/ HTTP/1.1\\r\\n\"\n \"Host: tempesta-tech.com\\r\\n\"\n \"Forwarded: host=tempesta-tech.com\\r\\n\"\n \"Forwarded: host=tempesta1-tech.com\\r\\n\\r\\n\"\n ),\n ]\n client = self.base_scenario(\n frang_config=\"http_strict_host_checking true;\", requests=requests\n )\n self.check_response(client, status_code=\"200\", warning_msg=\"frang: \")",
"async def test_aiodiscover_finds_new_hosts(hass: HomeAssistant) -> None:\n with patch.object(hass.config_entries.flow, \"async_init\") as mock_init, patch(\n \"homeassistant.components.dhcp.DiscoverHosts.async_discover\",\n return_value=[\n {\n dhcp.DISCOVERY_IP_ADDRESS: \"192.168.210.56\",\n dhcp.DISCOVERY_HOSTNAME: \"connect\",\n dhcp.DISCOVERY_MAC_ADDRESS: \"b8b7f16db533\",\n }\n ],\n ):\n device_tracker_watcher = dhcp.NetworkWatcher(\n hass,\n {},\n [{\"domain\": \"mock-domain\", \"hostname\": \"connect\", \"macaddress\": \"B8B7F1*\"}],\n )\n await device_tracker_watcher.async_start()\n await hass.async_block_till_done()\n await device_tracker_watcher.async_stop()\n await hass.async_block_till_done()\n\n assert len(mock_init.mock_calls) == 1\n assert mock_init.mock_calls[0][1][0] == \"mock-domain\"\n assert mock_init.mock_calls[0][2][\"context\"] == {\n \"source\": config_entries.SOURCE_DHCP\n }\n assert mock_init.mock_calls[0][2][\"data\"] == dhcp.DhcpServiceInfo(\n ip=\"192.168.210.56\",\n hostname=\"connect\",\n macaddress=\"b8b7f16db533\",\n )",
"def set_target(self, host, port):\r\n pass"
] | [
"0.79298717",
"0.7271698",
"0.7193563",
"0.66447264",
"0.6619049",
"0.63972855",
"0.6149345",
"0.6114023",
"0.601661",
"0.5973573",
"0.59198385",
"0.59094423",
"0.5870842",
"0.58562446",
"0.5788324",
"0.5756256",
"0.57487756",
"0.57337517",
"0.5731826",
"0.57180697",
"0.56830317",
"0.56815875",
"0.56811225",
"0.56559885",
"0.5635674",
"0.5629317",
"0.5628231",
"0.5602697",
"0.55844843",
"0.55668336"
] | 0.7761494 | 1 |
test parse_tgt when user and host set on the ssh cli tgt | def test_parse_tgt(opts):
host = "localhost"
user = "test-user@"
opts["tgt"] = user + host
with patch("salt.utils.network.is_reachable_host", MagicMock(return_value=False)):
assert not opts.get("ssh_cli_tgt")
client = ssh.SSH(opts)
assert client.parse_tgt["hostname"] == host
assert client.parse_tgt["user"] == user.split("@")[0]
assert opts.get("ssh_cli_tgt") == user + host | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_parse_tgt_no_user(opts):\n host = \"localhost\"\n opts[\"ssh_user\"] = \"ssh-usr\"\n opts[\"tgt\"] = host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n assert not opts.get(\"ssh_cli_tgt\")\n client = ssh.SSH(opts)\n assert client.parse_tgt[\"hostname\"] == host\n assert client.parse_tgt[\"user\"] == opts[\"ssh_user\"]\n assert opts.get(\"ssh_cli_tgt\") == host",
"def test_update_targets_ip_address(opts):\n host = \"127.0.0.1\"\n user = \"test-user@\"\n opts[\"tgt\"] = user + host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n client = ssh.SSH(opts)\n assert opts[\"tgt\"] == user + host\n client._update_targets()\n assert opts[\"tgt\"] == host\n assert client.targets[host][\"user\"] == user.split(\"@\")[0]",
"def test_expand_target_no_host(opts, tmp_path):\n host = \"127.0.0.1\"\n user = \"test-user@\"\n opts[\"tgt\"] = user + host\n\n roster = \"\"\"\n localhost: 127.0.0.1\n \"\"\"\n roster_file = str(tmp_path / \"test_roster_no_host\")\n with salt.utils.files.fopen(roster_file, \"w\") as fp:\n salt.utils.yaml.safe_dump(salt.utils.yaml.safe_load(roster), fp)\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n client = ssh.SSH(opts)\n assert opts[\"tgt\"] == user + host\n with patch(\"salt.roster.get_roster_file\", MagicMock(return_value=roster_file)):\n client._expand_target()\n assert opts[\"tgt\"] == host",
"def test_update_targets_no_user(opts):\n host = \"127.0.0.1\"\n opts[\"tgt\"] = host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n client = ssh.SSH(opts)\n assert opts[\"tgt\"] == host\n client._update_targets()\n assert opts[\"tgt\"] == host",
"def test_expand_target_ip_address(opts, roster):\n host = \"127.0.0.1\"\n user = \"test-user@\"\n opts[\"tgt\"] = user + host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n client = ssh.SSH(opts)\n assert opts[\"tgt\"] == user + host\n with patch(\n \"salt.roster.get_roster_file\", MagicMock(return_value=\"/etc/salt/roster\")\n ), patch(\n \"salt.client.ssh.compile_template\",\n MagicMock(return_value=salt.utils.yaml.safe_load(roster)),\n ):\n client._expand_target()\n assert opts[\"tgt\"] == host",
"def test_update_targets_dns(opts):\n host = \"localhost\"\n user = \"test-user@\"\n opts[\"tgt\"] = user + host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n client = ssh.SSH(opts)\n assert opts[\"tgt\"] == user + host\n client._update_targets()\n assert opts[\"tgt\"] == host\n assert client.targets[host][\"user\"] == user.split(\"@\")[0]",
"def test_expand_target_no_user(opts, roster):\n host = \"127.0.0.1\"\n opts[\"tgt\"] = host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n client = ssh.SSH(opts)\n assert opts[\"tgt\"] == host\n\n with patch(\n \"salt.roster.get_roster_file\", MagicMock(return_value=\"/etc/salt/roster\")\n ), patch(\n \"salt.client.ssh.compile_template\",\n MagicMock(return_value=salt.utils.yaml.safe_load(roster)),\n ):\n client._expand_target()\n assert opts[\"tgt\"] == host",
"def test_ssh_cmd(self):\n self.assertEqual(general.ssh_command('user','example.com',('ls','-l')).command_line,\n ['ssh','[email protected]','ls','-l'])",
"def test_expand_target_dns(opts, roster):\n host = \"localhost\"\n user = \"test-user@\"\n opts[\"tgt\"] = user + host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n client = ssh.SSH(opts)\n assert opts[\"tgt\"] == user + host\n with patch(\n \"salt.roster.get_roster_file\", MagicMock(return_value=\"/etc/salt/roster\")\n ), patch(\n \"salt.client.ssh.compile_template\",\n MagicMock(return_value=salt.utils.yaml.safe_load(roster)),\n ):\n client._expand_target()\n assert opts[\"tgt\"] == host",
"def test_update_expand_target_dns(opts, roster):\n host = \"localhost\"\n user = \"test-user@\"\n opts[\"tgt\"] = user + host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n client = ssh.SSH(opts)\n assert opts[\"tgt\"] == user + host\n with patch(\n \"salt.roster.get_roster_file\", MagicMock(return_value=\"/etc/salt/roster\")\n ), patch(\n \"salt.client.ssh.compile_template\",\n MagicMock(return_value=salt.utils.yaml.safe_load(roster)),\n ):\n client._expand_target()\n client._update_targets()\n assert opts[\"tgt\"] == host\n assert client.targets[host][\"user\"] == user.split(\"@\")[0]",
"def test_ssh_cmd_no_user(self):\n self.assertEqual(general.ssh_command(None,'example.com',('ls','-l')).command_line,\n ['ssh','example.com','ls','-l'])",
"def test_verify_ssh_access_with_root_works(driver):",
"def test_target_arg(self, parse_input):\n with pytest.warns(SyntaxWarning, match=\"only accept keyword options\"):\n parse_input(\"name testname\\nversion 1.0\\ntarget example (6)\")",
"def targets(tgt, tgt_type=\"glob\"):\n\n ssh_known_hosts_file = __opts__.get(\"ssh_known_hosts_file\")\n\n if not os.path.isfile(ssh_known_hosts_file):\n log.error(\"Cannot find SSH known_hosts file\")\n raise OSError(\"Cannot find SSH known_hosts file\")\n if not os.access(ssh_known_hosts_file, os.R_OK):\n log.error(\"Cannot access SSH known_hosts file: %s\", ssh_known_hosts_file)\n raise OSError(\n \"Cannot access SSH known_hosts file: {}\".format(ssh_known_hosts_file)\n )\n\n with salt.utils.files.fopen(ssh_known_hosts_file, \"r\") as hostfile:\n raw = _parse_ssh_known_hosts([line.rstrip() for line in hostfile])\n\n return __utils__[\"roster_matcher.targets\"](raw, tgt, tgt_type, \"ipv4\")",
"def the_root_user_should_be_able_to_login_with_ssh(driver):\n assert ssh_result['result'], ssh_result['output']\n assert '..' in ssh_result['output'], ssh_result['output']",
"def test_ssh(self):\n self._test_ssh(self.git_ssh_path)",
"def test_6_1_3_etc_group_user(host):\n assert host.file(ETC_PASSWD_DASH).user == 'root'",
"def test_6_1_5_etc_group_dash_user(host):\n assert host.file(ETC_GROUP_DASH).user == 'root'",
"async def target_parser(ctx: commands.Context, target: str) -> tuple:\n if target is None:\n target = ctx.author\n target_found = True\n else:\n try:\n target = await commands.MemberConverter().convert(ctx, target)\n target_found = True\n except commands.BadArgument:\n target_found = False\n return (target_found, target)",
"def ssh_cmd(ctx):\n pass",
"def test_6_1_4_etc_group_user(host):\n assert host.file(ETC_GROUP).user == 'root'",
"def test_get_host_access(self):\n pass",
"def ssh(filter=\".*\",user=\"\"):\n list_instances,list_headers = ec2list(filter=filter)\n if not list_instances:\n print(\"No instance matched the filter\")\n sys.exit(1)\n all_string = \"## D: LA REPONSE D\"\n title = \"Pick the instances to SSH into:\"\n options = [ '{} ---- {} ---- {} ---- {}'.format(\n x[\"name\"],\n x[\"privateip\"],\n x[\"branch\"],\n x[\"launchtime\"],\n x[\"state\"]) for x in list_instances ]\n options.append(all_string)\n\n list_selected = pick(options, title, multiselect=True, default_index=len(options)-1)\n del(options[:-1])\n list_ips = []\n if not list_selected:\n print(\"No host selected, exiting\")\n return\n for option,index in list_selected:\n if option == all_string:\n list_ips = [ x['privateip'] for x in list_instances ]\n break\n else:\n list_ips.append(list_instances[index]['privateip'])\n if len(list_ips) == 1:\n if not user:\n os.system('ssh {}'.format(list_ips[0]))\n else:\n os.system('ssh {}@{}'.format(user,list_ips[0]))\n else:\n if not user:\n os.system('tssh {}'.format(' '.join(list_ips)))\n else:\n os.system('tssh -o \"-l {}\" {}'.format(user,' '.join(list_ips)))",
"def test_scp_no_user(self):\n self.assertEqual(\n general.scp(None,'example.com','my_file','remotedir').command_line,\n ['scp','my_file','example.com:remotedir'])",
"def test_get_host(self):\n pass",
"def test_ssh_with_site(self):\n self._test_ssh_with_site(self.git_ssh_path)",
"def __ssh_tunnel(self):\n\n host = self.sshTunnelDict[\"ssh_ip\"]\n user = self.sshTunnelDict[\"ssh_user\"]\n password = self.sshTunnelDict[\"ssh_password\"]\n sfcs = self.sshTunnelDict[\"target_ip\"]\n\n tunnel_command = 'ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -C -N -f -L 3306:{0} {1}@{2}'.format(sfcs, user, host)\n retry = 5\n while retry:\n if not self.__check_ssh():\n try:\n ssh_tunnel = pexpect.spawn(tunnel_command)\n ssh_tunnel.expect('password:')\n time.sleep(0.1)\n ssh_tunnel.sendline(password)\n ssh_tunnel.expect(pexpect.EOF)\n retry -= 1\n except:\n raise Exception(\"Create SSH Tunnel Failed: retry 5\")\n else: break",
"def testEstablishWebRTCSshTunnel(self):\n fake_ip_addr = \"1.1.1.1\"\n fake_rsa_key_file = \"/tmp/rsa_file\"\n ssh_user = \"fake_user\"\n self.Patch(utils, \"ReleasePort\")\n self.Patch(utils, \"_ExecuteCommand\")\n self.Patch(subprocess, \"check_call\", return_value=True)\n extra_args_ssh_tunnel = \"-o command='shell %s %h' -o command1='ls -la'\"\n utils.EstablishWebRTCSshTunnel(\n ip_addr=fake_ip_addr, rsa_key_file=fake_rsa_key_file,\n ssh_user=ssh_user, extra_args_ssh_tunnel=None)\n args_list = [\"-i\", \"/tmp/rsa_file\",\n \"-o\", \"UserKnownHostsFile=/dev/null\",\n \"-o\", \"StrictHostKeyChecking=no\",\n \"-L\", \"8443:127.0.0.1:8443\",\n \"-L\", \"15550:127.0.0.1:15550\",\n \"-L\", \"15551:127.0.0.1:15551\",\n \"-N\", \"-f\", \"-l\", \"fake_user\", \"1.1.1.1\"]\n first_call_args = utils._ExecuteCommand.call_args_list[0][0]\n self.assertEqual(first_call_args[1], args_list)\n\n extra_args_ssh_tunnel = \"-o command='shell %s %h'\"\n utils.EstablishWebRTCSshTunnel(\n ip_addr=fake_ip_addr, rsa_key_file=fake_rsa_key_file,\n ssh_user=ssh_user, extra_args_ssh_tunnel=extra_args_ssh_tunnel)\n args_list_with_extra_args = [\"-i\", \"/tmp/rsa_file\",\n \"-o\", \"UserKnownHostsFile=/dev/null\",\n \"-o\", \"StrictHostKeyChecking=no\",\n \"-L\", \"8443:127.0.0.1:8443\",\n \"-L\", \"15550:127.0.0.1:15550\",\n \"-L\", \"15551:127.0.0.1:15551\",\n \"-N\", \"-f\", \"-l\", \"fake_user\", \"1.1.1.1\",\n \"-o\", \"command=shell %s %h\"]\n first_call_args = utils._ExecuteCommand.call_args_list[1][0]\n self.assertEqual(first_call_args[1], args_list_with_extra_args)",
"def test_target_name(self, parse_input):\n bb = parse_input(\"name testname\\nversion 1.0\\ntarget example\")\n assert bb.target[\"name\"] == \"example\"",
"def handle_args(args: Namespace) -> list:\n # If no targets provided, assume were finding them on network.\n # Once we have targets, if no test given, port/service scan them.\n if not args.target:\n low(\"Target not supplied, running host scan.\")\n hosts = get_hosts(verify_subnet(args.subnet))\n else:\n low(\"Target supplied: {}\".format(args.target))\n hosts = [Host(host) for host in args.target]\n\n if args.user and args.passwd:\n low(\"Username and Password supplied for tests, {}:{}\".format(args.user, args.passwd))\n for host in hosts:\n host.credentials = {'user': args.user, 'passwd': args.passwd}\n\n return hosts"
] | [
"0.7891038",
"0.66818714",
"0.64358",
"0.626521",
"0.62499535",
"0.6192972",
"0.6165336",
"0.6032608",
"0.599522",
"0.59491026",
"0.5891626",
"0.5764562",
"0.56263566",
"0.5608297",
"0.554224",
"0.5528934",
"0.55157524",
"0.5487462",
"0.54454535",
"0.5427359",
"0.5413081",
"0.54024506",
"0.537182",
"0.5370377",
"0.53231806",
"0.53141874",
"0.5304671",
"0.5302097",
"0.53007483",
"0.52912605"
] | 0.84123373 | 0 |
test parse_tgt when only the host is set on the ssh cli tgt | def test_parse_tgt_no_user(opts):
host = "localhost"
opts["ssh_user"] = "ssh-usr"
opts["tgt"] = host
with patch("salt.utils.network.is_reachable_host", MagicMock(return_value=False)):
assert not opts.get("ssh_cli_tgt")
client = ssh.SSH(opts)
assert client.parse_tgt["hostname"] == host
assert client.parse_tgt["user"] == opts["ssh_user"]
assert opts.get("ssh_cli_tgt") == host | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_parse_tgt(opts):\n host = \"localhost\"\n user = \"test-user@\"\n opts[\"tgt\"] = user + host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n assert not opts.get(\"ssh_cli_tgt\")\n client = ssh.SSH(opts)\n assert client.parse_tgt[\"hostname\"] == host\n assert client.parse_tgt[\"user\"] == user.split(\"@\")[0]\n assert opts.get(\"ssh_cli_tgt\") == user + host",
"def test_expand_target_no_host(opts, tmp_path):\n host = \"127.0.0.1\"\n user = \"test-user@\"\n opts[\"tgt\"] = user + host\n\n roster = \"\"\"\n localhost: 127.0.0.1\n \"\"\"\n roster_file = str(tmp_path / \"test_roster_no_host\")\n with salt.utils.files.fopen(roster_file, \"w\") as fp:\n salt.utils.yaml.safe_dump(salt.utils.yaml.safe_load(roster), fp)\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n client = ssh.SSH(opts)\n assert opts[\"tgt\"] == user + host\n with patch(\"salt.roster.get_roster_file\", MagicMock(return_value=roster_file)):\n client._expand_target()\n assert opts[\"tgt\"] == host",
"def test_update_targets_ip_address(opts):\n host = \"127.0.0.1\"\n user = \"test-user@\"\n opts[\"tgt\"] = user + host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n client = ssh.SSH(opts)\n assert opts[\"tgt\"] == user + host\n client._update_targets()\n assert opts[\"tgt\"] == host\n assert client.targets[host][\"user\"] == user.split(\"@\")[0]",
"def test_update_targets_no_user(opts):\n host = \"127.0.0.1\"\n opts[\"tgt\"] = host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n client = ssh.SSH(opts)\n assert opts[\"tgt\"] == host\n client._update_targets()\n assert opts[\"tgt\"] == host",
"def test_expand_target_no_user(opts, roster):\n host = \"127.0.0.1\"\n opts[\"tgt\"] = host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n client = ssh.SSH(opts)\n assert opts[\"tgt\"] == host\n\n with patch(\n \"salt.roster.get_roster_file\", MagicMock(return_value=\"/etc/salt/roster\")\n ), patch(\n \"salt.client.ssh.compile_template\",\n MagicMock(return_value=salt.utils.yaml.safe_load(roster)),\n ):\n client._expand_target()\n assert opts[\"tgt\"] == host",
"def targets(tgt, tgt_type=\"glob\"):\n\n ssh_known_hosts_file = __opts__.get(\"ssh_known_hosts_file\")\n\n if not os.path.isfile(ssh_known_hosts_file):\n log.error(\"Cannot find SSH known_hosts file\")\n raise OSError(\"Cannot find SSH known_hosts file\")\n if not os.access(ssh_known_hosts_file, os.R_OK):\n log.error(\"Cannot access SSH known_hosts file: %s\", ssh_known_hosts_file)\n raise OSError(\n \"Cannot access SSH known_hosts file: {}\".format(ssh_known_hosts_file)\n )\n\n with salt.utils.files.fopen(ssh_known_hosts_file, \"r\") as hostfile:\n raw = _parse_ssh_known_hosts([line.rstrip() for line in hostfile])\n\n return __utils__[\"roster_matcher.targets\"](raw, tgt, tgt_type, \"ipv4\")",
"def test_update_targets_dns(opts):\n host = \"localhost\"\n user = \"test-user@\"\n opts[\"tgt\"] = user + host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n client = ssh.SSH(opts)\n assert opts[\"tgt\"] == user + host\n client._update_targets()\n assert opts[\"tgt\"] == host\n assert client.targets[host][\"user\"] == user.split(\"@\")[0]",
"def test_expand_target_ip_address(opts, roster):\n host = \"127.0.0.1\"\n user = \"test-user@\"\n opts[\"tgt\"] = user + host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n client = ssh.SSH(opts)\n assert opts[\"tgt\"] == user + host\n with patch(\n \"salt.roster.get_roster_file\", MagicMock(return_value=\"/etc/salt/roster\")\n ), patch(\n \"salt.client.ssh.compile_template\",\n MagicMock(return_value=salt.utils.yaml.safe_load(roster)),\n ):\n client._expand_target()\n assert opts[\"tgt\"] == host",
"def test_target_arg(self, parse_input):\n with pytest.warns(SyntaxWarning, match=\"only accept keyword options\"):\n parse_input(\"name testname\\nversion 1.0\\ntarget example (6)\")",
"def test_get_host(self):\n pass",
"def test_expand_target_dns(opts, roster):\n host = \"localhost\"\n user = \"test-user@\"\n opts[\"tgt\"] = user + host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n client = ssh.SSH(opts)\n assert opts[\"tgt\"] == user + host\n with patch(\n \"salt.roster.get_roster_file\", MagicMock(return_value=\"/etc/salt/roster\")\n ), patch(\n \"salt.client.ssh.compile_template\",\n MagicMock(return_value=salt.utils.yaml.safe_load(roster)),\n ):\n client._expand_target()\n assert opts[\"tgt\"] == host",
"def test_update_expand_target_dns(opts, roster):\n host = \"localhost\"\n user = \"test-user@\"\n opts[\"tgt\"] = user + host\n\n with patch(\"salt.utils.network.is_reachable_host\", MagicMock(return_value=False)):\n client = ssh.SSH(opts)\n assert opts[\"tgt\"] == user + host\n with patch(\n \"salt.roster.get_roster_file\", MagicMock(return_value=\"/etc/salt/roster\")\n ), patch(\n \"salt.client.ssh.compile_template\",\n MagicMock(return_value=salt.utils.yaml.safe_load(roster)),\n ):\n client._expand_target()\n client._update_targets()\n assert opts[\"tgt\"] == host\n assert client.targets[host][\"user\"] == user.split(\"@\")[0]",
"def test_ssh_nodata(self):\n self.assertEqual(parse('', quiet=True), [])",
"def test_ssh_cmd(self):\n self.assertEqual(general.ssh_command('user','example.com',('ls','-l')).command_line,\n ['ssh','[email protected]','ls','-l'])",
"def test_ssh_config1(self):\n self.assertEqual(\n parse(self.f_in['ssh_config1'], quiet=True),\n self.f_json['ssh_config1']\n )",
"def test_ssh_config2(self):\n self.assertEqual(\n parse(self.f_in['ssh_config2'], quiet=True),\n self.f_json['ssh_config2']\n )",
"def test_target_name(self, parse_input):\n bb = parse_input(\"name testname\\nversion 1.0\\ntarget example\")\n assert bb.target[\"name\"] == \"example\"",
"def test_vms_destination(self):\n testflow.step(\"Deactivate host %s\", conf.HOSTS[0])\n assert not ll_hosts.deactivate_host(positive=True, host=conf.HOSTS[0])",
"def testGetHostConfig(self):\n config_path = GetTestFilePath('unified_lab_config/valid_lab/hosts')\n pool = lab_config.UnifiedLabConfigPool(config_path)\n pool.LoadConfigs()\n host = pool.GetHostConfig('crystalball1.atc.google.com')\n self.assertEqual('crystalball1.atc.google.com', host.hostname)\n self.assertEqual('lab_user1', host.host_login_name)\n self.assertEqual('crystalball', host.cluster_name)\n self.assertEqual('path/to/config.xml', host.tf_global_config_path)\n self.assertEqual('-F path/to/ssh/config', host.ssh_arg)",
"def test_get_host_access(self):\n pass",
"def test_ssh_cmd_no_user(self):\n self.assertEqual(general.ssh_command(None,'example.com',('ls','-l')).command_line,\n ['ssh','example.com','ls','-l'])",
"def test_vms_host(self):\n testflow.step(\"Check if VM's started on different hosts\")\n assert (\n ll_vms.get_vm_host(vm_name=conf.VM_NAME[0]) !=\n ll_vms.get_vm_host(vm_name=conf.VM_NAME[1])\n )",
"def test_vms_host(self):\n testflow.step(\"Check if VM's started on different hosts\")\n assert (\n ll_vms.get_vm_host(vm_name=conf.VM_NAME[0]) !=\n ll_vms.get_vm_host(vm_name=conf.VM_NAME[1])\n )",
"def test_target_kwarg(self, parse_input):\n bb = parse_input(\"name testname\\nversion 1.0\\ntarget example (shots=10, hbar=0.2)\")\n assert bb.target[\"options\"] == {\"shots\": 10, \"hbar\": 0.2}",
"def test_ssh(self):\n self._test_ssh(self.git_ssh_path)",
"def validate_target(target: str) -> bool:\n try:\n gethostbyname(target)\n except (gaierror, UnicodeError):\n return False\n return True",
"def test_target_repo(self):\n # network may be unavailable, but we are not interested anyway,\n # so we ignore the exitcode\n output = self.run_command(\"selfupdate --check bennr01:dev\", exitcode=None)\n self.assertIn(\"Target: bennr01:dev\", output)\n self.assertNotIn(\"Target: ywangd:master\", output)",
"def test_striping_patch(self):\n self.create_simple_filesystem(synthetic_host(\"myserver\"))\n hosts = [synthetic_host(\"myserver{0:d}\".format(n)) for n in range(4)] * 2\n # keep hosts in alternating order, but supply them grouped\n objects = [\n {\"kind\": \"OST\", \"filesystem_id\": self.fs.id, \"volume_id\": synthetic_volume_full(host).id}\n for host in sorted(hosts, key=str)\n ]\n response = self.api_client.patch(\"/api/target/\", data={\"deletions\": [], \"objects\": objects})\n self.assertHttpAccepted(response)\n content = json.loads(response.content)\n self.assertEqual(map(str, hosts), list(self._target_hosts(content[\"targets\"])))",
"def test_arguments_parser(self):\n self.assertEqual('monitoring-dc.app.corp',\n self.plugin.options.hostname)",
"def test_ssh_config4(self):\n self.assertEqual(\n parse(self.f_in['ssh_config4'], quiet=True),\n self.f_json['ssh_config4']\n )"
] | [
"0.7992045",
"0.6546669",
"0.64141375",
"0.6399565",
"0.6154963",
"0.608924",
"0.60557365",
"0.594483",
"0.58621407",
"0.5818358",
"0.58130467",
"0.5710715",
"0.5646345",
"0.56421787",
"0.55266964",
"0.5521712",
"0.55002075",
"0.54592144",
"0.5452184",
"0.5410033",
"0.53792155",
"0.53527784",
"0.53527784",
"0.53218305",
"0.53031147",
"0.5274429",
"0.52690756",
"0.5245404",
"0.521772",
"0.52074176"
] | 0.7612063 | 1 |
test "extra_filerefs" are not excluded from kwargs when preparing the SSH opts | def test_extra_filerefs(tmp_path, opts):
ssh_opts = {
"eauth": "auto",
"username": "test",
"password": "test",
"client": "ssh",
"tgt": "localhost",
"fun": "test.ping",
"ssh_port": 22,
"extra_filerefs": "salt://foobar",
}
roster = str(tmp_path / "roster")
client = salt.client.ssh.client.SSHClient(mopts=opts, disable_custom_roster=True)
with patch("salt.roster.get_roster_file", MagicMock(return_value=roster)):
ssh_obj = client._prep_ssh(**ssh_opts)
assert ssh_obj.opts.get("extra_filerefs", None) == "salt://foobar" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_ssh_kwargs(test_opts):\n opt_key = test_opts[0]\n opt_value = test_opts[1]\n # Is the kwarg in salt.utils.parsers?\n in_parser = test_opts[2]\n\n opts = {\n \"eauth\": \"auto\",\n \"username\": \"test\",\n \"password\": \"test\",\n \"client\": \"ssh\",\n \"tgt\": \"localhost\",\n \"fun\": \"test.ping\",\n opt_key: opt_value,\n }\n client = salt.client.ssh.client.SSHClient(disable_custom_roster=True)\n if in_parser:\n ssh_kwargs = salt.utils.parsers.SaltSSHOptionParser().defaults\n assert opt_key in ssh_kwargs\n\n with patch(\"salt.roster.get_roster_file\", MagicMock(return_value=\"\")), patch(\n \"salt.client.ssh.shell.gen_key\"\n ), patch(\"salt.fileserver.Fileserver.update\"), patch(\"salt.utils.thin.gen_thin\"):\n ssh_obj = client._prep_ssh(**opts)\n assert ssh_obj.opts.get(opt_key, None) == opt_value",
"def testExtraArgsSSHTunnel(self):\n fake_ip_addr = \"1.1.1.1\"\n fake_rsa_key_file = \"/tmp/rsa_file\"\n fake_target_vnc_port = 8888\n target_adb_port = 9999\n ssh_user = \"fake_user\"\n fake_port = 12345\n self.Patch(utils, \"PickFreePort\", return_value=fake_port)\n self.Patch(utils, \"_ExecuteCommand\")\n self.Patch(subprocess, \"check_call\", return_value=True)\n extra_args_ssh_tunnel = \"-o command='shell %s %h' -o command1='ls -la'\"\n utils.AutoConnect(ip_addr=fake_ip_addr,\n rsa_key_file=fake_rsa_key_file,\n target_vnc_port=fake_target_vnc_port,\n target_adb_port=target_adb_port,\n ssh_user=ssh_user,\n client_adb_port=fake_port,\n extra_args_ssh_tunnel=extra_args_ssh_tunnel)\n args_list = [\"-i\", \"/tmp/rsa_file\",\n \"-o\", \"UserKnownHostsFile=/dev/null\",\n \"-o\", \"StrictHostKeyChecking=no\",\n \"-L\", \"12345:127.0.0.1:9999\",\n \"-L\", \"12345:127.0.0.1:8888\",\n \"-N\", \"-f\", \"-l\", \"fake_user\", \"1.1.1.1\",\n \"-o\", \"command=shell %s %h\",\n \"-o\", \"command1=ls -la\"]\n first_call_args = utils._ExecuteCommand.call_args_list[0][0]\n self.assertEqual(first_call_args[1], args_list)",
"def add_extra_args(self):\n self.parser.add_argument(\"--region\", required=False)\n self.parser.add_argument(\"--zone\", required=False)\n self.parser.add_argument(\"--network\", required=False)",
"def split_remote_kwargs(cls, kwargs, include=None, skip=None):\n include = make_list(include) if include else []\n skip = make_list(skip) if skip else []\n transfer_kwargs = {\n name: kwargs.pop(name)\n for name in [\"cache\", \"prefer_cache\", \"retries\", \"retry_delay\"] + include\n if name in kwargs and name not in skip\n }\n return transfer_kwargs, kwargs",
"def test_args_without_secret_file(self):\n args = [self.service, self.env, \"--match\", \"test\"]\n with self.assertRaises(ValueError):\n ef_password.handle_args_and_set_context(args)",
"def test_args_none():\n args = cli.parse_args([])\n assert not args.copy\n assert not args.paste\n assert args.file is None\n assert not args.debug",
"def test_checkCustoms(self):\n self.failUnlessEqual(self.nice.opts['myflag'], \"PONY!\")\n self.failUnlessEqual(self.nice.opts['myparam'], \"Tofu WITH A PONY!\")",
"def parse_options(self, extra):\n options = super().parse_options(extra)\n self.target_image = options.pop(\"target\")\n\n return options",
"def test_exclusive_args():\n with pytest.raises(SystemExit):\n cli.parse_args(['-cf', 'filename'])\n with pytest.raises(SystemExit):\n cli.parse_args(['-cf'])",
"def exclude_opts(cls) -> Tuple[str, ...]:\n return \"required\", \"print_config\", \"config\", \"ngpu\"",
"def test_args_without_match(self):\n args = [self.service, self.env, \"--secret_file\", \"test_data/parameters/test.cnf.parameters.json\"]\n with self.assertRaises(ValueError):\n ef_password.handle_args_and_set_context(args)",
"def test_docker_args_not_set(self):\n with open(\".scuba.yml\", \"w\") as f:\n f.write(\n r\"\"\"\n image: na\n \"\"\"\n )\n\n config = scuba.config.load_config(\".scuba.yml\")\n assert config.docker_args is None",
"def _accept_or_ignore_job_kwargs(self, _exclude_errors=(), **kwargs):\n errors = {}\n if kwargs:\n for field_name in kwargs.keys():\n errors[field_name] = [_(\"Field is not allowed on launch.\")]\n return ({}, kwargs, errors)",
"def split_transfer_kwargs(kwargs, skip=None):\n skip = make_list(skip) if skip else []\n transfer_kwargs = {\n name: kwargs.pop(name)\n for name in [\"cache\", \"prefer_cache\", \"retries\", \"retry_delay\"]\n if name in kwargs and name not in skip\n }\n return transfer_kwargs, kwargs",
"def test_cli_plus_defaults(mock_zip_file):\n\n option_subset = {'zip_path': str(mock_zip_file)}\n result = Packager.from_cli(['-z', str(mock_zip_file)]).options\n assert_dict_contains_subset(option_subset, result)\n\n option_subset = {'fields': ['kDefinition']}\n result = Packager.from_cli(['-f', 'kDefinition']).options\n assert_dict_contains_subset(option_subset, result)\n\n option_subset = {'fields': ['kDefinition', 'kXerox']}\n result = Packager.from_cli(['-f', 'kDefinition', 'kXerox']).options\n assert_dict_contains_subset(\n option_subset, result, msg=\"fields -f allows multiple fields.\"\n )\n\n option_subset = {'fields': ['kDefinition', 'kXerox'], 'destination': 'data/ha.csv'}\n result = Packager.from_cli(\n ['-f', 'kDefinition', 'kXerox', '-d', 'data/ha.csv']\n ).options\n assert_dict_contains_subset(\n option_subset, result, msg=\"fields -f allows additional arguments.\"\n )\n\n result = Packager.from_cli(['--format', 'json']).options\n option_subset = {'format': 'json'}\n assert_dict_contains_subset(option_subset, result, msg=\"format argument works\")",
"def _get_argparse_kwargs(self, group, **kwargs):\n kwargs = super(_ConfigFileOpt, self)._get_argparse_kwargs(group)\n kwargs['action'] = self.ConfigFileAction\n return kwargs",
"def get_opt(self):\n opts, args = self.parser.parse_args()\n if opts.path is not None:\n opts.path = os.path.abspath(os.path.expandvars(os.path.expanduser(opts.path)))\n if opts.output == \"-\":\n opts.output = sys.__stdout__\n else:\n filepath = os.path.dirname(os.path.realpath(os.path.expanduser(opts.output)))\n if not os.access(filepath,os.W_OK):\n self.parser.error(\"Cannot write to %s\"%filepath)\n if os.path.isfile(opts.output):\n self.parser.error(\"File already exists: %s\"%opts.output) \n if not opts.dryrun:\n try: \n opts.output = open(opts.output,\"w\")\n except:\n self.parser.error(\"Cannot write to %s\"%opts.output)\n else:\n opts.output = sys.__stdout__\n try:\n opts.whitelist = open(opts.whitelist)\n except:\n self.parser.error(\"Cannot open whitelist.\")\n return opts",
"def sufficient_options(self):\n has_token = self.opts.get('token')\n has_project_domain_or_tenant = (self.opts.get('project_id') or\n (self.opts.get('project_name') and\n (self.opts.get('user_domain_name') or\n self.opts.get('user_domain_id'))) or\n (self.opts.get('tenant_id') or\n self.opts.get('tenant_name')))\n has_credential = (self.opts.get('username')\n and has_project_domain_or_tenant\n and self.opts.get('password')\n and self.opts.get('auth_url'))\n missing = not (has_token or has_credential)\n if missing:\n missing_opts = []\n opts = ['token', 'endpoint', 'username', 'password', 'auth_url',\n 'tenant_id', 'tenant_name']\n for opt in opts:\n if not self.opts.get(opt):\n missing_opts.append(opt)\n raise exceptions.AuthPluginOptionsMissing(missing_opts)",
"def new_comm_kwargs(cls, *args, **kwargs):\n kwargs.setdefault('address', 'file.txt')\n return args, kwargs",
"def pre_process(self, **kwargs):\n if 'skip' in kwargs and kwargs['skip'] is True:\n self.skip_prompt = True\n # create app directory\n self.create_app_dir()\n # generate additional params\n additional = kwargs.get('additional')\n params = self.default_additional\n if additional:\n for a in additional:\n idx = a.find('=')\n if idx > 0:\n params[a[0:idx]] = a[idx + 1:]\n kwargs['additional_params'] = params\n return kwargs",
"def Filter_Option_Parser(argv, extra_opt, ignore_outfile = False):\n opts = [(\"tape\" , int, None, False, True),\n (\"steps\" , int, None, False, True),\n (\"infile\" , str, None, True , True),\n (\"outfile\" , str, None, False, True),\n (\"force\" , bool, False, False, False),\n (\"log_number\", int, None, False, True)] + extra_opt\n ignore_opts = []\n if ignore_outfile:\n ignore_opts.append(\"outfile\")\n opts, args = Option_Parser(argv, opts, help_flag = True, no_mult = True,\n ignore_opts = ignore_opts)\n\n opts[\"infilename\"] = opts[\"infile\"]\n opts[\"infile\"] = open_infile(opts[\"infilename\"])\n\n if not ignore_outfile:\n opts[\"states\"], opts[\"symbols\"], tape, steps = Read_Attributes(opts[\"infile\"])\n\n # Tape length and max num steps default to those from the input file, but\n # can be changed by command line options.\n if not opts[\"tape\"]:\n opts[\"tape\"] = tape\n if not opts[\"steps\"]:\n opts[\"steps\"] = steps\n\n if not opts[\"outfile\"]:\n # Default output filename is based off of parameters.\n opts[\"outfile\"] = \"%dx%d.out\" % (opts[\"states\"], opts[\"symbols\"])\n\n opts[\"outfilename\"] = opts[\"outfile\"]\n opts[\"outfile\"] = open_outfile(opts[\"outfilename\"], opts[\"force\"])\n if not opts[\"outfile\"]:\n sys.exit(1)\n\n return opts, args",
"def test_cli_args():\n expected = dict(\n paths=[\"path1\", \"path2\"],\n exclude=[\"file*.py\", \"dir/\"],\n ignore_decorators=[\"deco1\", \"deco2\"],\n ignore_names=[\"name1\", \"name2\"],\n make_whitelist=True,\n min_confidence=10,\n sort_by_size=True,\n verbose=True,\n )\n result = _parse_args(\n [\n \"--exclude=file*.py,dir/\",\n \"--ignore-decorators=deco1,deco2\",\n \"--ignore-names=name1,name2\",\n \"--make-whitelist\",\n \"--min-confidence=10\",\n \"--sort-by-size\",\n \"--verbose\",\n \"path1\",\n \"path2\",\n ]\n )\n assert isinstance(result, dict)\n assert result == expected",
"def test_args_secret_file(self):\n args = [self.service, self.env, \"--length\", \"10\", \"--secret_file\",\n \"test_data/parameters/test.cnf.parameters.json\", \"--match\", \"test\"]\n context = ef_password.handle_args_and_set_context(args)\n self.assertEqual(context.env, self.env)\n self.assertEqual(context.service, self.service)\n self.assertEqual(context.length, 10)\n self.assertEqual(context.secret_file, \"test_data/parameters/test.cnf.parameters.json\")\n self.assertEqual(context.match, \"test\")",
"def _additional_option(self):\n pass",
"def main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--dryrun', '-n', action='store_true',\n help=('check TileDB/SSH files differences only, '\n 'does not perform any copy'))\n parser.add_argument('--debug', '-d', action='store_true',\n help=('prints debug messages'))\n parser.add_argument('--tdmq-url', action='store', type=str, required=True,\n dest='tdmq_url',\n help=('tdmq server and path of the form'))\n parser.add_argument('--tdmq-auth-token', action='store', type=str, required=True,\n dest='tdmq_auth_token',\n help=('tdmq server authorization token'))\n parser.add_argument('--ssh-url', action='store', type=str, required=True,\n dest='ssh_url',\n help=(\n 'ssh server and path of the form: '\n '<USER>@<NAME_NODE>:<PORT>/PATH'))\n parser.add_argument('--ssh-key', action='store', type=str, required=True,\n dest='ssh_key',\n help=('key for ssh server authentication'))\n parser.add_argument('--desc-file', action='store', type=str, required=True,\n dest='source_desc_file',\n help=('source descrption file'))\n\n # Only one of --hours and --sync can be provided on command line\n sync_group = parser.add_mutually_exclusive_group()\n sync_group.add_argument('--hours', action='store',\n dest='hours', default=24, type=int,\n help=('uploads only the radar images '\n 'more recent than the given number of hours'))\n sync_group.add_argument('--sync', '-s', action='store_true',\n dest='sync',\n help=('upload all the missing radar images'))\n\n args = parser.parse_args()\n\n # If the debug flag is set, print all messages\n if args.debug:\n logging.basicConfig(\n level=logging.DEBUG,\n format='[%(levelname)s] %(message)s')\n else:\n logging.basicConfig(\n level=logging.INFO,\n format='[%(levelname)s] %(message)s')\n\n logging.getLogger(\"paramiko\").setLevel(logging.WARNING)\n\n (_ssh_username, _ssh_hostname, _ssh_port,\n _ssh_root) = check_ssh_url(args.ssh_url)\n if _ssh_hostname is None:\n logging.error(\n 'Wrong, incomplete or absent SSH path: \\'%s\\'', args.ssh_url)\n sys.exit(1)\n\n if os.path.isfile(args.ssh_key) == False:\n logging.error(\n 'SSH key file not found: \\'%s\\'', args.ssh_key)\n sys.exit(1)\n\n if os.path.isfile(args.source_desc_file) == False:\n logging.error(\n 'Source description file not found: \\'%s\\'', args.source_desc_file)\n sys.exit(1)\n\n _source_desc = load_description(args.source_desc_file)\n\n ssh_client = SSHClient(\n username=_ssh_username,\n hostname=_ssh_hostname,\n port=_ssh_port,\n key_file=args.ssh_key,\n root_dir=_ssh_root\n )\n\n _folder_list = ssh_client.list_folder()\n\n def _name_filter(file_name):\n # Is a radar image file\n if re.match(r'cag01est2400\\d{4}-\\d{2}-\\d{2}_\\d{2}:\\d{2}:\\d{2}.png', file_name):\n return True\n else:\n return False\n\n # Filter out not image files\n _image_list = list(filter(_name_filter, _folder_list))\n\n # Instantiates a TDMQ client, retrieves the source if exists or registers a\n # new one\n tdmq_client = Client(args.tdmq_url, args.tdmq_auth_token)\n sources = tdmq_client.find_sources({'id': _source_desc['id']})\n if len(sources) > 0:\n assert len(sources) == 1\n source = sources[0]\n logging.info(f\"Using source {source.tdmq_id} for {source.id}.\")\n else:\n source = tdmq_client.register_source(_source_desc)\n logging.info(f\"Created source {source.tdmq_id} for {source.id}.\")\n\n try:\n ts = source.timeseries()\n times = ts.time\n last_image_time = max(sorted(times))\n _last_slot = max(ts.tiledb_indices)\n except Exception as ex: # FIXME too general\n times = []\n last_image_time = 
datetime.datetime(1970, 1, 1, 0, 0, 0)\n _last_slot = 0\n\n # Builds the list of file to download\n if args.sync:\n _images_to_ingest = ingest_missings(_image_list, times)\n else:\n start_time = (\n datetime.datetime.now() - datetime.timedelta(hours=args.hours)\n ).replace( minute=0, second=0, microsecond=0)\n\n logging.info(f\"Requested images from {start_time} (last local image is {last_image_time}).\")\n if start_time > last_image_time:\n last_image_time = start_time\n\n _images_to_ingest = ingest_latests(last_image_time, _image_list)\n\n logging.info(\n f\"Remote files: {len(_folder_list)}, remote images: \"\n f\"{len(_image_list)}, images to sync: {len(_images_to_ingest)}.\")\n\n for _image in _images_to_ingest:\n _timestamp = datetime.datetime.strptime(\n _image, 'cag01est2400%Y-%m-%d_%H:%M:%S.png')\n _last_slot = _last_slot + 1\n\n if args.dryrun:\n logging.debug(f\"[DRY-RUN] Ingesting data at time {_timestamp}, slot {_last_slot}.\")\n else:\n logging.debug(f\"Ingesting data at time {_timestamp}, slot {_last_slot}.\")\n _data = fetch_radar_data(ssh_client, _image)\n source.ingest(_timestamp, _data, _last_slot)\n logging.info(f\"Done ingesting.\")",
"def generate_options(self):\n super(CreateMachine, self).generate_options()\n options = [\"image\", \"flavor\", \"files\", \"meta\", \"scheduler_hints\"]\n for option in self.command_options:\n if option['dest'] in options:\n option['action'] = \"append\"",
"def missing_option(context):\n context.config_file = './features/files/missing-option.cfg'",
"def test_checkParameters(self):\n self.failUnlessEqual(self.nice.opts['long'], \"Alpha\")\n self.failUnlessEqual(self.nice.opts['another'], \"Beta\")\n self.failUnlessEqual(self.nice.opts['longonly'], \"noshort\")\n self.failUnlessEqual(self.nice.opts['shortless'], \"Gamma\")",
"def add_extra_args(self):\n super(AwsNetworkCleanupMethod, self).add_extra_args()\n self.parser.add_argument(\"--custom_payload\", required=False,\n help=\"JSON payload of per-region data.\")",
"def test_ssh_cmd_no_user(self):\n self.assertEqual(general.ssh_command(None,'example.com',('ls','-l')).command_line,\n ['ssh','example.com','ls','-l'])"
] | [
"0.585128",
"0.5742932",
"0.5614613",
"0.55098736",
"0.5377966",
"0.53588086",
"0.5335075",
"0.5329998",
"0.5300597",
"0.52753556",
"0.5273424",
"0.5244408",
"0.523141",
"0.52186584",
"0.52164793",
"0.5213526",
"0.5114157",
"0.50991726",
"0.5093965",
"0.50867444",
"0.50815296",
"0.5069093",
"0.5066984",
"0.50483376",
"0.5033157",
"0.5031599",
"0.5021249",
"0.5018547",
"0.5015228",
"0.5014205"
] | 0.6820886 | 0 |
The main function used to produce a model ready for compression finetuning from an original PyTorch model and a configuration object. dummy_forward_fn | def create_compressed_model(
model: Module,
config: NNCFConfig,
compression_state: Optional[Dict[str, Any]] = None,
dummy_forward_fn: Callable[[Module], Any] = None,
wrap_inputs_fn: Callable[[Tuple, Dict], Tuple[Tuple, Dict]] = None,
wrap_outputs_fn: Callable[[Tuple, Dict], Tuple[Tuple, Dict]] = None,
dump_graphs=True,
) -> Tuple[CompressionAlgorithmController, NNCFNetwork]:
if isinstance(model, NNCFNetwork):
raise RuntimeError(
"The model object has already been compressed.\n"
"NNCF for PyTorch modifies the model object in-place, and repeat calls to "
"`nncf.torch.create_compressed_model` with the same model object passed as argument "
"will lead to an incorrect attempt to compress the model twice.\n"
"Make sure that the model object you are passing has not already been compressed (for "
"instance, by testing `if isinstance(model, nncf.torch.nncf_network.NNCFNetwork)`).\n"
"If you are encountering this in a Jupyter notebook context - make sure that when "
"re-running cells involving `nncf.torch.create_compressed_model` the original model object "
"is also re-created (via constructor call)."
)
if config.get("target_device") == "VPU":
warning_deprecated("VPU device is deprecated and will no longer be supported in the future.")
set_debug_log_dir(config.get("log_dir", "."))
is_legacy_model_state_dict = (
compression_state is not None
and BaseController.BUILDER_STATE not in compression_state
and BaseController.CONTROLLER_STATE not in compression_state
)
maybe_convert_legacy_names_in_compress_state(compression_state)
should_init = compression_state is None
nncf_network = create_nncf_network(model, config, dummy_forward_fn, wrap_inputs_fn, wrap_outputs_fn)
if dump_graphs and is_main_process():
nncf_network.nncf.get_graph().visualize_graph(osp.join(config.get("log_dir", "."), "original_graph.dot"))
builder = create_compression_algorithm_builder(config, should_init)
is_state_loadable = not is_legacy_model_state_dict and compression_state is not None
if is_state_loadable:
builder.load_state(compression_state[BaseController.BUILDER_STATE])
compressed_model = builder.apply_to(nncf_network)
compression_ctrl = builder.build_controller(compressed_model)
if is_state_loadable:
compression_ctrl.load_state(compression_state[BaseController.CONTROLLER_STATE])
compressed_model.nncf.set_compression_controller(compression_ctrl)
# Required to ensure that the model leaving create_compressed_model has correct compressed graph.
# In particular, this is currently required for correct functioning of RNNs.
compressed_model.nncf.rebuild_graph()
try:
if is_legacy_model_state_dict:
from nncf.torch import load_state # pylint: disable=cyclic-import
state_dict_to_load = compression_state.get("state_dict", compression_state)
load_state(compressed_model, state_dict_to_load, is_resume=True)
finally:
if dump_graphs and is_main_process():
compressed_model_graph = compressed_model.nncf.get_graph()
compressed_model_graph.visualize_graph(osp.join(config.get("log_dir", "."), "compressed_graph.dot"))
synchronize_all_processes_in_distributed_mode()
return compression_ctrl, compressed_model | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_export_pytorch_model(self):\n pytorch_model = PyTorchLinear()\n dummy_input = torch.empty(10, 10)\n\n with io.BytesIO() as f:\n onnx_converter._export_pytorch_model(f, pytorch_model, dummy_input)",
"def __init__(\n self,\n d_model,\n ff1_hsize=1024,\n ff1_dropout=0.2,\n n_head=4,\n mha_dropout=0.2,\n kernel_size=3,\n conv_dropout=0.2,\n ff2_hsize=1024,\n ff2_dropout=0.2\n ):\n super(Conformer, self).__init__()\n self.device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n self.ff_module1 = Residual(\n module=FFModule(\n d_model=d_model,\n h_size=ff1_hsize,\n dropout=ff1_dropout\n ),\n half=True\n )\n self.mha_module = Residual(\n module=MHAModule(\n d_model=d_model,\n n_head=n_head,\n dropout=mha_dropout\n )\n )\n self.conv_module = Residual(\n module=ConvModule(\n in_channels=d_model,\n kernel_size=kernel_size,\n dropout=conv_dropout\n )\n )\n self.ff_module2 = Residual(\n FFModule(\n d_model=d_model,\n h_size=ff2_hsize,\n dropout=ff2_dropout\n ),\n half=True\n )",
"def initialize_model(model_name, num_classes, feature_extract, verbose=False):\n\n model_ft = None\n\n if model_name == \"resnet\":\n \"\"\" Resnet18\n \"\"\"\n model_ft = models.resnet18(pretrained=True)\n set_parameter_requires_grad(model_ft, feature_extract)\n num_ftrs = model_ft.fc.in_features\n model_ft.fc = nn.Linear(num_ftrs, num_classes)\n\n elif model_name == \"alexnet\":\n \"\"\" Alexnet\n \"\"\"\n model_ft = models.alexnet(pretrained=True)\n set_parameter_requires_grad(model_ft, feature_extract)\n num_ftrs = model_ft.classifier[6].in_features\n model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)\n\n elif model_name == \"vgg\":\n \"\"\" VGG11_bn\n \"\"\"\n model_ft = models.vgg11_bn(pretrained=True)\n set_parameter_requires_grad(model_ft, feature_extract)\n num_ftrs = model_ft.classifier[6].in_features\n model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)\n\n elif model_name == \"squeezenet\":\n \"\"\" Squeezenet\n \"\"\"\n with warnings.catch_warnings(): # temporarily suppress warnings about deprecated functions\n warnings.simplefilter(\"ignore\")\n model_ft = models.squeezenet1_0(pretrained=True)\n set_parameter_requires_grad(model_ft, feature_extract)\n model_ft.classifier[1] = nn.Conv2d(512, num_classes, kernel_size=(1, 1), stride=(1, 1))\n model_ft.num_classes = num_classes\n\n elif model_name == \"densenet\":\n \"\"\" Densenet\n \"\"\"\n model_ft = models.densenet121(pretrained=True)\n set_parameter_requires_grad(model_ft, feature_extract)\n num_ftrs = model_ft.classifier.in_features\n model_ft.classifier = nn.Linear(num_ftrs, num_classes)\n\n elif model_name == \"inception\":\n \"\"\" Inception v3\n Be careful, expects (299,299) sized images and has auxiliary output\n \"\"\"\n model_ft = models.inception_v3(pretrained=True)\n set_parameter_requires_grad(model_ft, feature_extract)\n # Handle the auxilary net\n num_ftrs = model_ft.AuxLogits.fc.in_features\n model_ft.AuxLogits.fc = nn.Linear(num_ftrs, num_classes)\n # Handle the primary net\n num_ftrs = model_ft.fc.in_features\n model_ft.fc = nn.Linear(num_ftrs, num_classes)\n\n else: # Unreachable\n exit()\n\n # Gather the parameters to be optimized\n params_to_update = list(filter(lambda p: p.requires_grad, model_ft.parameters()))\n\n # Print model info\n if verbose:\n print()\n print(model_ft)\n print()\n print(\"Params to learn:\")\n for name, param in model_ft.named_parameters():\n if param.requires_grad:\n print('\\t', name)\n\n return model_ft, params_to_update",
"def test_forward(self):\r\n # CIFAR\r\n model = densenet.DenseNet(\r\n depth=40,\r\n Block=densenet.BasicBlock,\r\n growth_rate=12,\r\n mask=True,\r\n compression_rate=1.0,\r\n num_classes=100,\r\n )\r\n model.forward(torch.randn((1, 3, 32, 32)))",
"def main():\n\n downloadData()\n\n parser = argparse.ArgumentParser(description=\"Train model or test model (default)\")\n parser.add_argument(\"--train-model\", action=\"store_true\", default=False)\n parser.add_argument(\"--num-channels\", type=int, help=\"Number of channels in lowest dimension\", default=8)\n\n arg_parser = parser.parse_args()\n\n if arg_parser.train_model:\n encoder, generator = train(channels=arg_parser.num_channels)\n torch.save(encoder.state_dict(), f\"../models/encoder_{arg_parser.num_channels}.model\")\n torch.save(generator.state_dict(), f\"../models/generator_{arg_parser.num_channels}.model\")\n else:\n validate_models(VALIDATE_CHANNELS)",
"def setup(self):\n print(\"setup\")\n \n self.modelToUse = 1\n if self.train:\n print(\"train\")\n else:\n print(\"no train\")\n \n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n self.envSize = 17\n \n #init model\n if self.train or not os.path.isfile(\"my-saved-model.pt\"):\n self.logger.info(\"Setting up model from scratch.\")\n if self.modelToUse == 0:\n self.policy_net = Model_global_view(self.envSize, self.envSize, 6).to(device)\n self.model = Model_global_view(self.envSize, self.envSize, 6).to(device)\n elif self.modelToUse == 1:\n self.policy_net = Model_local_view(self.envSize, self.envSize, 6).to(device)\n self.model = Model_local_view(self.envSize, self.envSize, 6).to(device)\n else:\n self.policy_net = Model_combined_view(self.envSize, self.envSize, 6).to(device)\n self.model = Model_combined_view(self.envSize, self.envSize, 6).to(device)\n self.model.load_state_dict(self.policy_net.state_dict())\n self.model.eval()\n else:\n self.logger.info(\"Loading model from saved state.\")\n with open(\"my-saved-model.pt\", \"rb\") as file:\n if self.modelToUse == 0:\n self.model = Model_global_view(self.envSize, self.envSize, 6)\n elif self.modelToUse == 1:\n self.model = Model_local_view(self.envSize, self.envSize, 6)\n else:\n self.model = Model_combined_view(self.envSize, self.envSize, 6)\n if torch.cuda.is_available():\n self.model.load_state_dict(torch.load(file))\n self.model.to(device)\n else:\n self.model.load_state_dict(torch.load(file, map_location=device))",
"def initiate(self):\n # if self.opt.checkpoint_encoder:\n # self.load(self.opt.checkpoint_encoder, self.opt.checkpoint_decoder)\n # else:\n # start fresh.\n self.model = Transformer(\n self.opt.src_vocab_size,\n self.opt.tgt_vocab_size,\n self.opt.max_token_seq_len,\n tgt_emb_prj_weight_sharing=self.opt.proj_share_weight,\n emb_src_tgt_weight_sharing=self.opt.embs_share_weight,\n d_k=self.opt.d_k,\n d_v=self.opt.d_v,\n d_model=self.opt.d_model,\n d_word_vec=self.opt.d_word_vec,\n d_inner=self.opt.d_inner_hid,\n n_layers=self.opt.layers,\n n_head=self.opt.n_head,\n dropout=self.opt.dropout).to(self.device)\n \n for p in self.model.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)",
"def forward(self, x):\n sources = list()\n loc = list()\n conf = list()\n\n # apply vgg up to conv4_3 relu\n #print('Reached start of vgg')\n for k in self.vgg._modules.keys():\n if int(k) < 23:\n #print('Reached ' + k + ' ', x.size())\n x = self.vgg._modules[k].cuda()(x)\n #print('Reached L2Norm')\n s = self.L2Norm(x)\n sources.append(s)\n\n #print('Reached after L2Norm')\n # apply vgg up to fc7\n for k in self.vgg._modules.keys():\n if int(k) >= 23:\n #print('Reached ' + k + ' ', x.size())\n x = self.vgg._modules[k].cuda()(x)\n sources.append(x)\n #print('Reached end of VGG')\n\n # apply extra layers and cache source layer outputs\n for k, v in enumerate(self.extras):\n x = F.relu(v(x), inplace=True)\n if k % 2 == 1:\n sources.append(x)\n\n # apply multibox head to source layers\n for (x, l, c) in zip(sources, self.loc, self.conf):\n loc.append(l(x).permute(0, 2, 3, 1).contiguous())\n conf.append(c(x).permute(0, 2, 3, 1).contiguous())\n\n loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)\n conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)\n\n if self.phase == \"test\":\n output = self.detect(\n loc.view(loc.size(0), -1, 4), # loc preds\n self.softmax(conf.view(-1, self.num_classes)), # conf preds\n self.priors # default boxes\n )\n else:\n output = (\n loc.view(loc.size(0), -1, 4),\n conf.view(conf.size(0), -1, self.num_classes),\n self.priors\n )\n return output",
"def __init__(self, embed_size=256, finetune=False, cnn_type='resnet50',\n use_abs=False, no_imgnorm=False):\n super(EncoderImageFull, self).__init__()\n self.embed_size = embed_size\n self.no_imgnorm = no_imgnorm\n self.use_abs = use_abs\n\n # Load a pre-trained model\n model = get_model(name=cnn_type, num_classes=5607)\n model = torch.nn.DataParallel(model)\n model.to(\"cuda\")\n checkpoint = torch.load(\"/mnt/data2/betty/webvision_train/results/resnet50/5000classes_onemonth/model_best.tar\")\n model.load_state_dict(checkpoint['state_dict'])\n \n print(\"Successfully load the saved model at model_best.tar\") \n\n self.cnn = model\n\n\n # For efficient memory usage.\n for param in self.cnn.parameters():\n param.requires_grad = False\n\n # Replace the last fully connected layer of CNN with a new one\n \n if cnn_type.startswith('resnet'):\n self.fc = nn.Linear(self.cnn.module.fc.in_features, embed_size)\n self.cnn.module.fc = nn.Sequential()\n else:\n print(\"error in chosing the architecture\")\n return\n\n self.init_weights()",
"def forward(self, x: torch.Tensor) -> torch.Tensor:\n model_output = None\n #######################################################################\n # Student code begins\n #######################################################################\n\n (N,C,H,W) = x.shape\n\n conv_features = self.conv_layers(x)\n \n flat_features = conv_features.reshape(-1, 500)\n model_output = self.fc_layers(flat_features)\n\n\n #######################################################################\n # Student code ends\n #######################################################################\n return model_output",
"def __init__(self, n_latent_features=1024, reduced_size=64, activation=ReLU()):\n\n super().__init__()\n self.logger = logging.getLogger(AutoEncoderConvolutional.__name__)\n self.n_latent_features = n_latent_features\n self.reduced_size = reduced_size\n self.middle_layer_size = int(16 * self.reduced_size / 4 * self.reduced_size / 4)\n self.activation = activation\n\n self.logger.info(\"Construct model..\")\n self.encode_conv1 = nn.Conv2d(3, 6, kernel_size=3, padding=1)\n self.encode_pool1 = nn.MaxPool2d(2, stride=2)\n self.encode_conv2 = nn.Conv2d(6, 16, kernel_size=3, padding=1)\n self.encode_pool2 = nn.MaxPool2d(2, stride=2)\n self.encode_fc = nn.Linear(self.middle_layer_size, self.n_latent_features)\n\n self.decode_fc = nn.Linear(self.n_latent_features, self.middle_layer_size)\n self.decode_conv1 = nn.ConvTranspose2d(16, 6, kernel_size=2, stride=2)\n self.decode_conv2 = nn.ConvTranspose2d(6, 3, kernel_size=2, stride=2)\n\n self.reset_weights()\n\n self.logger.info(\"Finished instantiation\")",
"def _setup_model(self) -> torch.nn.Sequential:\r\n\r\n # setting up model\r\n ids_ = self.get_hyperparam().get_dim_ids()\r\n if self.get_hyperparam().get_value(ids_[13]):\r\n init_ = lambda mod: self._default_weight_bias_init(mod,\r\n self.get_hyperparam().get_value(ids_[14]),\r\n self.get_hyperparam().get_value(ids_[15]),\r\n self.get_hyperparam().get_value(ids_[16]))\r\n\r\n modules = []\r\n for hd in range(int(self.get_hyperparam().get_value(ids_[3]))+1):\r\n if hd == 0:\r\n act_input_size = self.get_hyperparam().get_value(ids_[0])\r\n output_size = self.get_hyperparam().get_value(ids_[4])[hd]\r\n act_fct = self.get_hyperparam().get_value(ids_[5])[hd]()\r\n elif hd == self.get_hyperparam().get_value(ids_[3]):\r\n act_input_size = self.get_hyperparam().get_value(ids_[4])[hd-1]\r\n output_size = self.get_hyperparam().get_value(ids_[1])\r\n act_fct = self.get_hyperparam().get_value(ids_[6])()\r\n else:\r\n act_input_size = self.get_hyperparam().get_value(ids_[4])[hd-1]\r\n output_size = self.get_hyperparam().get_value(ids_[4])[hd]\r\n act_fct = self.get_hyperparam().get_value(ids_[5])[hd]()\r\n \r\n if self.get_hyperparam().get_value(ids_[13]):\r\n modules.append(init_(torch.nn.Linear(int(act_input_size), int(output_size))))\r\n else:\r\n modules.append(torch.nn.Linear(int(act_input_size), int(output_size)))\r\n modules.append(act_fct)\r\n\r\n model = torch.nn.Sequential(*modules)\r\n \r\n # add process to the model\r\n try:\r\n model = self._add_init(model)\r\n except:\r\n pass \r\n \r\n self._loss_fct = self.get_hyperparam().get_value(ids_[8])()\r\n self._optimizer = self.get_hyperparam().get_value(ids_[7])(model.parameters(), lr=self.get_hyperparam().get_value(ids_[12]))\r\n self._sampling_seed = self.get_hyperparam().get_value(ids_[11])\r\n \r\n return model",
"def build_model(self) -> nn.Module:\n pass",
"def create_nncf_network(\n model: torch.nn.Module,\n config: NNCFConfig,\n dummy_forward_fn: Callable[[Module], Any] = None,\n wrap_inputs_fn: Callable = None,\n wrap_outputs_fn: Callable = None,\n) -> NNCFNetwork:\n\n if dummy_forward_fn is not None and wrap_inputs_fn is None:\n raise ValueError(\n \"A custom dummy forward function was specified, but the corresponding input wrapping function \"\n \"was not. In case a custom dummy forward function is specified for purposes of NNCF graph \"\n \"building, then the wrap_inputs_fn parameter MUST also be specified and be consistent with \"\n \"the input wrapping done in dummy_forward_fn.\"\n )\n\n # Preserve `.training`/`.requires_grad` state since we will be building NNCFNetwork in `.eval` mode\n with training_mode_switcher(model, is_training=False):\n # Compress model that will be deployed for the inference on target device. No need to compress parts of the\n # model that are used on training stage only (e.g. AuxLogits of Inception-v3 model) or unused modules with\n # weights. As a consequence, no need to care about spoiling BN statistics, as they're disabled in eval mode.\n\n input_info_list = create_input_infos(config)\n scopes_without_shape_matching = config.get(\"scopes_without_shape_matching\", [])\n ignored_scopes = config.get(\"ignored_scopes\")\n target_scopes = config.get(\"target_scopes\")\n\n nncf_network = NNCFNetwork(\n model,\n input_infos=input_info_list,\n dummy_forward_fn=dummy_forward_fn,\n wrap_inputs_fn=wrap_inputs_fn,\n wrap_outputs_fn=wrap_outputs_fn,\n ignored_scopes=ignored_scopes,\n target_scopes=target_scopes,\n scopes_without_shape_matching=scopes_without_shape_matching,\n )\n\n nncf_network.nncf.get_tracing_context().disable_trace_dynamic_graph()\n\n synchronize_all_processes_in_distributed_mode()\n return nncf_network",
"def __init__(self, model, h_units, weight_decay, dropout_rate, num_of_outputs, training_name):\n \n # inherit class constructor attributes from tf.keras.Model\n super(fc_model, self).__init__()\n \n # model name\n self.model_name = None\n \n # type of model architecture\n self.model = model\n \n # checkpoint directory\n self.checkpoint_dir = \"../Saved_Models/\" + training_name + \"_\" + \"best_models/\"\n \n # checkpoint filepath \n self.checkpoint_path = None\n \n # create intended number of dqn_block attributes\n self.block_1 = fc_block(h_units[0], weight_decay[0], dropout_rate[0])\n self.block_2 = fc_block(h_units[1], weight_decay[1], dropout_rate[1])\n self.block_3 = fc_block(h_units[2], weight_decay[2], dropout_rate[2])\n \n # create final output layer attribute \n if self.model == \"DDPG_Actor\":\n \n # output layer with continuous action for each joint\n self.outputs = tf.keras.layers.Dense(num_of_outputs, activation = 'tanh')\n \n elif self.model == \"DDPG_Critic\": \n\n # output layer is state-action value, Q, for a given state and action\n self.outputs = tf.keras.layers.Dense(num_of_outputs)",
"def encode(self, model: nn.Module, dummy_input: torch.Tensor):\n _, path = tempfile.mkstemp()\n\n try:\n torch.onnx.export(model, dummy_input, path, verbose=True)\n with open(path, \"rb\") as fd:\n converted_model = fd.read()\n except Exception as e:\n converted_model = None\n print(f\"Error occurred: {e}\\n\")\n finally:\n os.remove(path)\n return converted_model",
"def cli(sys_argv: List[str]):\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--model_definition', type=str,\n help='Path to json model definition')\n\n parser.add_argument('--model_state_path', type=str,\n help='Path where to the trained parameters')\n\n parser.add_argument('--data_path', type=str, default=TEST_PATH,\n help='path to the pickled dataframe on which prediction should be made')\n\n parser.add_argument('--numerical_preprocessor', type=str, default=NUMERICAL_PREPROCESSOR_SAVE_PATH,\n help='Path of the saved numerical preprocessor')\n\n parser.add_argument('--categorical_preprocessor', type=str, default=CATEGORICAL_PREPROCESSOR_SAVE_PATH,\n help='Path to the saved categorical preprocessor')\n\n parser.add_argument('--output_directory', type=str, default=RESULTS_DIR,\n help='Path where to save the prediction of the experiment')\n\n args = parser.parse_args(sys_argv)\n\n # # ---------- parse config file ---------- # #\n config: dict = json.load(open(args.model_definition, 'r'))\n\n model_class: str = config['model_class']\n model_name: str = config['model_name']\n numerical_input_features: List[str] = config['data']['numerical_input_features']\n categorical_input_features: List[str] = config['data']['categorical_input_features']\n output_features: List[str] = config['data']['output_features']\n batch_size_test: int = config['data']['batch_size_test']\n\n device = torch.device(CUDA if torch.cuda.is_available() else CPU)\n\n # # ---------- parse model state ---------- # #\n model_state = load_model_state(args.model_state_path, device)\n\n model_hyperparameters: dict = model_state['hyperparameters']\n model_hyperparameters.update(config['model'])\n model_hyperparameters['device']: torch.device = device\n model_weights: dict = model_state['best_model_state_dict']\n\n # # ---------- initialize model ---------- # #\n model = REGISTERED_MODELS[model_class](**model_hyperparameters).to(device)\n model.load(model_weights)\n\n # # ---------- preprocess data for inference ---------- # #\n test_loader = preprocess_for_inference(\n args.data_path,\n numerical_input_features,\n categorical_input_features,\n output_features,\n args.numerical_preprocessor,\n args.categorical_preprocessor,\n batch_size_test=batch_size_test\n )\n\n # # ---------- compute and save predictions ---------- # #\n predictions = model.predict(test_loader)\n\n # save predictions\n data_file_name = os.path.basename(args.data_path)\n data_file_name = os.path.splitext(data_file_name)[0] # remove extension\n model_path = '{}/predictions_{}_{}.pickle'.format(args.output_directory, model_name, data_file_name)\n print(' [predict] Saving predictions at: `{}`'.format(model_path))\n file_utils.save_to_pickle(\n predictions,\n path=model_path\n )\n print(' [predict] Done')",
"def main(args):\r\n\r\n # Logging info\r\n formatter = logging.Formatter('%(asctime)s %(levelname)s - '\r\n '%(funcName)s: %(message)s',\r\n '%H:%M:%S')\r\n logger = logging.getLogger(__name__)\r\n logger.setLevel('INFO')\r\n stream = logging.StreamHandler()\r\n stream.setLevel('INFO')\r\n stream.setFormatter(formatter)\r\n logger.addHandler(stream)\r\n\r\n set_seed(args.seed)\r\n device = torch.device(\r\n 'cuda' if torch.cuda.is_available() and args.cuda else 'cpu')\r\n model_name = f'{args.name}_lr{args.lr}_z{args.latent_dim}' \\\r\n + f'_h{args.hidden_dim}_p{args.p_dropout}'\r\n model_dir = os.path.join(args.results, model_name)\r\n logger.info(f'Directory for saving and loading models: {model_dir}')\r\n\r\n if not args.eval:\r\n # Model directory\r\n new_model_dir(model_dir, logger=logger)\r\n\r\n # Dataloaders\r\n train_loader, valid_loader = get_dataloaders(\r\n args.data, args.t_hours, args.n_bins,\r\n validation=True, dynamic=args.dynamic,\r\n batch_size=args.bs, logger=logger)\r\n logger.info(\r\n f'Train {args.model_type}-{args.t_hours} ' +\r\n f'with {len(train_loader.dataset)} samples')\r\n\r\n # Load model\r\n n_tokens = len(np.load(\r\n os.path.join(\r\n args.data, '_dicts', f'{args.t_hours}_{args.n_bins}.npy'),\r\n allow_pickle=True).item())\r\n model = init_model(\r\n args.model_type, n_tokens, args.latent_dim, args.hidden_dim,\r\n p_dropout=args.p_dropout, dt=args.dt,\r\n weighted=args.weighted, dynamic=args.dynamic)\r\n logger.info(f'#params in model: {get_n_param(model)}')\r\n\r\n # Optimizer\r\n optimizer = torch.optim.Adam(model.parameters(), lr=args.lr)\r\n loss_f = BCE()\r\n model = model.to(device)\r\n\r\n # Training\r\n trainer = Trainer(\r\n model, loss_f, optimizer,\r\n device=device, logger=logger, save_dir=model_dir, p_bar=args.p_bar)\r\n trainer.train(\r\n train_loader, valid_loader,\r\n epochs=args.epochs, early_stopping=args.early_stopping)\r\n\r\n # Save model\r\n metadata = vars(args)\r\n metadata['n_tokens'] = n_tokens\r\n save_model(trainer.model, model_dir, metadata=metadata)\r\n\r\n if args.test:\r\n # Load model\r\n model = load_model(model_dir, is_gpu=args.cuda)\r\n metadata = load_metadata(model_dir)\r\n\r\n # Dataloader\r\n test_loader, _ = get_dataloaders(\r\n metadata['data'], metadata['t_hours'], metadata['n_bins'],\r\n validation=False, dynamic=metadata['dynamic'], batch_size=128,\r\n shuffle=False, logger=logger)\r\n\r\n # Evaluate\r\n loss_f = BCE()\r\n evaluator = Trainer(\r\n model, loss_f,\r\n device=device, logger=logger, save_dir=model_dir, p_bar=args.p_bar)\r\n evaluator._valid_epoch(test_loader)",
"def construct_model(self, output_model_path):\n\n input_tensor = helper.make_tensor_value_info(\"input\", TensorProto.FLOAT, [1, 1, 7, 7])\n output_tensor = helper.make_tensor_value_info(\"output\", TensorProto.FLOAT, [1, 1, 8, 8])\n ini_w = helper.make_tensor(\"weight\", TensorProto.FLOAT, [1, 1, 2, 2], [1.0, 1.0, 1.0, 1.0])\n ini_b = helper.make_tensor(\"bias\", TensorProto.FLOAT, [1], [0.17])\n conv_tranpose_node = onnx.helper.make_node(\n \"ConvTranspose\",\n [\"input\", \"weight\", \"bias\"],\n [\"output\"],\n kernel_shape=[2, 2],\n output_padding=[0, 0],\n pads=[0, 0, 0, 0],\n strides=[1, 1],\n dilations=[1, 1],\n group=1,\n )\n graph = helper.make_graph(\n [conv_tranpose_node],\n \"conv_transpose_test\",\n [input_tensor],\n [output_tensor],\n initializer=[ini_w, ini_b],\n )\n model = helper.make_model(graph, opset_imports=[helper.make_opsetid(\"\", 13)])\n model.ir_version = 7 # use stable onnx ir version\n\n onnx.save(model, output_model_path)",
"def __init__(self, data_provider, growth, depth,\n total_blocks,stages, keep_prob,\n weight_decay, nesterov_momentum, model_type, dataset,\n should_save_logs, should_save_model,\n renew_logs=False,\n reduction=1.0,\n bc_mode=False,\n **kwargs):\n self.data_provider = data_provider\n self.data_shape = data_provider.data_shape # (W,H,C)\n self.n_classes = data_provider.n_classes\n self.depth = depth\n\n #self.growth_rate = growth_rate\n # how many features will be received after first convolution\n # value the same as in the original Torch code\n self.growth = growth\n self.first_output_features = growth[0] * 2\n self.total_blocks = total_blocks\n self.stages = stages\n self.group_1x1 = kwargs['group_1x1']\n self.group_3x3 = kwargs['group_3x3']\n self.condense_factor = kwargs['condense_factor']\n self.bottleneck = kwargs['bottleneck']\n self.group_lasso_lambda= kwargs['group_lasso_lambda']\n\n #self.layers_per_block = (depth - (total_blocks + 1)) // total_blocks\n self.bc_mode = bc_mode\n # compression rate at the transition layers\n self.reduction = reduction\n '''\n if not bc_mode:\n print(\"Build %s model with %d blocks, \"\n \"%d composite layers each.\" % (\n model_type, self.total_blocks, self.layers_per_block))\n if bc_mode:\n self.layers_per_block = self.layers_per_block // 2\n print(\"Build %s model with %d blocks, \"\n \"%d bottleneck layers and %d composite layers each.\" % (\n model_type, self.total_blocks, self.layers_per_block,\n self.layers_per_block))\n '''\n print(\"Reduction at transition layers: %.1f\" % self.reduction)\n\n self.keep_prob = keep_prob\n self.weight_decay = weight_decay\n self.nesterov_momentum = nesterov_momentum\n self.model_type = model_type\n self.dataset_name = dataset\n self.should_save_logs = should_save_logs\n self.should_save_model = should_save_model\n self.renew_logs = renew_logs\n self.batches_step = 0\n\n self._stage = 0\n self._define_inputs()\n self._build_graph()\n self._initialize_session()\n self._count_trainable_params()",
"def model_fn(model_dir):\n \n sym, arg_params, aux_params = mx.model.load_checkpoint('%s/102flowers' % model_dir, 0)\n mod = mx.mod.Module(symbol=sym, context=mx.cpu(), label_names=None)\n mod.bind(for_training=False, data_shapes=[('data', (1,3,224,224))], label_shapes=mod._label_shapes)\n mod.set_params(arg_params, aux_params, allow_missing=True)\n return mod",
"def generate_inputs_and_wrap_model(config_path, checkpoint_path, input_config):\n\n model = get_detector(cfg, checkpoint_path, device=\"cpu\")\n one_img, one_meta = preprocess_example_input(input_config)\n tensor_data = [one_img]\n model.forward = partial(model.forward, img_metas=[[one_meta]], return_loss=False)\n\n return model, tensor_data",
"def __init__(self,\n input_dim,\n dec_seq_len,\n out_seq_len,\n d_model=512,\n nhead=8,\n num_encoder_layers=6,\n num_decoder_layers=6,\n dim_feedforward=2048,\n dropout=0.1,\n activation='relu',\n custom_encoder=None,\n custom_decoder=None):\n super(TransformerTS, self).__init__()\n self.transform = nn.Transformer(\n d_model=d_model,\n nhead=nhead,\n num_encoder_layers=num_encoder_layers,\n num_decoder_layers=num_decoder_layers,\n dim_feedforward=dim_feedforward,\n dropout=dropout,\n activation=activation,\n custom_encoder=custom_encoder,\n custom_decoder=custom_decoder\n )\n self.pos = PositionalEncoding(d_model)\n self.enc_input_fc = nn.Linear(input_dim, d_model)\n self.dec_input_fc = nn.Linear(input_dim, d_model)\n self.out_fc = nn.Linear(dec_seq_len * d_model, out_seq_len)\n self.dec_seq_len = dec_seq_len",
"def forward(self, img):\n device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\n H, W = img.size()[2], img.size()[3]\n #print('x',x)\n #print('x.shape',x.shape) ## 32 x 3 x 96 x 128\n z32 = self.start(img)\n z64 = self.layer1(z32) + self.layer1_ds(z32)\n #print('z1',z64.shape)\n z128 = self.layer2(z64) + self.layer2_ds(z64)\n #print('z2',z128.shape)\n z256 = self.layer3(z128) + self.layer3_ds(z128)\n #print('z3',z256.shape)\n z256d = self.drop_out_layer(z256)\n #print('z_drop',z256d.shape)\n z256u = self.layer4(z256d)\n #print('z4',z256u.shape)\n z128u = self.layer5(torch.cat((z256u, F.interpolate(z256d,size=z256u.size()[2:] )), 1))\n #print('z5',z128u.shape)\n z64u = self.layer6(torch.cat((z128u, F.interpolate(z128,size=z128u.size()[2:] )), 1))\n #print('z6',z64u.shape)\n\n z32u = self.final(torch.cat((z64u, F.interpolate(z64,size=z64u.size()[2:] )), 1))\n #print('z6_plus',z32u.shape)\n\n #print('z7_result',self.classifer(z32u)[:, :, :H, :W].shape)\n result_class = self.classifer(z32u)[:, :, :H, :W]\n\n #print('model result shape',result_class.shape)\n ## 16 x 1 x 300 x 400\n\n # using soft argmax\n spa_argmax = spatial_argmax(torch.squeeze(result_class,1))\n\n #one hot with spatial argmax\n #xy_val = torch.zeros(spa_argmax.shape).float()\n #for idx, pt in enumerate(spa_argmax):\n # x_val = (pt[0]+1.0)*63.5\n # y_val = (pt[1]+1.0)*47.5\n # # for each batch. [0...127][0...95]\n # xy_val[idx][0] = x_val\n # xy_val[idx][1] = y_val\n\n xy_val = (spa_argmax+1.0).to(device)\n #print('spa_argmax',spa_argmax)\n scaling_factor = torch.FloatTensor([[(W-1)/2,0.],[0.,(H-1)/2]]).to(device)\n #scaling_factor = torch.FloatTensor([[63.5,0.],[0.,44.5]]).to(device)\n xy_val = xy_val.mm(scaling_factor)\n\n return xy_val",
"def __init__(self, embed_size):\n super(ImgEncoder, self).__init__()\n model = models.vgg19(pretrained=True)\n in_features = model.classifier[-1].in_features # input size of feature vector\n model.classifier = nn.Sequential(\n *list(model.classifier.children())[:-1]) # remove last fc layer\n\n self.model = model # loaded model without last fc layer\n self.fc = nn.Linear(in_features, embed_size) # feature vector of image",
"def get_xception_based_model() -> nn.Module:\n \"\"\"INSERT YOUR CODE HERE, overrun return.\"\"\"\n custom_network = build_xception_backbone()\n\n base_params = sum(p.numel() for p in custom_network.parameters() if p.requires_grad)\n custom_network.fc = nn.Sequential(\n nn.Linear(2048,1000),\n nn.ReLU(), \n nn.Linear(1000,256),\n nn.ReLU(), \n nn.Linear(256,64),\n nn.ReLU(), \n nn.Linear(64,2),\n )\n s_params = sum(p.numel() for p in custom_network.parameters() if p.requires_grad)\n params_diff = s_params - base_params\n return custom_network",
"def _construct_model(self):\n self.model = AutoEncoderConvolutional(self.n_latent_features, self.reduced_size)\n self.model = self.model.to(self.device, non_blocking=True)",
"def __init__(self, slug, num_classes=2, num_filters=128,\n num_filters_fpn=256, upconv=False, pretrained=True, bifpn=False):\n\n super().__init__()\n\n # Feature Pyramid Network (FPN) with four feature maps of resolutions\n # 1/4, 1/8, 1/16, 1/32 and `num_filters` filters for all feature maps.\n if \"eff\" in slug:\n self.fpn = EffFPN(slug=slug, num_filters=num_filters_fpn,\n pretrained=pretrained, bifpn=bifpn)\n else:\n self.fpn = FPN(slug=slug, num_filters=num_filters_fpn,\n pretrained=pretrained, bifpn=bifpn)\n # The segmentation heads on top of the FPN\n\n self.head1 = nn.Sequential(Conv3x3(num_filters_fpn, num_filters),\n Conv3x3(num_filters, num_filters))\n self.head2 = nn.Sequential(Conv3x3(num_filters_fpn, num_filters),\n Conv3x3(num_filters, num_filters))\n self.head3 = nn.Sequential(Conv3x3(num_filters_fpn, num_filters),\n Conv3x3(num_filters, num_filters))\n self.head4 = nn.Sequential(Conv3x3(num_filters_fpn, num_filters),\n Conv3x3(num_filters, num_filters))\n\n self.hm = nn.Conv2d(4 * num_filters, 1, 3, padding=1)\n\n self.classes_embedding = nn.Sequential(\n nn.Conv2d(4 * num_filters, 4 * num_filters, 3, padding=1),\n nn.ReLU(inplace=True))\n\n self.classes = nn.Sequential(\n nn.Dropout(0.5),\n nn.Conv2d(4 * num_filters, num_classes, 1)\n )\n\n if upconv:\n self.up8 = nn.ConvTranspose2d(\n num_filters, num_filters, 8, stride=8)\n self.up4 = nn.ConvTranspose2d(\n num_filters, num_filters, 4, stride=4)\n self.up2 = nn.ConvTranspose2d(\n num_filters, num_filters, 2, stride=2)\n else:\n self.up8 = torch.nn.Upsample(scale_factor=8, mode='nearest')\n self.up4 = torch.nn.Upsample(scale_factor=4, mode='nearest')\n self.up2 = torch.nn.Upsample(scale_factor=2, mode='nearest')",
"def forward(model: nn.Module, inputs: torch.Tensor, device: torch.device):\n\n model.eval()\n model.to(device)\n\n with torch.no_grad():\n inputs = inputs.to(device)\n return model(inputs)",
"def build_model(cls, args, task):\n # make sure that all args are properly defaulted (in case there are any new ones)\n base_architecture(args)\n\n decoder_embed_dict = None\n if args.decoder_embed_path:\n decoder_embed_dict = utils.parse_embedding(args.decoder_embed_path)\n utils.print_embed_overlap(decoder_embed_dict, task.target_dictionary)\n\n out_channels = speech_utils.eval_str_nested_list_or_tuple(args.encoder_conv_channels, type=int)\n kernel_sizes = speech_utils.eval_str_nested_list_or_tuple(args.encoder_conv_kernel_sizes, type=int)\n strides = speech_utils.eval_str_nested_list_or_tuple(args.encoder_conv_strides, type=int)\n logger.info('input feature dimension: {}, channels: {}'.format(task.feat_dim, task.feat_in_channels))\n assert task.feat_dim % task.feat_in_channels == 0\n conv_layers = ConvBNReLU(\n out_channels, kernel_sizes, strides, in_channels=task.feat_in_channels,\n ) if out_channels is not None else None\n\n fconv_encoder_input_size = task.feat_dim // task.feat_in_channels\n if conv_layers is not None:\n for stride in strides:\n if isinstance(stride, (list, tuple)):\n assert len(stride) > 0\n s = stride[1] if len(stride) > 1 else stride[0]\n else:\n assert isinstance(stride, int)\n s = stride\n fconv_encoder_input_size = (fconv_encoder_input_size + s - 1) // s\n fconv_encoder_input_size *= out_channels[-1]\n\n encoder = SpeechFConvEncoder(\n conv_layers_before=conv_layers,\n input_size=fconv_encoder_input_size,\n embed_dim=args.encoder_embed_dim,\n convolutions=eval(args.encoder_layers),\n dropout=args.dropout,\n )\n decoder = SpeechFConvDecoder(\n dictionary=task.target_dictionary,\n embed_dim=args.decoder_embed_dim,\n embed_dict=decoder_embed_dict,\n convolutions=eval(args.decoder_layers),\n out_embed_dim=args.decoder_out_embed_dim,\n attention=eval(args.decoder_attention),\n dropout=args.dropout,\n max_positions=args.max_target_positions,\n share_embed=args.share_input_output_embed,\n positional_embeddings=args.decoder_positional_embed,\n )\n return cls(encoder, decoder)"
] | [
"0.6455248",
"0.64005536",
"0.63606685",
"0.6281817",
"0.6148253",
"0.614193",
"0.6059045",
"0.60385054",
"0.6027797",
"0.60148734",
"0.60109484",
"0.59764093",
"0.5941529",
"0.5936054",
"0.59347934",
"0.59182537",
"0.59015125",
"0.5899236",
"0.58604336",
"0.58340454",
"0.582944",
"0.58144176",
"0.57966167",
"0.57762283",
"0.5767096",
"0.57593095",
"0.5751081",
"0.5744219",
"0.5742118",
"0.57418245"
] | 0.68160737 | 0 |
Create compression algorithm builders by a given list of algorithm names. | def create_compression_algorithm_builder(config: NNCFConfig, should_init=True) -> PTCompressionAlgorithmBuilder:
algo_names = extract_algorithm_names(config)
return create_compression_algorithm_builder_from_algo_names(algo_names, config, should_init) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_compression_algorithm_builder_from_algo_names(\n algo_names: List[str], config: NNCFConfig, should_init: bool\n) -> PTCompressionAlgorithmBuilder:\n if not algo_names:\n algo_builder_classes = [NoCompressionAlgorithmBuilder]\n else:\n algo_builder_classes = [PT_COMPRESSION_ALGORITHMS.get(algo_name) for algo_name in algo_names]\n if len(algo_builder_classes) == 1:\n builder = next(iter(algo_builder_classes))(config, should_init=should_init)\n else:\n builder = PTCompositeCompressionAlgorithmBuilder(config, should_init=should_init)\n return builder",
"def algorithms_factory():\n all_algorithms = []\n for algorithm_module in ALGORITHMS:\n module_name = \"{}.{}\".format(PREFIX, algorithm_module)\n module = importlib.import_module(module_name)\n for item in dir(module):\n item = getattr(module, item)\n try:\n if issubclass(item, base.Algorithm):\n item.is_implemented()\n else:\n continue\n except (exceptions.AlgorithmsNotImplemented, TypeError):\n continue\n\n all_algorithms.append(item)\n\n return all_algorithms",
"def _create_algorithm(algo_name, algo_options, origin):\n if origin == \"nlopt\":\n algo = pg.algorithm(pg.nlopt(solver=algo_name))\n for option, val in algo_options.items():\n setattr(algo.extract(pg.nlopt), option, val)\n elif origin == \"pygmo\":\n pygmo_uda = getattr(pg, algo_name)\n algo_options = algo_options.copy()\n if \"popsize\" in algo_options:\n del algo_options[\"popsize\"]\n algo = pg.algorithm(pygmo_uda(**algo_options))\n\n return algo",
"def create_algorithm(AlgorithmName=None, AlgorithmDescription=None, TrainingSpecification=None, InferenceSpecification=None, ValidationSpecification=None, CertifyForMarketplace=None):\n pass",
"def create_compression_wdf(wgb_fnames):\n # assumes filename is the same except the compression extension\n wdf = {}\n cnt = 1\n for child in wgb_fnames:\n parent = os.path.splitext(child)[0]\n wdf['derived_%s' % cnt] = {provdefs.PROV_PARENTS: parent, provdefs.PROV_CHILDREN: child}\n cnt += 1\n\n return wdf",
"def bvp_algorithm(name, **kwargs):\n # Load algorithm from the package\n for algorithm in available_algorithms:\n if name.lower() == algorithm.__name__.lower():\n return algorithm(**kwargs)\n else:\n # Raise exception if the loop completes without finding an algorithm by the given name\n raise ValueError('Algorithm ' + name + ' not found')",
"def genHashFuncs(num_of_func, baskets):\n func_list = list()\n\n def build_func(param_a, param_b, param_m):\n def apply_funcs(input_x):\n return format((param_a * input_x + param_b) % param_m, 'b') \\\n .zfill(NUM_OF_BIT)\n\n return apply_funcs\n\n param_as = random.sample(range(1, sys.maxsize - 1), num_of_func)\n param_bs = random.sample(range(0, sys.maxsize - 1), num_of_func)\n for a, b in zip(param_as, param_bs):\n func_list.append(build_func(a, b, baskets))\n\n return func_list",
"def _create_activation_quantizers(self, tensor_names: List[str], activation_bw: int,\n round_mode: libpymo.RoundingMode, quant_scheme: QuantScheme,\n is_symmetric: bool, data_type: QuantizationDataType) -> Dict[str, StaticGridPerTensorQuantizer]:\n quantizers = {}\n for layer in range(self.num_layers):\n for name in tensor_names:\n name_in_layer = name.format(layer)\n group_name = QcQuantizeRecurrent._get_group_name(name_in_layer, layer)\n if group_name:\n if group_name not in self._grouped_quantizers:\n self._grouped_quantizers[group_name] = \\\n tensor_quantizer_factory(activation_bw,\n round_mode,\n quant_scheme,\n use_symmetric_encodings=is_symmetric,\n enabled_by_default=False,\n data_type=data_type)\n quantizers[name_in_layer] = self._grouped_quantizers[group_name]\n else:\n quantizers[name_in_layer] = tensor_quantizer_factory(\n activation_bw,\n round_mode,\n quant_scheme,\n use_symmetric_encodings=is_symmetric,\n enabled_by_default=False,\n data_type=data_type)\n return quantizers",
"def build(keys: List[str]):\n api = API()\n api.build(*keys)",
"def abstract_builder(pkg_name, name_list, return_list = False):\n # some handlers needs dicts (commands, beacons), while some need lists (encoders,decoders, etc)\n if return_list:\n ret_val = []\n else:\n ret_val = {}\n \n # Go through the string names and get the appropriate Class from the appropriate module.\n # Once you have that, do a dynamic import so we can use it, then map that class type\n # so we can instantiate the appropriate instance when going through a beaconing interation.\n for module_name in name_list:\n\n module_class = Controller.easy_import(pkg_name, module_name) # imports the class\n if return_list:\n ret_val.append(module_class) # adds the Class object to a list\n else:\n ret_val[module_name] = module_class # maps the Class object to the appropriate module name\n \n return ret_val",
"def generate():\n strategies = []\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy16h))\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy4))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # Add RPS Geometry\n strategies.extend(\n generate_meta_strategy_pair(GeometryV4Strategy))\n\n # Add Iocaine Powder strategy\n strategies.extend(\n generate_meta_strategy_pair(IocanePowderStrategy))\n\n # Add are you a lucker strategies\n strategies.extend(\n generate_meta_strategy_pair(AreYouALuckerStrategy))\n\n # Add Greenberg strategies\n strategies.extend(\n generate_meta_strategy_pair(GreenbergStrategy))\n\n # Add RPS Meta Fix strategies\n strategies.extend(\n generate_meta_strategy_pair(RPSMetaFixStrategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations",
"def MakeBucketsCommand(self, args, unused_sub_opts=None, headers=None,\n debug=0):\n for bucket_uri_str in args:\n bucket_uri = self.StorageUri(bucket_uri_str, debug=debug)\n print 'Creating %s...' % bucket_uri\n bucket_uri.create_bucket(headers)",
"def generate():\n strategies = []\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy16h))\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy4))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # Add RPS Geometry\n strategies.extend(\n generate_meta_strategy_pair(GeometryV4Strategy))\n\n # Add Iocaine Powder strategy\n strategies.extend(\n generate_meta_strategy_pair(IocanePowderStrategy))\n\n # Add are you a lucker strategies\n strategies.extend(\n generate_meta_strategy_pair(AreYouALuckerStrategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations",
"def builder_factory(\n agent_types: Dict[ma_types.AgentID, ma_types.GenericAgent],\n agent_configs: Dict[ma_types.AgentID, ma_types.AgentConfig],\n init_builder_fn: Optional[ma_types.InitBuilderFn] = None\n) -> Dict[ma_types.AgentID, jax_builders.GenericActorLearnerBuilder]:\n init_fn = init_builder_fn or init_default_builder\n builders = {}\n for agent_id, agent_type in agent_types.items():\n builders[agent_id] = init_fn(agent_type, agent_configs[agent_id])\n return builders",
"def getAllContributingAlgorithmsToBest(algnamelist, target_lb=1e-8, \n target_ub=1e2):\n \n print \"Generating best algorithm data from given algorithm list...\\n\", \n customgenerate(algnamelist)\n \n bestalgfilepath = 'bestCustomAlg'\n picklefilename = os.path.join(bestalgfilepath, 'bestalg.pickle')\n fid = open(picklefilename, 'r')\n bestalgentries = pickle.load(fid)\n fid.close()\n print 'loading of best algorithm data done.'\n \n countsperalgorithm = {}\n for (d, f) in bestalgentries:\n print 'dimension:', d, ', function:', f\n print f\n setofalgs = set(bestalgentries[d,f].algs)\n # pre-processing data to only look at targets >= target_lb:\n correctedbestalgentries = []\n for i in range(0,len(bestalgentries[d,f].target)):\n if ((bestalgentries[d,f].target[i] >= target_lb) and\n (bestalgentries[d,f].target[i] <= target_ub)):\n \n correctedbestalgentries.append(bestalgentries[d,f].algs[i])\n print len(correctedbestalgentries)\n # now count how often algorithm a is best for the extracted targets\n for a in setofalgs:\n # use setdefault to initialize with zero if a entry not existant:\n countsperalgorithm.setdefault((d, a), 0) \n countsperalgorithm[(d,a)] += correctedbestalgentries.count(a)\n \n selectedalgsperdimension = {}\n for (d,a) in sorted(countsperalgorithm):\n if not selectedalgsperdimension.has_key(d):\n selectedalgsperdimension[d] = []\n selectedalgsperdimension[d].append((countsperalgorithm[(d,a)], a))\n \n for d in sorted(selectedalgsperdimension):\n print d, 'D:'\n for (count, alg) in sorted(selectedalgsperdimension[d], reverse=True):\n print count, alg\n print '\\n'\n \n \n print \" done.\"",
"def generate():\n strategies = []\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy16h))\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy4))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # Add RPS Geometry\n strategies.extend(\n generate_meta_strategy_pair(GeometryV4Strategy))\n\n # Add Iocaine Powder strategy\n strategies.extend(\n generate_meta_strategy_pair(IocanePowderStrategy))\n\n # Add are you a lucker strategies\n strategies.extend(\n generate_meta_strategy_pair(AreYouALuckerStrategy))\n\n # Add RPS Meta Fix strategies\n strategies.extend(\n generate_meta_strategy_pair(RPSMetaFixStrategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations",
"def _set_up_pacman_algorithm_listings(\n self, algorithms, optional_algorithms, xml_paths, inputs,\n required_outputs):\n\n # deduce if the algorithms are internal or external\n algorithms_names = list(algorithms)\n\n # set up XML reader for standard PACMAN algorithms XML file reader\n # (used in decode_algorithm_data_objects function)\n xml_paths.append(os.path.join(\n os.path.dirname(operations.__file__),\n \"algorithms_metadata.xml\"))\n xml_paths.append(os.path.join(\n os.path.dirname(algorithm_reports.__file__),\n \"reports_metadata.xml\"))\n\n converter_xml_path = list()\n converter_xml_path.append(os.path.join(\n os.path.dirname(file_format_converters.__file__),\n \"converter_algorithms_metadata.xml\"))\n\n # decode the algorithms specs\n xml_decoder = ConvertAlgorithmsMetadata(xml_paths)\n algorithm_data_objects = xml_decoder.decode_algorithm_data_objects()\n xml_decoder = ConvertAlgorithmsMetadata(converter_xml_path)\n converter_algorithm_data_objects = \\\n xml_decoder.decode_algorithm_data_objects()\n\n # filter for just algorithms we want to use\n algorithm_data = self._get_algorithm_data(\n algorithms_names, algorithm_data_objects,\n converter_algorithm_data_objects)\n optional_algorithms_datas = self._get_algorithm_data(\n optional_algorithms, algorithm_data_objects,\n converter_algorithm_data_objects)\n optional_algorithms_datas.extend(\n converter_algorithm_data_objects.values())\n\n # sort_out_order_of_algorithms for execution\n self._sort_out_order_of_algorithms(\n inputs, required_outputs, algorithm_data,\n optional_algorithms_datas)",
"def generate():\n strategies = []\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy16h))\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy4))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # Add RPS Geometry\n strategies.extend(\n generate_meta_strategy_pair(GeometryV4Strategy))\n\n # Add Iocaine Powder strategy\n strategies.extend(\n generate_meta_strategy_pair(IocanePowderStrategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations",
"def create_files(filename_list, encoding):\n for filename in filename_list:\n codecs.open(filename, 'w', encoding).close()",
"def compress(names, method):\n if not isinstance(names, list):\n ValueError(\"Expected a list of names, got a {0}.\".format(type(names)))\n compressions = []\n raw_compressions = map(method, names)\n # Double metaphone returns a list of tuples, so need to unpack it\n for item in raw_compressions:\n if isinstance(item, (list, tuple)):\n compressions.extend([unicode(sub) for sub in item if sub != ''])\n elif item != '':\n compressions.append(unicode(item))\n return compressions if compressions else ['']",
"def register_algorithm(self, builder: 'CompressionAlgorithmBuilder'):\n self._builders.append(builder)",
"def _prepare_wick(term, comparator, contractor, symms, resolvers):\n\n symms = {} if symms is None else symms\n contr_all = comparator is None\n\n if contr_all:\n contrs = _get_all_contrs(term, contractor, resolvers=resolvers)\n vec_order = None\n else:\n term = term.canon4normal(symms)\n vec_order, contrs = _sort_vecs(\n term, comparator, contractor, resolvers=resolvers\n )\n\n # schemes = _compute_wick_schemes(vec_order, contrs)\n schemes = compose_wick(vec_order, contrs)\n\n return term, contrs, schemes",
"def generate():\n strategies = []\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy16h))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # Add RPS Geometry\n strategies.extend(\n generate_meta_strategy_pair(GeometryV4Strategy))\n\n # Add Iocaine Powder strategy\n strategies.extend(\n generate_meta_strategy_pair(IocanePowderStrategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations",
"def _compile_packers(endian):\n return {\n \"B\": struct.Struct(endian + \"B\"),\n \"b\": struct.Struct(endian + \"b\"),\n \"h\": struct.Struct(endian + \"h\"),\n \"H\": struct.Struct(endian + \"H\"),\n \"l\": struct.Struct(endian + \"l\"),\n \"L\": struct.Struct(endian + \"L\"),\n \"d\": struct.Struct(endian + \"d\"),\n \"f\": struct.Struct(endian + \"f\"),\n }",
"def generate():\n strategies = []\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy16h))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # Add RPS Geometry\n strategies.extend(\n generate_meta_strategy_pair(GeometryV4Strategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations",
"def build_encoder(\n self,\n build_encoder: dict,\n target_dir: str,\n cache_dir: str,\n train_csv_path: str,\n valid_csv_path: str,\n test_csv_paths: list,\n get_path_only: bool = False,\n ):\n encoder_path = Path(target_dir) / \"encoder.pkl\"\n if get_path_only:\n return encoder_path\n\n train_csv = pd.read_csv(train_csv_path)\n valid_csv = pd.read_csv(valid_csv_path)\n test_csvs = [pd.read_csv(path) for path in test_csv_paths]\n all_csv = pd.concat([train_csv, valid_csv, *test_csvs])\n\n multilabels = [\n [label.strip() for label in multilabel.split(\";\")]\n for multilabel in all_csv[\"labels\"].tolist()\n ]\n encoder = CategoryEncoders(\n [single_category_labels for single_category_labels in zip(*multilabels)]\n )\n with open(encoder_path, \"wb\") as f:\n pickle.dump(encoder, f)\n\n return encoder",
"def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n for limit in limits:\n for source in sources:\n strategies.extend(\n generate_meta_strategy_pair(\n RFindStrategy,\n limit=limit,\n src=source,\n shenanigans=False,\n ))\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy16h))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # Add RPS Geometry\n strategies.extend(\n generate_meta_strategy_pair(GeometryV4Strategy))\n\n # Add Iocaine Powder strategy\n strategies.extend(\n generate_meta_strategy_pair(IocanePowderStrategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations",
"def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n for limit in limits:\n for source in sources:\n strategies.extend(\n generate_meta_strategy_pair(\n RFindStrategy,\n limit=limit,\n src=source,\n shenanigans=False,\n ))\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy16h))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # Add RPS Geometry\n strategies.extend(\n generate_meta_strategy_pair(GeometryV4Strategy))\n\n # Add Iocaine Powder strategy\n strategies.extend(\n generate_meta_strategy_pair(IocanePowderStrategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations",
"def algorithms():\n algorith_paradigms = ['Divide-and-conquer', 'Backtrackig', 'Greedy-Algorithms', 'Dynamic-programming']\n return algorith_paradigms",
"def generate():\n strategies = []\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy16h))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations"
] | [
"0.8176492",
"0.58072144",
"0.56273663",
"0.5360396",
"0.5243267",
"0.5199738",
"0.5186726",
"0.5184808",
"0.5183975",
"0.5176187",
"0.5148908",
"0.51379925",
"0.51363987",
"0.5109506",
"0.5105624",
"0.51028985",
"0.5090822",
"0.50801146",
"0.50719637",
"0.5062348",
"0.5056173",
"0.50211424",
"0.50145674",
"0.49952805",
"0.49816802",
"0.49771792",
"0.4938226",
"0.4938226",
"0.49328366",
"0.49317572"
] | 0.62809783 | 1 |
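A minimal usage sketch for the create_compression_algorithm_builder helper shown in the record above, assuming an NNCF-style setup: the config contents below and the availability of NNCFConfig.from_dict are assumptions, and the helper itself is taken to be in scope exactly as defined in the record's document field.

# Sketch only: the config values are illustrative assumptions, not taken from the record.
from nncf import NNCFConfig  # assumption: an NNCF-style config class is importable

config = NNCFConfig.from_dict({
    "input_info": {"sample_size": [1, 3, 224, 224]},
    "compression": {"algorithm": "quantization"},
})

# The helper extracts the algorithm names from the config and returns the
# matching PTCompressionAlgorithmBuilder (or a composite builder for several).
builder = create_compression_algorithm_builder(config, should_init=True)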
Create compression algorithm builders by a given list of algorithm names. | def create_compression_algorithm_builder_from_algo_names(
algo_names: List[str], config: NNCFConfig, should_init: bool
) -> PTCompressionAlgorithmBuilder:
if not algo_names:
algo_builder_classes = [NoCompressionAlgorithmBuilder]
else:
algo_builder_classes = [PT_COMPRESSION_ALGORITHMS.get(algo_name) for algo_name in algo_names]
if len(algo_builder_classes) == 1:
builder = next(iter(algo_builder_classes))(config, should_init=should_init)
else:
builder = PTCompositeCompressionAlgorithmBuilder(config, should_init=should_init)
return builder | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_compression_algorithm_builder(config: NNCFConfig, should_init=True) -> PTCompressionAlgorithmBuilder:\n algo_names = extract_algorithm_names(config)\n return create_compression_algorithm_builder_from_algo_names(algo_names, config, should_init)",
"def algorithms_factory():\n all_algorithms = []\n for algorithm_module in ALGORITHMS:\n module_name = \"{}.{}\".format(PREFIX, algorithm_module)\n module = importlib.import_module(module_name)\n for item in dir(module):\n item = getattr(module, item)\n try:\n if issubclass(item, base.Algorithm):\n item.is_implemented()\n else:\n continue\n except (exceptions.AlgorithmsNotImplemented, TypeError):\n continue\n\n all_algorithms.append(item)\n\n return all_algorithms",
"def _create_algorithm(algo_name, algo_options, origin):\n if origin == \"nlopt\":\n algo = pg.algorithm(pg.nlopt(solver=algo_name))\n for option, val in algo_options.items():\n setattr(algo.extract(pg.nlopt), option, val)\n elif origin == \"pygmo\":\n pygmo_uda = getattr(pg, algo_name)\n algo_options = algo_options.copy()\n if \"popsize\" in algo_options:\n del algo_options[\"popsize\"]\n algo = pg.algorithm(pygmo_uda(**algo_options))\n\n return algo",
"def create_algorithm(AlgorithmName=None, AlgorithmDescription=None, TrainingSpecification=None, InferenceSpecification=None, ValidationSpecification=None, CertifyForMarketplace=None):\n pass",
"def create_compression_wdf(wgb_fnames):\n # assumes filename is the same except the compression extension\n wdf = {}\n cnt = 1\n for child in wgb_fnames:\n parent = os.path.splitext(child)[0]\n wdf['derived_%s' % cnt] = {provdefs.PROV_PARENTS: parent, provdefs.PROV_CHILDREN: child}\n cnt += 1\n\n return wdf",
"def bvp_algorithm(name, **kwargs):\n # Load algorithm from the package\n for algorithm in available_algorithms:\n if name.lower() == algorithm.__name__.lower():\n return algorithm(**kwargs)\n else:\n # Raise exception if the loop completes without finding an algorithm by the given name\n raise ValueError('Algorithm ' + name + ' not found')",
"def genHashFuncs(num_of_func, baskets):\n func_list = list()\n\n def build_func(param_a, param_b, param_m):\n def apply_funcs(input_x):\n return format((param_a * input_x + param_b) % param_m, 'b') \\\n .zfill(NUM_OF_BIT)\n\n return apply_funcs\n\n param_as = random.sample(range(1, sys.maxsize - 1), num_of_func)\n param_bs = random.sample(range(0, sys.maxsize - 1), num_of_func)\n for a, b in zip(param_as, param_bs):\n func_list.append(build_func(a, b, baskets))\n\n return func_list",
"def _create_activation_quantizers(self, tensor_names: List[str], activation_bw: int,\n round_mode: libpymo.RoundingMode, quant_scheme: QuantScheme,\n is_symmetric: bool, data_type: QuantizationDataType) -> Dict[str, StaticGridPerTensorQuantizer]:\n quantizers = {}\n for layer in range(self.num_layers):\n for name in tensor_names:\n name_in_layer = name.format(layer)\n group_name = QcQuantizeRecurrent._get_group_name(name_in_layer, layer)\n if group_name:\n if group_name not in self._grouped_quantizers:\n self._grouped_quantizers[group_name] = \\\n tensor_quantizer_factory(activation_bw,\n round_mode,\n quant_scheme,\n use_symmetric_encodings=is_symmetric,\n enabled_by_default=False,\n data_type=data_type)\n quantizers[name_in_layer] = self._grouped_quantizers[group_name]\n else:\n quantizers[name_in_layer] = tensor_quantizer_factory(\n activation_bw,\n round_mode,\n quant_scheme,\n use_symmetric_encodings=is_symmetric,\n enabled_by_default=False,\n data_type=data_type)\n return quantizers",
"def build(keys: List[str]):\n api = API()\n api.build(*keys)",
"def abstract_builder(pkg_name, name_list, return_list = False):\n # some handlers needs dicts (commands, beacons), while some need lists (encoders,decoders, etc)\n if return_list:\n ret_val = []\n else:\n ret_val = {}\n \n # Go through the string names and get the appropriate Class from the appropriate module.\n # Once you have that, do a dynamic import so we can use it, then map that class type\n # so we can instantiate the appropriate instance when going through a beaconing interation.\n for module_name in name_list:\n\n module_class = Controller.easy_import(pkg_name, module_name) # imports the class\n if return_list:\n ret_val.append(module_class) # adds the Class object to a list\n else:\n ret_val[module_name] = module_class # maps the Class object to the appropriate module name\n \n return ret_val",
"def generate():\n strategies = []\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy16h))\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy4))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # Add RPS Geometry\n strategies.extend(\n generate_meta_strategy_pair(GeometryV4Strategy))\n\n # Add Iocaine Powder strategy\n strategies.extend(\n generate_meta_strategy_pair(IocanePowderStrategy))\n\n # Add are you a lucker strategies\n strategies.extend(\n generate_meta_strategy_pair(AreYouALuckerStrategy))\n\n # Add Greenberg strategies\n strategies.extend(\n generate_meta_strategy_pair(GreenbergStrategy))\n\n # Add RPS Meta Fix strategies\n strategies.extend(\n generate_meta_strategy_pair(RPSMetaFixStrategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations",
"def MakeBucketsCommand(self, args, unused_sub_opts=None, headers=None,\n debug=0):\n for bucket_uri_str in args:\n bucket_uri = self.StorageUri(bucket_uri_str, debug=debug)\n print 'Creating %s...' % bucket_uri\n bucket_uri.create_bucket(headers)",
"def generate():\n strategies = []\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy16h))\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy4))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # Add RPS Geometry\n strategies.extend(\n generate_meta_strategy_pair(GeometryV4Strategy))\n\n # Add Iocaine Powder strategy\n strategies.extend(\n generate_meta_strategy_pair(IocanePowderStrategy))\n\n # Add are you a lucker strategies\n strategies.extend(\n generate_meta_strategy_pair(AreYouALuckerStrategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations",
"def builder_factory(\n agent_types: Dict[ma_types.AgentID, ma_types.GenericAgent],\n agent_configs: Dict[ma_types.AgentID, ma_types.AgentConfig],\n init_builder_fn: Optional[ma_types.InitBuilderFn] = None\n) -> Dict[ma_types.AgentID, jax_builders.GenericActorLearnerBuilder]:\n init_fn = init_builder_fn or init_default_builder\n builders = {}\n for agent_id, agent_type in agent_types.items():\n builders[agent_id] = init_fn(agent_type, agent_configs[agent_id])\n return builders",
"def getAllContributingAlgorithmsToBest(algnamelist, target_lb=1e-8, \n target_ub=1e2):\n \n print \"Generating best algorithm data from given algorithm list...\\n\", \n customgenerate(algnamelist)\n \n bestalgfilepath = 'bestCustomAlg'\n picklefilename = os.path.join(bestalgfilepath, 'bestalg.pickle')\n fid = open(picklefilename, 'r')\n bestalgentries = pickle.load(fid)\n fid.close()\n print 'loading of best algorithm data done.'\n \n countsperalgorithm = {}\n for (d, f) in bestalgentries:\n print 'dimension:', d, ', function:', f\n print f\n setofalgs = set(bestalgentries[d,f].algs)\n # pre-processing data to only look at targets >= target_lb:\n correctedbestalgentries = []\n for i in range(0,len(bestalgentries[d,f].target)):\n if ((bestalgentries[d,f].target[i] >= target_lb) and\n (bestalgentries[d,f].target[i] <= target_ub)):\n \n correctedbestalgentries.append(bestalgentries[d,f].algs[i])\n print len(correctedbestalgentries)\n # now count how often algorithm a is best for the extracted targets\n for a in setofalgs:\n # use setdefault to initialize with zero if a entry not existant:\n countsperalgorithm.setdefault((d, a), 0) \n countsperalgorithm[(d,a)] += correctedbestalgentries.count(a)\n \n selectedalgsperdimension = {}\n for (d,a) in sorted(countsperalgorithm):\n if not selectedalgsperdimension.has_key(d):\n selectedalgsperdimension[d] = []\n selectedalgsperdimension[d].append((countsperalgorithm[(d,a)], a))\n \n for d in sorted(selectedalgsperdimension):\n print d, 'D:'\n for (count, alg) in sorted(selectedalgsperdimension[d], reverse=True):\n print count, alg\n print '\\n'\n \n \n print \" done.\"",
"def generate():\n strategies = []\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy16h))\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy4))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # Add RPS Geometry\n strategies.extend(\n generate_meta_strategy_pair(GeometryV4Strategy))\n\n # Add Iocaine Powder strategy\n strategies.extend(\n generate_meta_strategy_pair(IocanePowderStrategy))\n\n # Add are you a lucker strategies\n strategies.extend(\n generate_meta_strategy_pair(AreYouALuckerStrategy))\n\n # Add RPS Meta Fix strategies\n strategies.extend(\n generate_meta_strategy_pair(RPSMetaFixStrategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations",
"def _set_up_pacman_algorithm_listings(\n self, algorithms, optional_algorithms, xml_paths, inputs,\n required_outputs):\n\n # deduce if the algorithms are internal or external\n algorithms_names = list(algorithms)\n\n # set up XML reader for standard PACMAN algorithms XML file reader\n # (used in decode_algorithm_data_objects function)\n xml_paths.append(os.path.join(\n os.path.dirname(operations.__file__),\n \"algorithms_metadata.xml\"))\n xml_paths.append(os.path.join(\n os.path.dirname(algorithm_reports.__file__),\n \"reports_metadata.xml\"))\n\n converter_xml_path = list()\n converter_xml_path.append(os.path.join(\n os.path.dirname(file_format_converters.__file__),\n \"converter_algorithms_metadata.xml\"))\n\n # decode the algorithms specs\n xml_decoder = ConvertAlgorithmsMetadata(xml_paths)\n algorithm_data_objects = xml_decoder.decode_algorithm_data_objects()\n xml_decoder = ConvertAlgorithmsMetadata(converter_xml_path)\n converter_algorithm_data_objects = \\\n xml_decoder.decode_algorithm_data_objects()\n\n # filter for just algorithms we want to use\n algorithm_data = self._get_algorithm_data(\n algorithms_names, algorithm_data_objects,\n converter_algorithm_data_objects)\n optional_algorithms_datas = self._get_algorithm_data(\n optional_algorithms, algorithm_data_objects,\n converter_algorithm_data_objects)\n optional_algorithms_datas.extend(\n converter_algorithm_data_objects.values())\n\n # sort_out_order_of_algorithms for execution\n self._sort_out_order_of_algorithms(\n inputs, required_outputs, algorithm_data,\n optional_algorithms_datas)",
"def generate():\n strategies = []\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy16h))\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy4))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # Add RPS Geometry\n strategies.extend(\n generate_meta_strategy_pair(GeometryV4Strategy))\n\n # Add Iocaine Powder strategy\n strategies.extend(\n generate_meta_strategy_pair(IocanePowderStrategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations",
"def create_files(filename_list, encoding):\n for filename in filename_list:\n codecs.open(filename, 'w', encoding).close()",
"def compress(names, method):\n if not isinstance(names, list):\n ValueError(\"Expected a list of names, got a {0}.\".format(type(names)))\n compressions = []\n raw_compressions = map(method, names)\n # Double metaphone returns a list of tuples, so need to unpack it\n for item in raw_compressions:\n if isinstance(item, (list, tuple)):\n compressions.extend([unicode(sub) for sub in item if sub != ''])\n elif item != '':\n compressions.append(unicode(item))\n return compressions if compressions else ['']",
"def register_algorithm(self, builder: 'CompressionAlgorithmBuilder'):\n self._builders.append(builder)",
"def _prepare_wick(term, comparator, contractor, symms, resolvers):\n\n symms = {} if symms is None else symms\n contr_all = comparator is None\n\n if contr_all:\n contrs = _get_all_contrs(term, contractor, resolvers=resolvers)\n vec_order = None\n else:\n term = term.canon4normal(symms)\n vec_order, contrs = _sort_vecs(\n term, comparator, contractor, resolvers=resolvers\n )\n\n # schemes = _compute_wick_schemes(vec_order, contrs)\n schemes = compose_wick(vec_order, contrs)\n\n return term, contrs, schemes",
"def generate():\n strategies = []\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy16h))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # Add RPS Geometry\n strategies.extend(\n generate_meta_strategy_pair(GeometryV4Strategy))\n\n # Add Iocaine Powder strategy\n strategies.extend(\n generate_meta_strategy_pair(IocanePowderStrategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations",
"def _compile_packers(endian):\n return {\n \"B\": struct.Struct(endian + \"B\"),\n \"b\": struct.Struct(endian + \"b\"),\n \"h\": struct.Struct(endian + \"h\"),\n \"H\": struct.Struct(endian + \"H\"),\n \"l\": struct.Struct(endian + \"l\"),\n \"L\": struct.Struct(endian + \"L\"),\n \"d\": struct.Struct(endian + \"d\"),\n \"f\": struct.Struct(endian + \"f\"),\n }",
"def generate():\n strategies = []\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy16h))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # Add RPS Geometry\n strategies.extend(\n generate_meta_strategy_pair(GeometryV4Strategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations",
"def build_encoder(\n self,\n build_encoder: dict,\n target_dir: str,\n cache_dir: str,\n train_csv_path: str,\n valid_csv_path: str,\n test_csv_paths: list,\n get_path_only: bool = False,\n ):\n encoder_path = Path(target_dir) / \"encoder.pkl\"\n if get_path_only:\n return encoder_path\n\n train_csv = pd.read_csv(train_csv_path)\n valid_csv = pd.read_csv(valid_csv_path)\n test_csvs = [pd.read_csv(path) for path in test_csv_paths]\n all_csv = pd.concat([train_csv, valid_csv, *test_csvs])\n\n multilabels = [\n [label.strip() for label in multilabel.split(\";\")]\n for multilabel in all_csv[\"labels\"].tolist()\n ]\n encoder = CategoryEncoders(\n [single_category_labels for single_category_labels in zip(*multilabels)]\n )\n with open(encoder_path, \"wb\") as f:\n pickle.dump(encoder, f)\n\n return encoder",
"def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n for limit in limits:\n for source in sources:\n strategies.extend(\n generate_meta_strategy_pair(\n RFindStrategy,\n limit=limit,\n src=source,\n shenanigans=False,\n ))\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy16h))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # Add RPS Geometry\n strategies.extend(\n generate_meta_strategy_pair(GeometryV4Strategy))\n\n # Add Iocaine Powder strategy\n strategies.extend(\n generate_meta_strategy_pair(IocanePowderStrategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations",
"def generate():\n strategies = []\n\n # Add RFind strategies (2 meta-strategies P0 and P'0 for each)\n limits=[50, 20, 10]\n sources = ['his', 'our', 'dna']\n for limit in limits:\n for source in sources:\n strategies.extend(\n generate_meta_strategy_pair(\n RFindStrategy,\n limit=limit,\n src=source,\n shenanigans=False,\n ))\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy16h))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeV10Strategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n # Add testimono strategy\n strategies.extend(\n generate_meta_strategy_pair(TestimonoStrategy))\n\n # Add RPS Geometry\n strategies.extend(\n generate_meta_strategy_pair(GeometryV4Strategy))\n\n # Add Iocaine Powder strategy\n strategies.extend(\n generate_meta_strategy_pair(IocanePowderStrategy))\n\n # By default, rotate everything\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations",
"def algorithms():\n algorith_paradigms = ['Divide-and-conquer', 'Backtrackig', 'Greedy-Algorithms', 'Dynamic-programming']\n return algorith_paradigms",
"def generate():\n strategies = []\n\n # Add Centrifugal Bumblepuppy 16+H (RFind based)\n strategies.extend(\n generate_meta_strategy_pair(CentrifugalBumblepuppy16h))\n\n # Add decision tree strategies\n strategies.extend(\n generate_meta_strategy_pair(DecisionTreeStrategy))\n\n # Add HPS Dojo strategies\n strategies.extend(\n generate_meta_strategy_pair(HPSDojoStrategy))\n\n # Add testing please ignore strategies\n strategies.extend(\n generate_meta_strategy_pair(TestingPlsIgnoreStrategy))\n\n do_rotations = [True for _ in strategies]\n\n # Anti Trivial\n strategies.extend(\n generate_meta_strategy_pair(\n AntiTrivialStrategy, mirroring=False))\n do_rotations.extend([False])\n\n return strategies, do_rotations"
] | [
"0.62809783",
"0.58072144",
"0.56273663",
"0.5360396",
"0.5243267",
"0.5199738",
"0.5186726",
"0.5184808",
"0.5183975",
"0.5176187",
"0.5148908",
"0.51379925",
"0.51363987",
"0.5109506",
"0.5105624",
"0.51028985",
"0.5090822",
"0.50801146",
"0.50719637",
"0.5062348",
"0.5056173",
"0.50211424",
"0.50145674",
"0.49952805",
"0.49816802",
"0.49771792",
"0.4938226",
"0.4938226",
"0.49328366",
"0.49317572"
] | 0.8176492 | 0 |
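A corresponding sketch for create_compression_algorithm_builder_from_algo_names from the record above; the algorithm names passed in are assumptions and would have to match entries in the PT_COMPRESSION_ALGORITHMS registry that the function consults, and config is assumed to be an NNCFConfig as in the previous sketch.

# Sketch only: "quantization" and "filter_pruning" are assumed registry names.
names = ["quantization", "filter_pruning"]
builder = create_compression_algorithm_builder_from_algo_names(
    names, config, should_init=True
)

# Per the function body above: an empty list falls back to
# NoCompressionAlgorithmBuilder, a single name returns that algorithm's builder
# directly, and multiple names are wrapped in a PTCompositeCompressionAlgorithmBuilder.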
Helper to call ``ir.actions.report.xml.render_report()``. | def render_report(cr, uid, ids, name, data, context=None):
registry = yuancloud.modules.registry.RegistryManager.get(cr.dbname)
return registry['ir.actions.report.xml'].render_report(cr, uid, ids, name, data, context) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_xml_report(self, parser, data, objects,context=None):\n raise NotImplementedError()",
"def display_reports(self, layout): # pylint: disable=arguments-differ",
"def _generate_report(self):\n raise NotImplementedError",
"def render_report(self, res_ids, name, data):\n report = self._lookup_report(name)\n if isinstance(report, basestring): # Qweb report\n # The only case where a QWeb report is rendered with this method occurs when running\n # yml tests originally written for RML reports.\n if tools.config['test_enable'] and not tools.config['test_report_directory']:\n # Only generate the pdf when a destination folder has been provided.\n return self.env['report'].get_html(res_ids, report, data=data), 'html'\n else:\n return self.env['report'].get_pdf(res_ids, report, data=data), 'pdf'\n else:\n return report.create(self._cr, self._uid, res_ids, data, context=self._context)",
"def print_report_pdf(self):\n self.ensure_one()\n return self.env.ref('eliterp_sale_reports.action_report_product_catalogue').report_action(self)",
"def report():\n pass",
"def get_report(self):\n data = {\n 'ids': self.ids,\n 'model': self._name,\n 'form': {\n 'date_start': self.date_start,\n 'date_end': self.date_end,\n },\n }\n\n # use `module_name.report_id` as reference.\n # `report_action()` will call `_get_report_values()` and pass `data` automatically.\n return self.env.ref('base_enh.recap_report').report_action(self, data=data)",
"def print_report(self):\n assert len(self) == 1, 'This option should only be used for a single id at a time.'\n datas = {\n 'form': \n {\n 'company_id': self.company_id and [self.company_id.id] or [],\n 'warehouse_ids': [y.id for y in self.warehouse_ids],\n 'start_date': self.start_date,\n 'end_date': self.end_date,\n 'include_zero': self.include_zero,\n 'sort_order': self.sort_order,\n 'value': self.value,\n 'id': self.id,\n }\n }\n\n if [y.id for y in self.warehouse_ids] and (not self.company_id):\n self.warehouse_ids = []\n raise Warning(_('Please select company of those warehouses to get correct view.\\nYou should remove all warehouses first from selection field.'))\n return self.env.ref(\n 'most_selling_product.action_ir_most_selling_product'\n ).report_action(self, data=datas)",
"def create_report(self, output):\n if output == 'xml':\n report = super(Report, self).create_report()\n return report\n elif output == 'csv':\n return self.statement_detail_csv()",
"def CompileReport(self, mediator):\n return",
"def print_event_report(self):\n data = {\n 'ids': self.ids,\n 'model': self._name,\n 'form': {\n 'event_start_date': self.event_start_date,\n 'event_end_date': self.event_end_date,\n 'agenda': self.env.context.get('default_agenda_id'),\n },\n }\n return self.env.ref('agenda_esi.recap_report').report_action(self, data=data)",
"def GenerateReport(self, plugin):\n raise 'Method not implemented'",
"def report(self, report_options=None):\n raise NotImplementedError()",
"def report(self, output_dir):",
"def print_report_pdf(self):\n self.ensure_one()\n return self.env.ref('eliterp_sale_reports.action_report_product_sold').report_action(self)",
"def report(self, **options):\n pass",
"def reports(env, node_name):\n envs = environments()\n check_env(env, envs)\n return render_template(\n 'reports.html',\n envs=envs,\n current_env=env,\n node_name=node_name,\n columns=REPORTS_COLUMNS)",
"def print_report(self, cr, uid, ids, context=None):\n if context is None:\n context = {}\n partner_obj = self.pool.get('res.partner')\n partner_ids = partner_obj.search(cr, uid, [('customer', '=', True)], context=context)\n current_date = datetime.today().strftime('%Y-%m-%d')\n date = (datetime.today() - relativedelta(months=+1,day=1,days=-1)).strftime('%Y-%m-%d')\n print_ids = []\n for partner in partner_obj.browse(cr, uid, partner_ids, context=context):\n for sale in partner.sale_order_ids:\n if date < sale.date_order and sale.date_order < current_date:\n print_ids.append(partner.id)\n \n list_ids = []\n list_ids = list(set(partner_ids)-set(print_ids))\n if not print_ids:\n raise osv.except_osv(_('Warring!'), _('There is no partner'))\n \n datas = {'ids': list_ids}\n res = self.read(cr, uid, ids, context=context)\n res = res and res[0] or {}\n res.update({'ids': datas['ids']})\n datas.update({'form': res})\n return {\n 'type': 'ir.actions.report.xml',\n 'report_name': 'inactive.partner.report',\n 'datas': datas,\n }",
"def buildReports(self):\n pass",
"def report(self) -> Any:",
"def GenerateReport(self):\n\t\tpayload = { \"Arg1\": self.href }\n\t\treturn self._execute('generateReport', payload=payload, response_object=None)",
"def report(self) -> computation_base.Computation:\n return self._report_fn",
"def print_xlsx(self):\n if self.date_from and self.date_to:\n if self.date_from > self.date_to:\n raise ValidationError(\"Date From must be less than Date To\")\n\n # active_record = self._context['id']\n # record = self.env['room.accommodation'].browse(active_record)\n data = {\n 'date_from': self.date_from,\n 'date_to': self.date_to,\n 'guest_id': self.guest_id.id,\n 'model_id': self.id,\n 'check_out': self.check_out,\n 'date_today': fields.Datetime.now()\n }\n\n print(\"XLSX Wizard data : \", data)\n\n return {\n 'type': 'ir.actions.report',\n 'data': {\n 'model': 'accommodation.reporting',\n 'options': json.dumps(data, default=date_utils.json_default),\n 'output_format': 'xlsx',\n 'report_name': 'Accommodation Report'\n },\n 'report_type': 'xlsx'\n }",
"def _render(self) -> None:\n pass",
"def print_report(self, stream):\n stream.write(ET.tostring(self.xml()))",
"def report(self):\n #i need to figure out how to pass all these in a list or something, woof.\n self.report_generator_module.run(\\\n self.total,\\\n self.unique,\\\n self.top_10,\\\n self.top_10_base,\\\n self.lengths,\\\n self.counts,\\\n self.one_to_six,\\\n self.trailing_number,\\\n self.last_1digit,\\\n self.last_2digit,\\\n self.last_3digit,\\\n self.last_4digit,\\\n self.last_5digit,\\\n self.charset)",
"def __execute_reporter(self):\n if not self.__args.report:\n return\n reporter.HTMLReporter().generate_report_from_file(\n self.__lst_json_files)",
"def report(env, node_name, report_id):\n envs = environments()\n check_env(env, envs)\n query = AndOperator()\n report_id_query = OrOperator()\n\n report_id_query.add(EqualsOperator(\"hash\", report_id))\n report_id_query.add(EqualsOperator(\"configuration_version\", report_id))\n\n if env != '*':\n query.add(EqualsOperator(\"environment\", env))\n\n query.add(EqualsOperator(\"certname\", node_name))\n query.add(report_id_query)\n\n reports = puppetdb.reports(query=query)\n\n try:\n report = next(reports)\n except StopIteration:\n abort(404)\n\n return render_template(\n 'report.html',\n report=report,\n events=yield_or_stop(report.events()),\n logs=report.logs,\n metrics=report.metrics,\n envs=envs,\n current_env=env)",
"def _create_xml_report(self, test, xml_obj):\n xml_report_path = os.path.join(test.work_dir,\n self.XML_REPORT_PATH)\n with open(xml_report_path, 'w') as xml_report:\n xml_report.write(etree.tostring(xml_obj, pretty_print=True))",
"def report_callback(self, object, report, request):\n ..."
] | [
"0.6349062",
"0.63135356",
"0.60601676",
"0.60378766",
"0.59094083",
"0.59055895",
"0.58374727",
"0.57946086",
"0.57253027",
"0.56983304",
"0.5696021",
"0.5689943",
"0.56596303",
"0.5653516",
"0.56528664",
"0.56278044",
"0.5620049",
"0.56102747",
"0.55857426",
"0.55817837",
"0.55789405",
"0.5573373",
"0.5572717",
"0.5565915",
"0.5550966",
"0.5516647",
"0.5487175",
"0.54738516",
"0.54710037",
"0.54620606"
] | 0.7703999 | 0 |