query: stringlengths 9 to 9.05k
document: stringlengths 10 to 222k
metadata: dict
negatives: listlengths 30 to 30
negative_scores: listlengths 30 to 30
document_score: stringlengths 4 to 10
document_rank: stringclasses (2 values)
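The column summary above describes a retrieval-style triplet layout: one query, one positive document, thirty negatives with matching scores, plus a score and rank for the positive. Assuming the dump comes from a Hugging Face dataset (the repository id below is a placeholder, not a name taken from this preview), a row with this schema could be loaded and inspected like so:

from datasets import load_dataset

# Placeholder repository id; substitute the real dataset path.
ds = load_dataset("user/code-retrieval-triplets", split="train")

row = ds[0]
print(row["query"][:80])            # natural-language description of the code
print(row["document"][:80])         # the positive code snippet
print(len(row["negatives"]))        # 30 negative code snippets
print(len(row["negative_scores"]))  # 30 similarity scores, aligned with negatives
print(row["document_score"], row["document_rank"])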
Set the global trace specification based on the given trace string. Loop through all registered modules and set their trace class trace level based on the given trace string if the module name matches one of the module patterns in the given trace string.
def configureTrace(traceString):
    setTraceSpec(traceString)
    registeredModules = Trace.tracedEntities.keys()
    for module in registeredModules:
        for spec in Trace.traceSpec:
            if (spec.compiledRegex.match(module)):
                trace = Trace.tracedEntities[module]
                trace.setTraceLevel(spec.level)
                break
            #endIf
        #endFor
    #endFor
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setTraceSpec(traceString):\n \n if (not traceString):\n raise Exception(\"The traceString argument must be a non-empty string.\")\n #endIf\n \n Trace.traceSpec = parseTraceString(traceString)\n Trace.traceString = traceString", "def configureTrace(self,traceString):\n configureTrace(traceString)", "def parseTraceString(traceString):\n result = []\n # If the given traceString is enclosed in double-quotes,\n # then strip the double-quotes.\n if (traceString[0] == '\"' and traceString[-1] == '\"'):\n traceString = traceString[1:-1]\n #endIf\n traceStrings = traceString.split(\":\")\n for trace in traceStrings:\n traceParts = trace.split(\"=\")\n if (len(traceParts) != 2):\n raise TraceSpecificationException(\"Encountered an invalid trace string: %s A trace string looks like <module_pattern>=<level>.\" % trace)\n #endIf\n \n modulePattern = traceParts[0]\n level = traceParts[1]\n result.append(TraceSpecification(modulePattern,level))\n #endFor\n return result", "def configureThisTrace(self):\n for spec in Trace.traceSpec:\n if (spec.compiledRegex.match(self.entityName)):\n self.traceLevel = spec.level\n break\n #endIf\n #endFor", "def trace(string):\n if trace_enabled:\n print(string)", "def setTraceLevel (self,level):\n if (type(level) == type(\"\") or type(level) == type(u\"\")):\n if (level):\n level = self._coerceLevel(level)\n self.traceLevel = level\n #endIf\n elif (type(level) == type(0)):\n if (self._isTraceLevel(level)):\n self.traceLevel = level\n else:\n # level is a number but not in the range of a trace level.\n raise TraceLevelException(\"Invalid trace level: %s Valid trace levels are defined by the Level class.\" % level)\n #endIf\n else:\n # Odd case where level is unexpected type\n raise TraceLevelException(\"Trace level must be either a string or an integer. 
Use levels defined by the Level class.\")\n #endIf", "def set_trace(self, depth=1):\n self.disable_io_capture()\n dbg = Debugger()\n pudb.set_interrupt_handler()\n dbg.set_trace(sys._getframe(depth))", "def set_trace_color(color): #py:set_trace_color\n RUR._set_trace_color_(color)", "def __trace_ui_name(trace_name, trace_level):\n\n pass", "def trace(self, module, message):\n if self.log_level <= consts.LOG_LEVEL_TRACE:\n print(\"TRACE : %s: %s\" % (module, message))", "def trace(self, trace=...):\n ...", "def replace_trace(trace=None):\n oldtrace = sys.gettrace()\n sys.settrace(trace)\n try:\n yield\n finally:\n # specific hack to work around a bug in pycoverage, see\n # https://bitbucket.org/ned/coveragepy/issue/123\n if (oldtrace is not None and not callable(oldtrace) and\n hasattr(oldtrace, 'pytrace')):\n oldtrace = oldtrace.pytrace\n sys.settrace(oldtrace)", "def _logging_individual_modules(logmod, modlevel):\n if not logmod:\n return\n\n module_log_level = getattr(logging, modlevel.upper(), modlevel)\n if not isinstance(module_log_level, int):\n try:\n module_log_level = int(module_log_level)\n except ValueError:\n logging.warning(\"Could not parse modlevel {}\".format(modlevel))\n module_log_level = logging.DEBUG\n\n for submodule in logmod:\n logging.getLogger(submodule).setLevel(module_log_level)", "def settrace_patch(tracefunc: Any) -> None:\n global _is_debugger_active\n _is_debugger_active = bool(tracefunc)\n try:\n _original_settrace(tracefunc)\n except Exception:\n # IDEs, such as PyCharm, may ban calls to settrace().\n # http://pydev.blogspot.com/2007/06/why-cant-pydev-debugger-work-with.html\n # In such cases, do nothing.\n pass", "def set_trace():\n Bdb().set_trace()", "def _setup_log_levels(self):\n for logger_name, level in self.named_levels.items():\n logger = logging.getLogger(logger_name)\n logger.setLevel(level)\n LOG.info(\"Set %s to use logging level %s\", logger_name, level)", "def _log_level_from_string(string):\n try:\n return getattr(logging, string)\n except AttributeError:\n raise ValueError('invalid log level: %r' % string)", "def setmodule(self, module, priority='project'):\n\t\tself._assert_mutability()\n\t\tif isinstance(module, six.string_types):\n\t\t\tmodule = import_module(module)\n\t\tfor key in dir(module):\n\t\t\tif key.isupper():\n\t\t\t\tself.set(key, getattr(module, key), priority)", "def trace_id_set(trace_id: tuple[str, str]) -> None:\n trace_id_cv.set(trace_id)", "def add_standards(logging):\n add_log_level(\n # Log debug details of constant changes\n TRACE=5 , \n # Log debug with a little more chattyness\n VERBOSE=7,\n logging=logging\n )\n\n # Log a suppressed exception at warning level\n add_log_level(\n SUPPRESSED=logging.WARN,\n exceptions=True, \n logging=logging\n )", "def selectTrace(self,trace: int) -> None:\n\n if not self.debug:\n self.myFieldFox.write(\"CALC:PAR\" + str(trace) + \":SEL\")\n\n return", "def set_trace(self, frame=None):\n if frame is None:\n frame = sys._getframe().f_back\n self.reset()\n while frame:\n frame.f_trace = self.trace_dispatch\n self.botframe = frame\n frame = frame.f_back\n self.set_step()\n sys.settrace(self.trace_dispatch)", "def settrace(function): # real signature unknown; restored from __doc__\n pass", "def patch_traces(self, traces, project_id=None, options=None):\n if project_id is None:\n project_id = self.project\n\n self.trace_api.patch_traces(\n project_id=project_id,\n traces=traces,\n options=options)", "def set_tracing(self, tracing: bool) -> None:\n self.tracing = tracing", "def 
setModule(name, module):", "def _configureTraceAndLogging(self,traceArgs):\n logFile = self._getArg(['logFile','logfile'], traceArgs)\n if (logFile):\n TR.appendTraceLog(logFile)\n #endIf\n\n trace = self._getArg(['trace', 'loglevel'], traceArgs)\n\n if (trace):\n if (not logFile):\n TR.appendTraceLog('trace.log')\n #endDef\n\n TR.configureTrace(trace)\n #endIf\n return (trace,logFile)", "def set_trace_style(style=\"default\"): #py:set_trace_style\n if style not in [\"thick\", \"default\", \"invisible\"]:\n raise ReeborgError(\"Unrecognized style in set_trace_style().\")\n RUR._set_trace_style_(style)", "def set_trace():\r\n # without this in iPython debugger can generate strange characters.\r\n from IPython.core.debugger import Pdb\r\n Pdb().set_trace(sys._getframe().f_back)", "def set_loggers_level(names, level=logging.CRITICAL):\n for item in names:\n logging.getLogger(item).setLevel(level)" ]
[ "0.7233881", "0.6644756", "0.6296946", "0.60611576", "0.56253296", "0.52577573", "0.51801455", "0.48836422", "0.4874601", "0.48213887", "0.48206002", "0.48105368", "0.4734947", "0.47317776", "0.47311783", "0.47059482", "0.46913573", "0.46432233", "0.4641233", "0.46398026", "0.46393448", "0.4628556", "0.4606741", "0.45713323", "0.45625043", "0.45579496", "0.4527356", "0.4511955", "0.4497215", "0.44702747" ]
0.8441392
0
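The positive document and several of the negatives in this row revolve around one mechanism: a trace string of colon-separated <module_pattern>=<level> specs is parsed into compiled patterns, and each registered module takes the level of the first pattern that matches its name. Below is a minimal self-contained sketch of that mechanism; the Trace registry and level coercion from the original code are simplified away.

import re

def parse_trace_string(trace_string):
    # Each spec looks like <module_pattern>=<level>; specs are separated by ":".
    specs = []
    for part in trace_string.strip('"').split(":"):
        pattern, _, level = part.partition("=")
        if not level:
            raise ValueError("invalid trace spec: %s" % part)
        specs.append((re.compile(pattern), level))
    return specs

def configure_trace(trace_string, registered_modules):
    # Give each module the level of the first pattern that matches its name.
    specs = parse_trace_string(trace_string)
    levels = {}
    for module in registered_modules:
        for regex, level in specs:
            if regex.match(module):
                levels[module] = level
                break
    return levels

print(configure_trace(r"app\..*=DEBUG:db=INFO", ["app.web", "db", "cache"]))
# {'app.web': 'DEBUG', 'db': 'INFO'}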
the freshly created queue has 'rear' value of 0 (the next available pointer in the queue buffer).
def test_queue_rear(self):
    queue = Queue(shape=(100, 2, 2, 2))
    self.assertEqual(queue.rear, 0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dequeue(self):\n pass", "def dequeue(self):\n pass", "def AdvanceQueue(self):\r\n self.data.pop(0)\r\n return", "def dequeue(self):\n if self.isEmpty():\n raise Exception(\"Queue underflow\")\n item = self._q[self._first]\n self._q[self._first] = None # to avoid loitering\n self._N -= 1\n self._first += 1\n if self._first == len(self._q):\n self._first = 0 # wrap-around\n # shrink size of array if necessary\n if self._N > 0 and self._N == len(self._q)/4:\n self._resize(len(self._q)/2)\n return item", "def dequeue(self):", "def Rear(self):\r\n if (len(self.queue) >= 1):\r\n return self.queue[-1]\r\n else:\r\n return -1", "def Rear(self):\n # empty queue\n if self.count == 0:\n return -1\n return self.queue[(self.headIndex + self.count - 1) % self.capacity]", "def Rear(self):\n return -1 if self.isEmpty() else self.queue[self.end]", "def Rear(self):\n if self.isEmpty():\n return -1\n else:\n return self.queue[self.rear - 1]", "def Rear(self):\n if self.isEmpty():\n return -1\n else:\n return self.queue[self.rear - 1]", "def Rear(self):\n if self.isEmpty():\n return -1\n else:\n return self.queue[-1]", "def getRear(self):\n\t\trear = self.queue[self.rear]\n\t\treturn rear", "def test_dequeue(self):\r\n from numpy import random\r\n queue = Queue(shape=(11, 2, 3, 4), dtype='int16')\r\n for i in range(100):\r\n arr_in = random.randint(4096,size = (2,2,3,4))\r\n queue.enqueue(arr_in)\r\n arr_out = queue.dequeue(2)\r\n self.assertEqual((arr_in==arr_out).all(), True)\r\n self.assertEqual(queue.length,0)\r\n self.assertEqual(queue.global_rear,(i+1)*2)\r\n self.assertEqual(queue.rear,2*(i+1)-int(2*(i+1)/11)*11)\r\n\r\n from numpy import random\r\n queue = Queue(shape=(32, 2, 3, 4), dtype='int16')\r\n for i in range(100):\r\n arr_in = random.randint(4096,size = (1,2,3,4))\r\n queue.enqueue(arr_in)\r\n self.assertEqual(queue.length,1)\r\n arr_out = queue.dequeue(1)\r\n self.assertEqual((arr_in==arr_out).all(), True)\r\n self.assertEqual(queue.length,0)\r\n self.assertEqual(queue.global_rear,(i+1)*1)\r\n self.assertEqual(queue.rear,1*(i+1)-int(1*(i+1)/queue.shape[0])*queue.shape[0])", "def peekRear(self):\n # ADD CODE HERE\n if self._size == 0:\n raise AttributeError(\"Cannot peek rear from an empty Deque\")\n return self._rear.getData()", "def __init__(self,size=10):\n \n self.inbound = Queue() #an internal queue to manage the class properly in a thread safe manner.\n self.index = Value('i',0) #index of next item to be added.\n self.manager = Manager()\n \n self.buffer = self.manager.list() #the buffer we will store things in.\n self.size = size #the maximum size of the buffer\n self.newitem = Queue() #a blocking event to control the pop method\n t = threading.Thread(target=self.worker) #the worker that will run when items are added.\n t.start() #start the worker\n self.newitemindex = 0 #index of items to pop", "def checkQueue( self ):\n if self.queue:\n yield self.writeToSerial( self.queue.pop( 0 ) )\n else:\n self.free = True", "def dequeue(self):\n raise NotImplementedError(\"dequeue: You should have implemented this method!\")", "def fill_rawq(self):\n # The buffer size should be fairly small so as to avoid quadratic\n # behavior in process_rawq() above\n\n buf = self.channel.recv(50)\n self.msg(\"recv %r\", buf)\n self.eof = (not buf)\n self.rawq = self.rawq + buf", "def _dequeue(self):\n return self._queue.popleft()", "def test_size_decrements_with_dequeue():\n queue = Queue()\n queue.enqueue('val')\n assert queue.size() == 1\n queue.dequeue()\n assert queue.size() == 0", "def 
test_the_peek(the_queue):\n the_queue.enqueue(1)\n the_queue.enqueue(2)\n the_queue.enqueue(3)\n the_queue.dequeue()\n assert the_queue._new_dll.tail.data == 2", "def Rear(self):\n # empty queue\n if self.count == 0:\n return -1\n return self.tail.value", "def dequeue(self):\r\n if self.size():\r\n self.queue.pop(0)\r\n else:\r\n raise IndexError(\"Queue is empty.\")", "def curr_queue(self):\n pass", "def __init__(self):\n self.queue = deque()", "def __init__(self):\n self.queue = deque()", "def __init__(self):\n self.queue = deque()", "def __init__(self):\n self.queue = deque()", "def test_dequeue(self):\n queue = Queue()\n self.assertEqual(queue.dequeue(), None)\n queue.enqueue(1)\n queue.enqueue(2)\n queue.enqueue(3)\n self.assertEqual(queue.dequeue(), 1)\n self.assertEqual(queue.size(), 2)", "def __init__(self):\r\n self.queue = []\r\n self.current = False" ]
[ "0.6964394", "0.6964394", "0.695485", "0.69021404", "0.6897588", "0.6881007", "0.6785619", "0.6758882", "0.67280215", "0.67280215", "0.67083055", "0.6669669", "0.6658059", "0.66144186", "0.6604896", "0.65884155", "0.6587446", "0.6578113", "0.65759975", "0.6553688", "0.6553572", "0.6515944", "0.6497573", "0.6481708", "0.6475256", "0.6475256", "0.6475256", "0.6475256", "0.64750403", "0.6468566" ]
0.730834
0
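The positive test and the dequeue-related negatives describe a fixed-capacity circular buffer: rear starts at 0, advances as items are enqueued, and wraps modulo the first element of shape. The sketch below uses a plain list in place of the array-backed buffer the original tests exercise; the real class also accepts a dtype and batched enqueues, which are omitted here.

class Queue:
    """Fixed-capacity FIFO ring buffer; rear is the next free write slot."""

    def __init__(self, shape=(100,)):
        self.capacity = shape[0]
        self.buffer = [None] * self.capacity
        self.rear = 0          # next slot to write into, wraps at capacity
        self.global_rear = 0   # total number of items ever enqueued
        self.length = 0

    def enqueue(self, item):
        if self.length == self.capacity:
            raise OverflowError("queue is full")
        self.buffer[self.rear] = item
        self.rear = (self.rear + 1) % self.capacity
        self.global_rear += 1
        self.length += 1

    def dequeue(self):
        if self.length == 0:
            raise IndexError("queue is empty")
        front = (self.rear - self.length) % self.capacity
        item = self.buffer[front]
        self.length -= 1
        return item

q = Queue(shape=(100, 2, 2, 2))
assert q.rear == 0                        # freshly created queue: rear is 0
q.enqueue("a")
assert q.rear == 1 and q.dequeue() == "a"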
Convert a BigBird string into a JSON object.
def bbjson(s): return BunchDict(json.loads(unquote(s)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def string2json(self, string):\n kwargs = {\n 'cls': BytesEncoder, # use the IPython bytes encoder\n 'indent': 1,\n 'sort_keys': True,\n 'separators': (',', ': '),\n }\n return cast_unicode(json.dumps(string, **kwargs), 'utf-8')", "def convert_to_json(self, string):\n return json.dumps(string)", "def from_json_string(my_str):\n robj = json.loads(my_str)\n return robj", "def as_json(s):\n return json.dumps(s.as_dict())", "def to_json(self, value):\n # force parsing of strings into correct json structures\n if isinstance(value, str):\n value = self.to_dict(value)\n return json.dumps(value)", "def from_string(string):\n # in order to complete this lab we are going to use the python lib json in which we have the function json.loads\n # which will automatically load a json from a string\n return json.loads(string)", "def json_loads(self, string: str) -> object:\n return json.loads(string)", "def from_json_string(my_str):\n import json\n return json.loads(my_str)", "def parse_json(string):\n\n\ttry:\n\t\tjson_blob = json.loads(string)\n\n\texcept ValueError as excep:\n\t\tmsg = \"Failed to parse line `{0}` with `{1}` exception.\"\n\t\tprint(msg.format(string, excep), file=sys.stderr)\n\t\treturn\n\n\tif isinstance(json_blob, (int, float)):\n\t\treturn {\"value\": json_blob}\n\n\telif isinstance(json_blob, dict):\n\t\treturn json_blob\n\n\telse:\n\t\tprint(\n\t\t\t\"Decoded JSON is not a chartable data-type: {}\".format(json_blob),\n\t\t\tfile=sys.stderr\n\t\t)", "def from_json_string(my_str):\n return loads(my_str)", "def from_json_string(my_str):\n return json.loads(my_str)", "def from_json_string(my_str):\n return json.loads(my_str)", "def from_json_string(my_str):\n return json.loads(my_str)", "def from_json_string(my_str):\n\n return json.loads(my_str)", "def from_json_string(my_str):\n\n return json.loads(my_str)", "def cast_to_jdict(value):\n if isinstance(value, str):\n value = value.strip()\n if value:\n first_char = value[0]\n if first_char in {'[', '{'}:\n return json.loads(value)\n elif value in ['true', 'false', 'null']:\n return json.loads(value)\n elif os.path.isfile(value):\n return json.load(value)\n else:\n return json.loads(\n '[' + value + ']'\n ) # wrap in brackets and call json.loads\n else:\n return ''\n else:\n return value", "def json_decode(value):\r\n return json.loads(to_basestring(value))", "def to_json(cls, data):\n if isinstance(data, str):\n return json.loads(data)\n return data", "def from_json_string(my_obj):\n\n return(json.loads(my_obj))", "def from_json_string(my_str):\n rep = json.loads(my_str)\n return rep", "def decode(self, bytestring, **options):\n try:\n return json.loads(\n bytestring.decode(\"utf-8\"), object_pairs_hook=collections.OrderedDict\n )\n except ValueError as exc:\n raise ParseError(\"Malformed JSON. 
%s\" % exc)", "def _deserialize_binary_string(self, avro_schema, json_str):\n if self.__needs_encoding(json_str):\n json_str = json_str.encode(self.BYTES_CHARSET)\n return json_str", "def test_json_string(self):\n Base._Base__nb_objects = 0\n d1 = {\"id\": 9, \"width\": 5, \"height\": 9, \"x\": 7, \"y\": 8}\n d2 = {\"id\": 10, \"width\": 3, \"height\": 15, \"x\": 4, \"y\": 0}\n json_string = Base.to_json_string([d1, d2])\n self.assertTrue(type(json_string) is str)\n d = json.loads(json_string)\n self.assertEqual(d, [d1, d2])", "def from_json_string(my_str):\n return (json.loads(my_str))", "def from_json_string(my_str):\n return (json.loads(my_str))", "def from_json_string(my_str):\n return (json.loads(my_str))", "def to_json_string(my_obj):\n obj = j.dumps(my_obj)\n return obj", "def to_python(self, value):\n if value is not None and isinstance(value, basestring):\n value = str(value)\n if value.startswith('base64:'):\n value = value.split(':')[1]\n prepared_data = json.loads(base64.decodestring(value))\n return json.loads(prepared_data)\n elif value is not None and isinstance(value, dict):\n value = json.dumps(value)\n return value\n\n return value", "def read_string(self, string, **kwds):\n self._dict.update(json.loads(string))", "def to_json_string(my_obj):\n j_obj = json.dumps(my_obj)\n return j_obj" ]
[ "0.65743154", "0.6400961", "0.59088975", "0.59016544", "0.5773446", "0.5773138", "0.5771175", "0.5762785", "0.5758166", "0.57323337", "0.5724494", "0.5724494", "0.5724494", "0.5724225", "0.5724225", "0.57240826", "0.56558675", "0.56424606", "0.5609231", "0.5574156", "0.55466145", "0.553522", "0.55212295", "0.5509579", "0.5509579", "0.5509579", "0.549643", "0.54870313", "0.54821396", "0.54780596" ]
0.68068093
0
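The one-line bbjson document URL-unquotes a string, parses it as JSON, and wraps the result so keys read as attributes. A self-contained sketch follows; BunchDict is not defined in this row, so the attribute-dict below is an assumed stand-in for it.

import json
from urllib.parse import unquote

class BunchDict(dict):
    """Stand-in: a dict whose keys are also readable as attributes."""
    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)

def bbjson(s):
    return BunchDict(json.loads(unquote(s)))

encoded = "%7B%22title%22%3A%20%22BigBird%22%2C%20%22pages%22%3A%203%7D"
obj = bbjson(encoded)
print(obj.title, obj.pages)  # BigBird 3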
Map a user-provided query expression over a set of JSON objects. The query is executed for every JSON object in the input file. The JSON object is bound to the special variable '_'. In addition, all JSON attributes appear in the global namespace for the query expression.
def map(query, fh, skip_header_row, default_obj={}):
    # First, try the JsonRecordReader; then attempt the csv record reader
    reader = MetaRecordReader(default_obj)
    # Hack: append an 'else []' to queries that lack an else clause
    if " if " in query and not " else " in query:
        query = query + " else []"
    compiled_query = compile(query, 'STRING', 'eval')
    it = iter(fh)
    if skip_header_row:
        next(it)
    for line in it:
        obj, env = reader.get_record(line)
        obj_out = eval(compiled_query, env)
        if isinstance(obj_out, list):
            # Lists are treated as flatmap
            yield from obj_out
        else:
            yield obj_out
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_query(file: TextIO) -> 'NGOMatch':\n query_dict = {}\n query_dict['skills'] = {}\n query_dict['interest'] = []\n line = file.readline().strip()\n \n query_dict['skills']['technical'] = []\n query_dict['skills']['interpersonal'] = []\n \n line = file.readline().strip()\n line = file.readline().strip()\n while line != 'Interpersonal':\n query_dict['skills']['technical'].append(line)\n line = file.readline().strip() \n \n line = file.readline().strip()\n while line != 'INTEREST':\n query_dict['skills']['interpersonal'].append(line)\n line = file.readline().strip()\n \n line = file.readline().strip() \n while line != 'NUMBER':\n query_dict['interest'].append(line)\n line = file.readline().strip()\n \n line = file.readline().strip()\n while line != 'SORT':\n query_dict['number'] = line\n line = file.readline().strip()\n \n line = file.readline().strip()\n while line != '':\n if line[:5] == 'skill':\n query_dict['sort-by']['skill'] = line[5:].strip()\n if line [:8] == 'interest':\n query_dict['sort-by']['interest'] = line[8:].strip()\n line = file.readline().strip()\n \n return query_dict", "def process_query(query_file):\r\n query_data = query_file.readlines()\r\n query_dict = {}\r\n x = 1 \r\n search_dict = {}\r\n search_dict['username'] = query_data[x].strip('\\n')\r\n x += 1\r\n operation_list = []\r\n \r\n while query_data[x] != 'FILTER\\n': \r\n operation_list.append(query_data[x].strip('\\n'))\r\n x += 1\r\n \r\n search_dict['operations'] = operation_list \r\n query_dict['search'] = search_dict \r\n x += 1\r\n \r\n filter_dict = {}\r\n filter_format(filter_dict, query_data, 'name-includes', x)\r\n filter_format(filter_dict, query_data, 'location-includes', x)\r\n filter_format(filter_dict, query_data, 'follower', x)\r\n filter_format(filter_dict, query_data, 'following', x)\r\n query_dict['filter'] = filter_dict\r\n \r\n present_dict = {}\r\n sort_by = query_data[-2].strip('sort-by ')\r\n present_dict['sort-by'] = sort_by.strip('\\n')\r\n \r\n format_type = query_data[-1].lstrip('format ')\r\n present_dict['format'] = format_type\r\n query_dict['present'] = present_dict\r\n \r\n return query_dict", "def json_query(data, *query_elems):\n paths = [x.split('.') for x in query_elems]\n def generator():\n for elem in data:\n row = []\n for path in paths:\n row.append(_get_field(elem, path))\n for x in itertools.product(*row):\n yield x\n return pd.DataFrame(generator(), columns=query_elems)", "def _request_all_objects_in_expression(self, expr, **other_request_params):\n _ast = ast.parse(expr, mode='eval')\n _reqs = []\n for _node in ast.walk(_ast):\n if isinstance(_node, ast.Name):\n _obj_spec = _node.id\n elif isinstance(_node, ast.Str):\n _obj_spec = _node.s\n else:\n continue\n\n if ':' in _obj_spec:\n _reqs.append(dict(object_spec=_obj_spec, force_rerequest=False, **other_request_params))\n self.request(_reqs)", "def _clean_query(self, query):\n for object_query in query:\n filters = object_query.get(\"filters\", {}).get(\"expression\")\n self._clean_filters(filters)\n self._macro_expand_object_query(object_query)\n return query", "def _expand_query_list(session, queries, recursive=False, verbose=False):\n results = []\n\n # If no queries are supplied by the user, default to a query for the\n # current working directory\n if len(queries) == 0:\n queries = [get_cwd()]\n\n # Wildcard expansion is performed first, so it can be combined with other types\n # of expansion, such as recursive expansion of subcollections later. 
Each collection\n # or data object is expanded only once.\n preprocessed_queries = []\n already_expanded = {}\n for query in queries:\n # Currently only wildcards without a collection path are supported\n # e.g. \"*.dat\", but not \"../*.dat\" or \"*/data.dat\".\n if \"/\" not in query and (\"?\" in query or \"*\" in query):\n for d in get_dataobjects_in_collection(session, get_cwd()):\n if fnmatch(d[\"name\"],\n query) and d[\"full_name\"] not in already_expanded:\n preprocessed_queries.append(d[\"full_name\"])\n already_expanded[d[\"full_name\"]] = 1\n for c in get_direct_subcollections(session, get_cwd()):\n parent, coll = os.path.split(c[\"name\"])\n if fnmatch(coll, query) and d[\"name\"] not in already_expanded:\n preprocessed_queries.append(c[\"name\"])\n already_expanded[d[\"name\"]] = 1\n else:\n preprocessed_queries.append(query)\n\n for query in preprocessed_queries:\n absquery = convert_to_absolute_path(query)\n if collection_exists(session, absquery):\n results.append({\"original_query\": query, \"expanded_query\": absquery,\n \"expanded_query_type\": \"collection\"})\n if verbose:\n print_debug(\"Argument \\\"{}\\\" is a collection.\".format(query))\n if recursive:\n for subcollection in get_subcollections(session, absquery):\n if verbose:\n print_debug(\"Recursively adding subcollection \" +\n subcollection + \" to queries.\")\n results.append({\"original_query\": query,\n \"expanded_query\": subcollection,\n \"expanded_query_type\": \"collection\"})\n elif dataobject_exists(session, absquery):\n results.append({\"original_query\": query, \"expanded_query\": absquery,\n \"expanded_query_type\": \"dataobject\"})\n if verbose:\n print_debug(\"Argument \\\"{}\\\" is a data object.\".format(query))\n else:\n print_error(\n \"Query \\\"{}\\\" could not be resolved. Ignoring ... 
\".format(query))\n\n return results", "def query_items_handler(query):\n items = getItemsByName(query)\n return jsonify(items=[i.serialize for i in items])", "def apply_special_queries(query, specials):\n for i in specials:\n query = FILTERS_LIST[i](specials[i], query)\n return query", "def _parse_user_query(self, query):\n def _parse_basic_query(attr, value):\n if isinstance(value, str) and '*' in value:\n return MatchGlob(attr, value)\n else:\n return Eq(attr, value)\n\n if isinstance(query, dict):\n subqueries = []\n for attr, value in query.iteritems():\n if isinstance(value, (list, set, tuple)):\n # If value is a list or similar, we build an OR\n or_queries = []\n for or_query in value:\n or_queries.append( _parse_basic_query(attr, or_query) )\n subqueries.append( Or(*or_queries) )\n else:\n subqueries.append(_parse_basic_query(attr, value))\n query = And(*subqueries)\n return query", "def process_query (file):\n\n # initialize all the dictionaries and lists we will be using\n query_data = {}\n query_data ['search'] = {'operations':[]}\n query_data ['filter'] = {}\n query_data ['present'] = {}\n\n temp = ''\n\n file.readline() # for when the file says SEARCH\n\n query_data ['search']['username'] = file.readline().strip()\n\n temp = file.readline().strip()\n while temp != 'FILTER': # go until the the filter section\n query_data ['search']['operations'].append (temp)\n temp = file.readline().strip()\n\n temp = file.readline().strip()\n while temp != 'PRESENT': # go until the present section\n # we make the key everything from the beginning to the first space\n # then the value is everything after the first space\n query_data ['filter'][temp[:temp.find(' ')]] = temp[temp.find(' ') + 1:]\n temp = file.readline().strip()\n\n temp = file.readline().strip()\n while temp != '': # go until the end of the file\n # same process as the previous while loop\n query_data ['present'][temp[:temp.find(' ')]] = temp[temp.find(' ') + 1:]\n temp = file.readline().strip()\n\n return query_data", "def query_mapping(y, index):\n d = {}\n if any([i in QueryOperations.arithmetic_operators\n for i in y]):\n\n y = solve_arithmetic(y)\n\n if len(y) == 1:\n parse_function = function_lexer(y[0])\n\n if parse_function[0] in QueryOperations.arithmetic_operators:\n raise NotImplemented501()\n # I shld raise a 400 as this query, although given as example in docs,\n # should not be allowed in my opinion\n else:\n func = QueryFunctions.implemented[parse_function[0]]\n\n return func(parse_function[1], index=index)\n\n elif len(y) == 3: # valid must be comparison\n operandA = y[0].split(\"/\")\n operandA = [MODEL_KEYS[x] if x in MODEL_KEYS else x for x in operandA]\n operandA = '__'.join(operandA)\n operator = QueryOperations.comparison_operators[y[1]]\n operandB = y[2]\n\n if function_lexer(operandA):\n parse_function = function_lexer(operandA)\n if parse_function[0] in QueryOperations.arithmetic_operators:\n func = QueryOperations.arithmetic_operators[parse_function[0]]\n else:\n func = QueryFunctions.implemented[parse_function[0]]\n strng = re.search('^(?![0-9.]*$).+$', operandB)\n\n if (func == QueryFunctions.round or\n func == QueryFunctions.floor or\n func == QueryFunctions.ceiling) and strng:\n raise BadRequest\n\n funcres = func(parse_function[1], index=index, numbers=False)\n\n try:\n operandA = funcres['query_field']\n QueryObjects.TEMP_FIELD = funcres['annotation']\n except KeyError:\n pass\n if operandB[0] == \"'\" or operandB[0] == '\"':\n operandB = operandB[1:-1]\n\n if operandB == \"now()\":\n now = 
timezone.now()\n operandB = now\n operandB = now.strftime(\"%Y-%m-%d %H:%M:%S.%f\")\n\n if operandB == \"maxdatetime()\":\n maxdatetime = datetime(\n year=9999,\n month=12,\n day=30,\n hour=11,\n minute=59,\n second=59,\n tzinfo=timezone.utc\n ) # postgres error: date out of range if day = 31\n operandB = maxdatetime.strftime(\"%Y-%m-%d %H:%M:%S.%f\")\n\n if operandB == \"mindatetime()\":\n mindatetime = datetime(\n year=1,\n month=1,\n day=2,\n hour=0,\n minute=0,\n second=0,\n tzinfo=timezone.utc\n ) # postgres error: \"date out of range\" if day = 1\n operandB = mindatetime.strftime(\"%Y-%m-%d %H:%M:%S.%f\")\n\n if function_lexer(operandB):\n parse_function = function_lexer(operandB)\n if parse_function[0] in QueryOperations.arithmetic_operators:\n func = QueryOperations.arithmetic_operators[parse_function[0]]\n operandB = func(parse_function[1], numbers=True)\n\n else:\n func = QueryFunctions.implemented[parse_function[0]]\n # return func(parse_function[1], index=index)\n raise NotImplemented501()\n\n\n try:\n operandB = float(operandB)\n except ValueError:\n pass\n if operandA == 'result':\n operandA = 'result__result'\n if operandB == 'result':\n operandB = 'result__result'\n d['query'] = Q(**{operandA + operator: operandB})\n return d\n else:\n raise ParseError()", "def fromJSON(jdata):\n if jdata['type'] == 'compound':\n queries = [orb.Query.fromJSON(jquery) for jquery in jdata['queries']]\n out = orb.QueryCompound(*queries)\n out.setOp(orb.QueryCompound.Op(jdata['op']))\n return out\n else:\n if jdata.get('model'):\n model = orb.schema.model(jdata.get('model'))\n if not model:\n raise orb.errors.ModelNotFound(schema=jdata.get('model'))\n else:\n column = (model, jdata['column'])\n else:\n column = (jdata['column'],)\n\n query = orb.Query(*column)\n query.setOp(orb.Query.Op(jdata.get('op', 'Is')))\n query.setInverted(jdata.get('inverted', False))\n query.setCaseSensitive(jdata.get('caseSensitive', False))\n query.setValue(jdata.get('value'))\n\n # restore the function information\n for func in jdata.get('functions', []):\n query.addFunction(orb.Query.Function(func))\n\n # restore the math information\n for entry in jdata.get('math', []):\n query.addMath(orb.Query.Math(entry.get('op')), entry.get('value'))\n return query", "def execute_json_query(self, raw_query, order_by_field):\n return oai_record_api.execute_json_query(\n raw_query, self.request.user, order_by_field\n )", "def _format_queries(self, body):\n for query in body:\n if \"bindVars\" in query:\n query[\"bind_vars\"] = query.pop(\"bindVars\")\n if \"runTime\" in query:\n query[\"runtime\"] = query.pop(\"runTime\")\n return body", "def execute(self, triple_map, output, **kwargs):\n subjects = []\n logical_src_iterator = str(triple_map.logicalSource.iterator)\n json_object = kwargs.get('obj', self.source)\n # Removes '.' 
as a generic iterator, replace with '@'\n if logical_src_iterator == \".\":\n results = [None,]\n else:\n json_path_exp = jsonpath_ng.parse(logical_src_iterator)\n results = [r.value for r in json_path_exp.find(json_object)][0]\n for row in results:\n subject = self.generate_term(term_map=triple_map.subjectMap,\n **kwargs)\n for pred_obj_map in triple_map.predicateObjectMap:\n predicate = pred_obj_map.predicate\n if pred_obj_map.template is not None:\n output.add((\n subject,\n predicate,\n self.generate_term(term_map=pred_obj_map, **kwargs)))\n\n if pred_obj_map.parentTriplesMap is not None:\n self.__handle_parents__(\n output,\n parent_map=pred_obj_map.parentTriplesMap,\n subject=subject,\n predicate=predicate,\n obj=row,\n **kwargs)\n if pred_obj_map.reference is not None:\n ref_exp = jsonpath_ng.parse(str(pred_obj_map.reference))\n found_objects = [r.value for r in ref_exp.find(row)]\n for obj in found_objects:\n if rdflib.term._is_valid_uri(obj):\n rdf_obj = rdflib.URIRef(str(obj))\n else:\n rdf_obj = rdflib.Literal(str(obj))\n output.add((subject, predicate, rdf_obj))\n if pred_obj_map.constant is not None:\n output.add((subject,\n predicate,\n pred_obj_map.constant))\n subjects.append(subject)\n return subjects", "def process_queries(req, save_dir, replace_allowed):\n create_dir_if_not_exists(save_dir)\n req_obj = json.loads(req.data)\n result_dict = dict()\n if not all(map(lambda x: all(map(lambda y: isinstance(y, str), x)), req_obj.items())):\n return {\"message\": f\"Not all query names or values are strings\"}, 400\n for query_name, query_value in req_obj.items():\n exists = os.path.exists(os.path.join(save_dir, query_name))\n if not is_valid_query(query_value):\n result_dict[query_name] = \"invalid XPath expression\"\n elif not replace_allowed and exists:\n result_dict[query_name] = \"not created: already exists\"\n elif replace_allowed and not exists:\n result_dict[query_name] = \"not replaced: does not exist\"\n else:\n with open(os.path.join(save_dir, query_name), 'w') as f:\n f.write(query_value)\n result_dict[query_name] = \"replaced\" if replace_allowed else \"created\"\n if len(os.listdir(save_dir)) == 0:\n os.rmdir(save_dir)\n return result_dict, 200", "def query(src, jq):\n for res in Query(jq).execute(loads(src)):\n if isinstance(res, (Null, Node)):\n yield pyjson.loads(str(res))\n else:\n yield [pyjson.loads(str(item)) for item in res]", "def compile(query, use_nested_keys=True,\n key_separator='.', case_sensitive=True,\n raise_keyerror=False):\n ast = parser.parse(query)\n return DataQueryVisitor(\n ast, use_nested_keys=use_nested_keys,\n key_separator=key_separator, case_sensitive=case_sensitive,\n raise_keyerror=raise_keyerror)", "def query(jsonString):\n\n logger.debug(jsonString)\n\n # decode json\n # Product.objects.select_related()\n jsonQuery = parseJson(jsonString)\n\n # ensure we passed in keywords\n if 'keywords' not in jsonQuery:\n logger.error(\"No keywords in query json: %s\", jsonQuery)\n raise ValueError(\"Expecting json with keywords attribute\")\n\n # TODO: enable keywords - get a set of products based on the keywords\n # keywords = jsonQuery['keywords']\n products = Product.objects.only(\"normalized\", \"attr.name\", \"attr.image\", \"attr.url\")\n #products = Product.objects\n\n # TODO: should this stuff happen in the API? 
Or in helper functions?\n if 'filters' in jsonQuery:\n merger = FilterMerger()\n\n for f in jsonQuery['filters']:\n merger.add(f)\n\n # filter the products based on the filters\n d = merger.merge()\n products = products.filter(**d)\n else:\n logger.info(\"No filters in query json: %s\", jsonQuery)\n\n products = [p for p in products]\n\n selected_attrs = ['size_class']\n if 'attributes' in jsonQuery:\n selected_attrs = jsonQuery['attributes']\n\n dep_attrs = ['price', 'ratings_avg']\n\n all_relevant_attrs = selected_attrs + dep_attrs\n\n #TODO: populate absolute stats automatically\n response_json = {'attrs': {},\n 'selectedAttrs': selected_attrs,\n 'rawData': [],\n 'topProducts': [],\n 'absoluteStats': {'priceMax': 9999.98,\n 'priceMin': 69.99,\n 'priceRange': 9999.98 - 69.99,\n 'sizeMax': 92,\n 'sizeMin': 8,\n 'sizeRange': 92 - 8}}\n\n for ai in AttrInfo.objects.filter(**{'is_independant': True}):\n all_relevant_attrs.append(ai.name)\n if ai.rank < 0:\n continue\n\n ai_json = {}\n response_json['attrs'][ai.name] = ai_json\n ai_json['name'] = ai.name\n ai_json['displayName'] = ai.display_name\n ai_json['helpText'] = ai.help_text\n ai_json['rank'] = ai.rank\n ai_json['isDiscrete'] = ai.is_discrete\n ai_json['units'] = ai.units\n ai_json['options'] = []\n\n for val in ai.values:\n val_filtered = docSetFilter(products, ai.name, val, None)\n if len(val_filtered) == 0:\n val_json = {}\n ai_json['options'].append(val_json)\n val_json['value'] = val\n val_json['count'] = 0\n val_json['stats'] = {}\n else:\n val_json = {}\n ai_json['options'].append(val_json)\n val_json['value'] = val\n val_json['count'] = len(val_filtered)\n val_json['stats'] = {}\n\n for dep in dep_attrs + selected_attrs:\n dep_filtered = docSetFilter(val_filtered, None, None, dep)\n if len(dep_filtered) == 0:\n continue\n\n vector = docSetToVector(val_filtered, None, None, dep)\n\n dep_json = {}\n val_json['stats'][dep] = dep_json\n dep_json['name'] = dep\n dep_json['mean'] = numpy.mean(vector)\n dep_json['median'] = numpy.median(vector)\n dep_json['stdDev'] = numpy.std(vector)\n dep_json['min'] = numpy.min(vector)\n dep_json['max'] = numpy.max(vector)\n\n for prod in products:\n prod_json = getSingleProductJson(prod, all_relevant_attrs)\n response_json['rawData'].append(prod_json)\n\n if len(selected_attrs) > 0:\n response_json['topProducts'] = [getSingleProductJson(p, None, True, True,True)\n for p in getTopProducts(products, selected_attrs[0], 'price', 5)]\n\n #response_json['topProducts'] = \\\n #[dict(p.normalized.copy(), id=str(p.id), imgSrc=p.attr['image'], name=p.attr['name'])\n # for p in getTopProducts(products, selected_attrs[0], 'price', 5)]\n\n print \"done\"\n return response_json", "def jsonpath(self, path, patterns=[], queries=[], use_json=True):\n import hxl.filters\n return hxl.filters.JSONPathFilter(self, path, patterns=patterns, queries=queries, use_json=use_json)", "def generate_json_query(self):\n if not self.properties:\n print(\"ERROR: no properties given to generate JSON query.\")\n raise ValueError\n\n if self.data_type == DataType.ENTRY:\n q_str = \"entry_ids\"\n elif \"entit\" in self.data_type.value:\n if \"instance\" in self.data_type.value:\n q_str = \"instance_ids\"\n else:\n q_str = \"entity_ids\"\n elif self.data_type == DataType.ASSEMBLY:\n q_str = \"assembly_ids\"\n elif self.data_type == DataType.CHEMICAL_COMPONENT:\n q_str = \"comp_ids\"\n\n data_str = f\"{self.data_type.value}({q_str}: [\" + \",\".join(f\"\\\"{w}\\\"\" for w in self.id) + \"])\"\n\n props_string = \"\"\n for key, val in 
self.properties.items():\n if len(val) == 0:\n props_string += f\"{key},\"\n else:\n props_string += f\"{key} {{\" + \",\".join(val) + \"}\"\n\n self.json_query = {'query': \"{\" + data_str + \"{\" + props_string + \"}}\"}", "def execute(self, req):\n\t\tmyPath = req[\"url\"].replace(self.settings[\"ns\"][\"local\"], \"\", 1).split(\"/\")\n\t\tfile = myPath.pop(0)\n\t\tcurrentDir = getcwd()\n\t\tservice = self.basedir + file\n\t\turi = req[\"url\"]\n\t\tqueryPath = \"%s/queries/\" % service\n\t\ttemplatePath = \"%s/\" % service\n\t\ttemplateName = self.mime.getExtension(req[\"request\"].accept_mimetypes.best)\n\t\ttry:\n\t\t\tonlyfiles = [f for f in listdir(queryPath) if isfile(join(queryPath, f))]\n\t\texcept OSError:\n\t\t\tprint \"Warning: Can't find path %s for queries.\" % templatePath\n\t\t\tonlyfiles = []\n\t\tqueries = {}\n\t\tfirst={}\n\t\tfor root, dirs, files in walk(queryPath):\n\t\t\tfor filename in files:\n\t\t\t\ttry:\n\t\t\t\t\tcurrentEndpoint = \"local\"\n\t\t\t\t\t_aux = root.rstrip(\"/\").split(\"/\").pop()\n\t\t\t\t\tif _aux != \"queries\":\n\t\t\t\t\t\tcurrentEndpoint = _aux\n\t\t\t\t\tif not filename.endswith(\".query\"):\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tsqlQuery = self.env.get_template(\"%s/%s\" % (root, filename))\n\t\t\t\t\trenderedSqlQuery = sqlQuery.render(queries=queries, first=first, uri=uri, session=session, flod=self.flod, args=myPath)\n\t\t\t\t\tif re.match(\"^\\s*select\", renderedSqlQuery, flags=re.IGNORECASE) is None:\n\t\t\t\t\t\treturn {\"content\": \"Not a valid SQL Select query\", \"status\": 500}\n\t\t\t\t\tresults = self.sqlserver.query(renderedSqlQuery, currentEndpoint)\n\t\t\t\t\t_name = filename.replace(\".query\", \"\")\n\t\t\t\t\tqueries[_name] = []\n\t\t\t\t\tif results is not None:\n\t\t\t\t\t\tqueries[_name] = results\n\n\t\t\t\texcept Exception, ex:\n\t\t\t\t\tprint sys.exc_info()\n\t\t\t\t\tprint ex\n\t\t\t\t\treturn {\"content\": \"A problem with the SQL endpoint occurred\", \"status\": 500}\n\t\tchdir(currentDir)\n\t\ttry:\n\t\t\tif templateName == \"json\" and not isfile( \"%s%s.template\" % (templatePath, templateName)):\n\t\t\t\tout = json.dumps(queries)\n\t\t\telse:\n\t\t\t\tcontent = self.env.get_template(\"%s%s.template\" % (templatePath, templateName))\n\t\t\t\tout = content.render(queries=queries, uri=uri, session=session, flod=self.flod, args=myPath)\n\t\texcept Exception:\n\t\t\tprint sys.exc_info()\n\t\t\treturn {\"content\": \"Rendering problems\" , \"status\": 500}\n\t\treturn {\"content\": out, \"mimetype\": \"text/html\"}", "def get_objects(columns, filter):\n\n # Database connection\n conn = sqlite3.connect(db_path)\n c = conn.cursor()\n \n # Creating queries\n column_query = \",\".join(col for col in columns)\n filter_query = ' and '.join(k + \"='\" + v +\"'\" for k,v in filter.items())\n \n # Querying database\n c.execute(\"SELECT \" + column_query + \" FROM objects where \" + filter_query)\n rows = c.fetchall()\n\n # Close connection\n conn.close()\n\n # Found objects to dict {cpt : {obj}}\n objects = {i:{} for i in range(len(rows))}\n i = 0\n if columns == [\"*\"]:\n columns = COLUMNS\n for row in rows:\n obj = {col:\"\" for col in columns}\n for j in range(len(row)):\n obj[columns[j]] = row[j]\n objects[i] = obj\n i += 1\n return json.dumps(objects)", "def fetch_posting_list(query):\n\n global final_dictionary\n global final_doc_set\n\n proximity_window_pattern = re.compile('^[0-9]*')\n proximity_operator_pattern = re.compile('[0-9]*\\([0-9a-z ]*\\)')\n\n proximity_operator_list = 
re.findall(proximity_operator_pattern, query)\n\n if proximity_operator_list:\n for item in proximity_operator_list:\n proximity_query = item[item.find(\"(\") + 1: item.find(\")\")]\n query = query.replace(item, '')\n # The proximity operator is processed in below function\n process_proximity_operator(int(re.search(proximity_window_pattern, item).group()),\n proximity_query.split())\n\n # All the query terms other than proximity operator are processed below.\n # All the documents ids are added to the the global set since it is OR relation.\n query_words = query.split()\n if query_words:\n for word in query_words:\n for key in final_dictionary[pre_process(word)][1]:\n final_doc_set.add(key)\n\n return", "def query_json(\n self, query: str, args: Union[Dict[str, Any], str] = \"null\"\n ) -> Dict[str, Any]:\n return json.loads(self.__state.query(json_str(query), json_str(args)))", "def make_query(self, query, data: Dict):\n return query.format_map(data)", "def queryGenerator(itemset,venue,min_runs):\n level=len(itemset)\n partners=[[] for k in range(level)]\n for j in range(level):\n for i in range(level):\n partners[i].append({'partner'+str(j+1):itemset[i]})\n k=itemset.pop(0)\n itemset.append(k)\n ors=[]\n for cond in partners:\n ors.append({'$and':cond})\n \n match={}\n match['$or']=ors\n match['total_runs']={'$gte':30}\n if venue:\n match['venue']=venue\n\n return match", "def parser(string, queryset):\n QueryObjects.D = {}\n QueryObjects.B = []\n QueryObjects.IND = 0\n QueryObjects.TEMP_FIELD = None\n\n algebra = boolean.BooleanAlgebra()\n query_list = lexer(string)\n query_string = ' '.join(query_list)\n qs = algebra.parse(query_string)\n\n if QueryObjects.TEMP_FIELD:\n queryset = queryset.annotate(**QueryObjects.TEMP_FIELD)\n QueryObjects.TEMP_FIELD = None\n\n locals().update(QueryObjects.D.items())\n query = str(qs)\n query = eval(query)\n queryset = queryset.filter(query)\n return queryset", "def load_chpo_to_query_json(self,chpo_mgjson):\n query_dict = OrderedDict()\n with open(chpo_mgjson,'r') as indata:\n for line in indata:\n record = json.loads(line)\n en_name = record['name_en']\n _id = record['_id']['$oid']\n query_dict[_id] = record\n return query_dict", "def filter(data, query, use_nested_keys=True,\n key_separator='.', case_sensitive=True,\n raise_keyerror=False):\n ast = parser.parse(query)\n dq = DataQueryVisitor(\n ast, use_nested_keys=use_nested_keys,\n key_separator=key_separator, case_sensitive=case_sensitive,\n raise_keyerror=raise_keyerror)\n for item in data:\n if not dq.evaluate(item):\n continue\n yield item" ]
[ "0.52226245", "0.5208916", "0.51859844", "0.5142399", "0.50332826", "0.50120467", "0.49951917", "0.49664053", "0.4890371", "0.4886911", "0.4872044", "0.4824347", "0.48076558", "0.470443", "0.47026193", "0.46869832", "0.46731216", "0.4665948", "0.46637034", "0.46543258", "0.46457097", "0.4633055", "0.46165666", "0.4599032", "0.4597602", "0.45923844", "0.45715478", "0.45671886", "0.45560822", "0.4544682" ]
0.5270592
0
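The map document compiles the user's expression once, binds each record to _, exposes the record's attributes directly in the evaluation namespace, and flat-maps list results so a filtered-out record can simply yield []. Here is a compact sketch of that evaluation loop, with plain json.loads standing in for the row's MetaRecordReader:

import json

def map_query(query, lines, default_obj=None):
    # A bare "if" with no "else" acts as a filter: the missing branch yields nothing.
    if " if " in query and " else " not in query:
        query += " else []"
    compiled = compile(query, "<query>", "eval")
    for line in lines:
        obj = dict(default_obj or {}, **json.loads(line))
        env = dict(obj, _=obj)          # attributes plus the whole record as _
        result = eval(compiled, {}, env)
        if isinstance(result, list):    # lists are flat-mapped
            yield from result
        else:
            yield result

rows = ['{"name": "a", "n": 3}', '{"name": "b", "n": 7}']
print(list(map_query("[name] if n > 5", rows)))  # ['b']
print(list(map_query("_['n'] * 2", rows)))       # [6, 14]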
Execute a series of query expressions. 1) Evaluate each query over its corresponding input file. 2) If there are two inputs, join the results together. 3) If an aggregate is provided, aggregate the results. 4) If distinct is True, retain one copy of each input. 5) If order_by is provided, sort the results based on the given columns. 6) If limit is provided, return a prefix of the result.
def run_query(queries, files, default_obj, skip_header, agg, distinct, order_by, limit):
    its = [map(query, phile, skip_header, default_obj) for (query, phile) in zip(queries, files)]
    it = its[0]
    if len(its) == 2:
        it = join_op(*its)
    if agg is not None:
        it = aggregate_op(it, agg)
    if distinct:
        it = distinct_op(it)
    if order_by is not None:
        it = iter(sorted(it, key=itemgetter(*order_by.columns), reverse=order_by.reverse))
    if limit > 0:
        it = itertools.islice(it, limit)
    return it
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def execute_query(self):\n query_sum = self.initialize_totals()\n data = []\n\n with tenant_context(self.tenant):\n query = self.query_table.objects.filter(self.query_filter)\n query_data = query.annotate(**self.annotations)\n group_by_value = self._get_group_by()\n\n query_group_by = [\"date\"] + group_by_value\n query_order_by = [\"-date\"]\n query_order_by.extend([self.order]) # add implicit ordering\n\n query_data = query_data.values(*query_group_by).annotate(**self.report_annotations)\n\n if self._limit and query_data:\n query_data = self._group_by_ranks(query, query_data)\n if not self.parameters.get(\"order_by\"):\n # override implicit ordering when using ranked ordering.\n query_order_by[-1] = \"rank\"\n\n # Populate the 'total' section of the API response\n if query.exists():\n aggregates = self._mapper.report_type_map.get(\"aggregates\")\n metric_sum = query.aggregate(**aggregates)\n query_sum = {key: metric_sum.get(key) for key in aggregates}\n\n query_data, total_capacity = self.get_cluster_capacity(query_data)\n if total_capacity:\n query_sum.update(total_capacity)\n\n if self._delta:\n query_data = self.add_deltas(query_data, query_sum)\n is_csv_output = self.parameters.accept_type and \"text/csv\" in self.parameters.accept_type\n\n query_data = self.order_by(query_data, query_order_by)\n\n if is_csv_output:\n if self._limit:\n data = self._ranked_list(list(query_data))\n else:\n data = list(query_data)\n else:\n # Pass in a copy of the group by without the added\n # tag column name prefix\n groups = copy.deepcopy(query_group_by)\n groups.remove(\"date\")\n data = self._apply_group_by(list(query_data), groups)\n data = self._transform_data(query_group_by, 0, data)\n\n sum_init = {\"cost_units\": self._mapper.cost_units_key}\n if self._mapper.usage_units_key:\n sum_init[\"usage_units\"] = self._mapper.usage_units_key\n query_sum.update(sum_init)\n\n ordered_total = {\n total_key: query_sum[total_key] for total_key in self.report_annotations.keys() if total_key in query_sum\n }\n ordered_total.update(query_sum)\n\n self.query_sum = ordered_total\n self.query_data = data\n return self._format_query_response()", "def run_query(where_clause, limit=1000):\n sql = \"SELECT * FROM catalog WHERE {} ORDER BY creators, title LIMIT {}\"\\\n .format(where_clause, limit)\n with sqlite3.connect(db_name) as db:\n results = pd.read_sql_query(sql, db)\n print_results(results)", "def build_query(devices, filter_inter, interactions_further=None):\n\n id_2_found = CsvFormat.other_id.value\n if interactions_further is not None:\n id_2_found = CsvFormat.my_id.value\n\n query = 'SELECT i._{} as id, CAST(i._{} as int) as timestamp_start, CAST(i._{} as float) as interval_time ' \\\n 'FROM S3Object i WHERE CAST(i._{} as float) >= {}'.format(\n id_2_found,\n CsvFormat.timestamp_start.value,\n CsvFormat.interval.value,\n CsvFormat.interval.value,\n MINIMUM_INTERVAL_TIME)\n\n if 'distance' in filter_inter:\n if 'max' in filter_inter['distance']:\n query += ' and CAST(i._{} as float) <= {}'.format(CsvFormat.distance_type.value,\n filter_inter['distance']['max'])\n if 'min' in filter_inter['distance']:\n query += ' and CAST(i._{} as float) >= {}'.format(CsvFormat.distance_type.value,\n filter_inter['distance']['min'])\n\n if interactions_further is not None:\n condos = []\n for inter in interactions_further:\n orcond = '(i._{}=\\'{}\\' and (CAST(i._{} as float) between {} and {} or CAST(i._{} as float) between {} and {}))'\\\n .format(CsvFormat.other_id.value,\n inter['id'],\n 
CsvFormat.timestamp_start.value,\n inter['timestamp_start'],\n int(inter['timestamp_start'] + inter['interval_time']),\n CsvFormat.timestamp_end.value,\n inter['timestamp_start'],\n int(inter['timestamp_start'] + inter['interval_time'])\n )\n condos.append(orcond)\n if len(condos) > 0:\n query += ' and ' + ' or '.join(condos)\n else:\n query += ' and i._{} in (\\'{}\\')'.format(\n CsvFormat.my_id.value,\n '\\',\\''.join([str(x) for x in devices])\n )\n\n return query", "def exec_query(collection,\n collection_name,\n granularity,\n queries,\n query_file_name,\n fig_dir,\n grid_dir):\n\n time_grid = [[None for i in range(granularity)] for j in range(granularity)]\n plan_grid = [[0 for i in range(granularity)] for j in range(granularity)]\n itr_count = 0\n fig_id = 0\n not_exists_marker = 'NULL'\n\n for (query, b_i, a_i) in queries:\n progress = round(float(itr_count) * 100 / len(queries), 2)\n print(\"Progress {}%\".format(progress))\n\n # display result\n if progress % 2 < 0.001:\n display_grid(plan_grid,\n os.path.join(fig_dir,\n collection_name,\n query_file_name.replace(\".txt\", \"\")),\n granularity,\n id=\"fig_{:0>5d}\".format(fig_id))\n fig_id += 1\n\n # timeout\n # t_win, t_a, t_b, t_cover, t_tbl = timeout, timeout, timeout, timeout, timeout\n projection = {\"_id\": 0, \"a\": 1, \"b\": 1}\n\n # measure time consumption of executing each query plan\n print(\"Forcing collscan\")\n table_scan_explain = collection.find(query, projection).hint([(\"$natural\", 1)]).explain()\n t_tbl = table_scan_explain[\"executionStats\"][\"executionTimeMillis\"]\n\n print(\"Forcing aIdx\")\n t_a = not_exists_marker\n if \"aIdx\" in collection.index_information():\n idx_a_explain = collection.find(query, projection).hint(\"aIdx\").explain()\n t_a = idx_a_explain[\"executionStats\"][\"executionTimeMillis\"]\n\n print(\"Forcing bIdx\")\n t_b = not_exists_marker\n if \"bIdx\" in collection.index_information():\n idx_b_explain = collection.find(query, projection).hint(\"bIdx\").explain()\n t_b = idx_b_explain[\"executionStats\"][\"executionTimeMillis\"]\n\n print(\"Forcing coverIdx\")\n t_cover = not_exists_marker\n if \"coverIdx\" in collection.index_information():\n idx_cover_explain = collection.find(query, projection).hint(\"coverIdx\").explain()\n t_cover = idx_cover_explain[\"executionStats\"][\"executionTimeMillis\"]\n\n # NOTE: FORMAT a|b|coverIdx|collscan\n t_s = [str(t_a), str(t_b), str(t_cover), str(t_tbl)]\n time_grid[b_i][a_i] = \"|\".join(t_s)\n\n # run the query without hint\n print(\"Finding winner\")\n exec_explain = collection.find(query, projection).explain()\n # t_win = exec_explain[\"executionStats\"][\"executionTimeMillis\"]\n winning_plan = str(exec_explain['queryPlanner']['winningPlan'])\n\n if 'aIdx' in winning_plan:\n plan_grid[b_i][a_i] = 1\n elif 'bIdx' in winning_plan:\n plan_grid[b_i][a_i] = 2\n elif 'coverIdx' in winning_plan:\n plan_grid[b_i][a_i] = 3\n elif 'COLLSCAN' in winning_plan:\n plan_grid[b_i][a_i] = 4\n\n pprint(exec_explain['queryPlanner'])\n print(\"Time: a: {}, b: {}, cover: {} ,collscan: {}\".format(t_a, t_b, t_cover, t_tbl))\n print(\"=\" * 60)\n\n itr_count += 1\n\n save_grid(plan_grid, os.path.join(grid_dir, collection_name,\n \"plan_grid{}\".format(query_file_name.replace(\"query\", \"\"))))\n save_grid(time_grid, os.path.join(grid_dir, collection_name,\n \"time_grid{}\".format(query_file_name.replace(\"query\", \"\"))))\n\n display_grid(plan_grid,\n os.path.join(fig_dir,\n collection_name,\n query_file_name.replace(\".txt\", \"\")),\n granularity,\n 
id=\"fig_{:0>5d}\".format(fig_id))\n return", "def query(_from, _select, _geomselect=None, _where=None, _groupby=None, _limit=None):\n # INSTEAD MAKE INTO CLASS\n # WITH .fields attr\n # AND .__iter__()\n # AND .get_vectordata()\n # AND MAKE EACH YIELDED ROW A VECTOR FEATURE CLASS\n # THIS WAY ALLOWING CHAINED QUERIES\n\n # parse args\n iterables = _from\n columnfuncs = _select\n geomfunc = _geomselect\n condition = _where\n key = _groupby\n n = _limit\n \n # first yield header as list of column names\n colnames = [each[0] for each in columnfuncs]\n yield colnames\n\n # make an iterable that yields every combinaion of all input iterables' items\n if len(iterables) == 1:\n iterable = iterables[0]\n else:\n iterable = itertools.product(*iterables)\n\n # iterate and add\n if key:\n groups = groupby(iterable, key)\n\n # limit\n if n:\n groups = limit(groups, n)\n \n for items in groups:\n # filter\n if condition:\n items = where(items, condition)\n \n # aggregate\n # NOTE: columnfuncs and geomfunc must expect an iterable as input and return a single row,geom pair\n item = aggreg(items, columnfuncs, geomfunc)\n yield item\n \n else:\n # filter\n if condition:\n iterable = where(iterable, condition)\n\n # limit\n if n:\n iterable = limit(iterable, n)\n\n # select\n for item in select(iterable, columnfuncs, geomfunc):\n yield item", "def execute_query(\n # TODO: Passing the whole clickhouse query here is needed as long\n # as the execute method depends on it. Otherwise we can make this\n # file rely either entirely on clickhouse query or entirely on\n # the formatter.\n clickhouse_query: Union[Query, CompositeQuery[Table]],\n request_settings: RequestSettings,\n formatted_query: FormattedQuery,\n reader: Reader,\n timer: Timer,\n stats: MutableMapping[str, Any],\n query_settings: MutableMapping[str, Any],\n robust: bool,\n) -> Result:\n # Experiment, if we are going to grab more than X columns worth of data,\n # don't use uncompressed_cache in ClickHouse.\n uc_max = state.get_config(\"uncompressed_cache_max_cols\", 5)\n assert isinstance(uc_max, int)\n column_counter = ReferencedColumnsCounter()\n column_counter.visit(clickhouse_query.get_from_clause())\n if column_counter.count_columns() > uc_max:\n query_settings[\"use_uncompressed_cache\"] = 0\n\n # Force query to use the first shard replica, which\n # should have synchronously received any cluster writes\n # before this query is run.\n consistent = request_settings.get_consistent()\n stats[\"consistent\"] = consistent\n if consistent:\n query_settings[\"load_balancing\"] = \"in_order\"\n query_settings[\"max_threads\"] = 1\n\n result = reader.execute(\n formatted_query,\n query_settings,\n with_totals=clickhouse_query.has_totals(),\n robust=robust,\n )\n\n timer.mark(\"execute\")\n stats.update(\n {\"result_rows\": len(result[\"data\"]), \"result_cols\": len(result[\"meta\"])}\n )\n\n return result", "def batched_query(self, sql):\r\n\r\n result_sets = []\r\n messages = \"\"\r\n query = []\r\n last_query=\"\"\r\n\r\n batches = re.split(\"^\\s*(GO(?:\\s+[0-9]+)?)\\s*(?:--.*)?$\",sql,flags=re.M|re.I)\r\n # print(batches)\r\n for b in batches:\r\n if b.upper() == \"GO\":\r\n # execute one\r\n query.append(last_query)\r\n continue\r\n else:\r\n match = re.match(\"^GO\\s+([0-9]+)$\",b,re.I)\r\n if match is not None:\r\n #execute many\r\n for i in range(0,int(match.group(1))):\r\n query.append(last_query)\r\n else:\r\n # not a Go statment\r\n last_query = b\r\n query.append(last_query)\r\n\r\n # print(query)\r\n for q in query:\r\n r = 
self.query(q)\r\n if r is not None:\r\n result_sets.extend(r)\r\n messages += self.messages\r\n\r\n self.messages = messages\r\n return result_sets", "def _sql_aggregate(self, sql_function, keys=None, **kwds_filter):\n # TODO: _sql_aggregate has grown messy after a handful of\n # iterations look to refactor it in the future to improve\n # maintainability.\n if not _is_nsiterable(sql_function):\n sql_function = (sql_function,)\n\n if keys == None:\n sql_function = ', '.join(sql_function)\n cursor = self._execute_query(sql_function, **kwds_filter)\n result = cursor.fetchone()\n if len(result) == 1:\n return result[0]\n return result # <- EXIT!\n\n if not _is_nsiterable(keys):\n keys = (keys,)\n group_clause = [self._normalize_column(x) for x in keys]\n group_clause = ', '.join(group_clause)\n\n select_clause = '{0}, {1}'.format(group_clause, ', '.join(sql_function))\n trailing_clause = 'GROUP BY ' + group_clause\n\n cursor = self._execute_query(select_clause, trailing_clause, **kwds_filter)\n pos = len(sql_function)\n iterable = ((row[:-pos], getvals(row)) for row in cursor)\n if pos > 1:\n # Gets values by slicing (i.e., row[-pos:]).\n iterable = ((row[:-pos], row[-pos:]) for row in cursor)\n else:\n # Gets value by index (i.e., row[-pos]).\n iterable = ((row[:-pos], row[-pos]) for row in cursor)\n return CompareDict(iterable, keys)", "def _sql_aggregate(self, sql_function, keys=None, **kwds_filter):\n # TODO: _sql_aggregate has grown messy after a handful of\n # iterations look to refactor it in the future to improve\n # maintainability.\n if not nonstringiter(sql_function):\n sql_function = (sql_function,)\n\n if keys == None:\n sql_function = ', '.join(sql_function)\n cursor = self._execute_query(sql_function, **kwds_filter)\n result = cursor.fetchone()\n if len(result) == 1:\n return result[0]\n return result # <- EXIT!\n\n if not nonstringiter(keys):\n keys = (keys,)\n group_clause = [self._normalize_column(x) for x in keys]\n group_clause = ', '.join(group_clause)\n\n select_clause = '{0}, {1}'.format(group_clause, ', '.join(sql_function))\n trailing_clause = 'GROUP BY ' + group_clause\n\n cursor = self._execute_query(select_clause, trailing_clause, **kwds_filter)\n pos = len(sql_function)\n iterable = ((row[:-pos], getvals(row)) for row in cursor)\n if pos > 1:\n # Gets values by slicing (i.e., row[-pos:]).\n iterable = ((row[:-pos], row[-pos:]) for row in cursor)\n else:\n # Gets value by index (i.e., row[-pos]).\n iterable = ((row[:-pos], row[-pos]) for row in cursor)\n return CompareDict(iterable, keys)", "def _multi_query_execution(self):\n multi_query_staging = self.query.split(';')\n for query in multi_query_staging:\n self.query = query\n self._execute_command()", "def create_query(window,con,input_table_name,output_table_name,input_columns, stat_columns):\n sql = \"CREATE TABLE {} AS \".format(output_table_name)\n sql = sql + \"SELECT\" \n for input_column in input_columns:\n sql = sql + \" {},\".format(input_column)\n for stat_column in stat_columns:\n sql = sql + \" {},\".format(stat_column)\n for stat_column in stat_columns:\n sql = sql + \" AVG({}) OVER(PARTITION BY pfafid_30spfaf06, month, temporal_resolution ORDER BY year ROWS BETWEEN {:01.0f} PRECEDING AND CURRENT ROW) AS ma{:02.0f}_{},\".format(stat_column,window-1,window,stat_column)\n sql = sql + \" MIN({}) OVER(PARTITION BY pfafid_30spfaf06, month, temporal_resolution ORDER BY year ROWS BETWEEN {:01.0f} PRECEDING AND CURRENT ROW) AS min{:02.0f}_{},\".format(stat_column,window-1,window,stat_column)\n sql = sql + \" 
MAX({}) OVER(PARTITION BY pfafid_30spfaf06, month, temporal_resolution ORDER BY year ROWS BETWEEN {:01.0f} PRECEDING AND CURRENT ROW) AS max{:02.0f}_{},\".format(stat_column,window-1,window,stat_column)\n sql = sql + \" regr_slope({},year) OVER (PARTITION BY pfafid_30spfaf06, month, temporal_resolution ORDER BY year ROWS BETWEEN {:01.0f} PRECEDING AND CURRENT ROW) AS slope{:02.0f}_{},\".format(stat_column,window-1,window,stat_column)\n sql = sql + \" regr_intercept({},year) OVER (PARTITION BY pfafid_30spfaf06, month, temporal_resolution ORDER BY year ROWS BETWEEN {:01.0f} PRECEDING AND CURRENT ROW) AS intercept{:02.0f}_{},\".format(stat_column,window-1,window,stat_column)\n sql = sql + (\" regr_slope({},year) OVER (PARTITION BY pfafid_30spfaf06, month, temporal_resolution ORDER BY year ROWS BETWEEN {:01.0f} PRECEDING AND CURRENT ROW) * year \"\n \"+ regr_intercept({},year) OVER (PARTITION BY pfafid_30spfaf06, month, temporal_resolution ORDER BY year ROWS BETWEEN {:01.0f} PRECEDING AND CURRENT ROW) AS ols{:02.0f}_{},\".format(stat_column,window-1,stat_column,window-1,window,stat_column))\n \n \n sql = sql[:-1]\n sql = sql + \" FROM {}\".format(input_table_name)\n return sql", "def process(self, inputs):\n\n input_df = inputs[0]\n str_list = []\n for column_item in self.conf:\n column_name = column_item['column']\n if 'min' in column_item:\n minValue = column_item['min']\n str_item = '%s >= %f' % (column_name, minValue)\n str_list.append(str_item)\n if 'max' in column_item:\n maxValue = column_item['max']\n str_item = '%s <= %f' % (column_name, maxValue)\n str_list.append(str_item)\n input_df = input_df.query(\" and \".join(str_list))\n return input_df", "def setupResults(sql, limit=1000):\n # log the incoming query\n LOG(sql)\n\n #x_internal is the list for the iteration over one search result, x the overall list (which is deduplicated in the end)\n \n molecules = []\n molecules_internal = []\n atoms = []\n atoms_internal = []\n sources = []\n sources_internal = []\n particles = []\n electron_particle = Particle('electron')\n\n inchiconvertedsearch = False\n\n #define the last modified header with an old date. we will compare all timestamps to this and take the most recent one\n lastmodifiedheader = datetime.datetime(1970, 01, 01, 01, 01)\n\n #use the function sql2Q provided by vamdctap to translate from query to Q-object\n q = sql2Q(sql)\n\n #create queryset for energyscans according to query\n energyscans = models.Energyscan.objects.filter(q)\n\n # count the number of matches\n nenergyscans = energyscans.count()\n \n #in case somebody is searching for a InchiKey and it didn't bring up any results:\n #convert the inchikey to an inchi, extract the sum formula and try again\n if nenergyscans == 0:\n if re.search('InchiKey', str(sql)) is not None:\n match = re.search('[A-Z]{14}-[A-Z]{10}-[A-Z]', str(sql))\n if match is not None:\n inchikey = str(sql)[match.start():match.end()]\n chemical_formula = inchikey2chemicalformula(inchikey)\n\n #now we extracted the stochiometric / chemical formula from the inchi. 
\n #let's see if there is something in the DB\n if chemical_formula is not None:\n energyscans = models.Energyscan.objects.filter(Q(species__chemical_formula__exact=chemical_formula)|Q(origin_species__chemical_formula__exact=chemical_formula))\n nenergyscans = energyscans.count()\n inchiconvertedsearch = True\n\n #append electron if there are results:\n if nenergyscans != 0:\n particles.append(electron_particle)\n\n #loop over energyscans that came back\n\n for energyscan in energyscans:\n #compare if lastmodified is newer than then newest we have already included\n if energyscan.lastmodified > lastmodifiedheader:\n lastmodifiedheader = energyscan.lastmodified\n\n #our reactants are always molecules. here we check if the product is a molecule.\n if energyscan.species.molecule:\n molecules_internal = models.Species.objects.filter(Q(id__exact=energyscan.species.id)|Q(id__exact=energyscan.origin_species.id))\n else:\n atoms_internal = models.Species.objects.filter(Q(id__exact=energyscan.species.id))\n molecules_internal = models.Species.objects.filter(Q(id__exact=energyscan.origin_species.id))\n\n energyscan.Products = models.Species.objects.filter(id__exact=energyscan.species.id)\n energyscan.Reactants = models.Species.objects.filter(id__exact=energyscan.origin_species.id)\n\n #this part is a bit tricky: we make a new species-object which we give the ID 'electron'. otherwise it is empty\n #then we use list on the queryset energyscan.Reactants to force it to be executed.\n #afterwards, we append the newly created electron instance of the class species\n\n #keep in mind, that we actually defined the particle electron further up in the Particle() class. it was instanciated in the beginning of this function under the object electron_particle\n \n electron = models.Species('electron', '', '', '', '')\n energyscan.Reactants = list(energyscan.Reactants.all())\n energyscan.Reactants.append(electron)\n\n #make products negative\n for product in energyscan.Products:\n for molecule in molecules_internal:\n if product.id == molecule.id:\n molecule.ioncharge = -1\n else:\n molecule.ioncharge = 0 \n for atom in atoms_internal:\n if product.id == atom.id:\n atom.ioncharge = -1\n else:\n atom.ioncharge = 0\n\n #calculate exact / nominal masses\n for atom in atoms_internal:\n if molecule.isotope is True:\n atom.exactmass = chemlib.chemicalformula2exactmass(atom.chemical_formula)\n\n for molecule in molecules_internal:\n if molecule.isotope is True:\n molecule.mass = chemlib.chemicalformula2exactmass(molecule.chemical_formula)\n\n #treat sources\n sources_internal = models.Source.objects.filter(id__exact=energyscan.source.id)\n for source in sources_internal:\n authorlist = []\n for author in source.authors.all():\n authorlist.append(u'%s, %s'%(author.lastname, author.firstname))\n\n source.author = authorlist\n\n #insert the standard-comment in addition to a possibly existing user-specific comment\n standardcomment = 'X-Values are measured with an energy resolution of %s eV. Therefore every shown peak is the original peak shape convoluted with our resolution. Energy scans are calibrated. 
Therefore we estimate an error of 0.1 eV' % energyscan.energyresolution \n\n if energyscan.comment != '':\n usercomment = energyscan.comment\n energyscan.comment = 'Comment of the Producer: ' + usercomment + ' Additional Comment: ' + standardcomment\n else:\n energyscan.comment = standardcomment \n\n #give warning when we converted inchikey to chemical formula for searching\n if inchiconvertedsearch is True:\n inchiwarning = 'WARNING: For this query, an InChI-Key was converted to a stoichiometric formula, because otherwise no results were obtained. '\n energyscan.comment = inchiwarning + energyscan.comment\n\n #prepare the origin data\n ES_list = energyscan.energyscan_data.split()\n k = 0\n x = []\n y = []\n for datapoint in ES_list:\n datapoint = datapoint.replace(',','.')\n #even -> x-value\n if k % 2 == 0:\n x.append(float(datapoint))\n #odd -> y-value\n else: \n y.append(float(datapoint))\n k = k + 1\n\n if len(x) != len(y):\n LOG('WARNING - number of x and y values is not equal')\n\n #create datasets\n energyscan.DataSets = []\n dataset = DataSet(energyscan.source.id, x, y, energyscan.productiondate, energyscan.y_units)\n dataset.description = 'crossSection'\n dataset.accuracytype = 'systematic'\n energyscan.DataSets.append(dataset)\n\n #here we combine the list for molecules, atoms and sources from this particular energyscan with the query-wide list and remove all duplicates\n #see http://stackoverflow.com/questions/1319338/combining-two-lists-and-removing-duplicates-without-removing-duplicates-in-orig\n molecules = molecules + list(set(molecules_internal) - set(molecules))\n atoms = atoms + list(set(atoms_internal) - set(atoms))\n sources = sources + list(set(sources_internal) - set(sources))\n\n #count species and sources in order to return it to the headerinfo\n\n nsources = len(sources)\n nmolecules = len(molecules)\n natoms = len(atoms)\n nspecies = natoms + nmolecules\n\n #Create the Last Modified header\n #the header must not be newer than now!\n if lastmodifiedheader > datetime.datetime.now():\n lastmodifiedheader = datetime.datetime.now()\n lastmodifiedheader = formatdate(time.mktime(lastmodifiedheader.timetuple()))\n\n # Create the header with some useful info. The key names here are\n # standardized and shouldn't be changed.\n headerinfo = {\\\n 'COUNT-SOURCES':nsources,\n 'COUNT-SPECIES':nspecies,\n 'COUNT-ATOMS':natoms,\n 'COUNT-MOLECULES':nmolecules,\n 'COUNT-COLLISIONS':nenergyscans,\n 'COUNT-STATES':0,\n 'COUNT-RADIATIVE':0,\n 'COUNT-NONRADIATIVE':0,\n 'LAST-MODIFIED':lastmodifiedheader,\n }\n\n # Return the data if it is not empty... The keynames are standardized. 
\n if nenergyscans > 0:\n return {'CollTrans':energyscans,\n 'Sources':sources,\n 'Atoms':atoms,\n 'Molecules':molecules,\n 'Particles':particles,\n 'HeaderInfo':headerinfo,\n }\n else:\n return {}", "def query(cls, transaction, expr, order=None, ascending=True, group=None, forUpdate=False, noWait=False, limit=None):\n kw = {}\n if order is not None:\n kw.update(OrderBy=order, Ascending=ascending)\n if group is not None:\n kw.update(GroupBy=group)\n if forUpdate:\n kw.update(ForUpdate=True)\n if noWait:\n kw.update(NoWait=True)\n if limit is not None:\n kw.update(Limit=limit)\n return cls._rowsFromQuery(\n transaction,\n Select(\n list(cls.table),\n From=cls.table,\n Where=expr,\n **kw\n ),\n None\n )", "def aggregate_query(self):\n raise NotImplementedError", "async def execute(\n self,\n db_name,\n sql,\n params=None,\n truncate=False,\n custom_time_limit=None,\n page_size=None,\n ):\n page_size = page_size or self.page_size\n\n def sql_operation_in_thread():\n conn = getattr(connections, db_name, None)\n if not conn:\n info = self.ds.inspect()[db_name]\n conn = sqlite3.connect(\n \"file:{}?immutable=1\".format(info[\"file\"]),\n uri=True,\n check_same_thread=False,\n )\n self.ds.prepare_connection(conn)\n setattr(connections, db_name, conn)\n\n time_limit_ms = self.ds.sql_time_limit_ms\n if custom_time_limit and custom_time_limit < self.ds.sql_time_limit_ms:\n time_limit_ms = custom_time_limit\n\n with sqlite_timelimit(conn, time_limit_ms):\n try:\n cursor = conn.cursor()\n cursor.execute(sql, params or {})\n max_returned_rows = self.max_returned_rows\n if max_returned_rows == page_size:\n max_returned_rows += 1\n if max_returned_rows and truncate:\n rows = cursor.fetchmany(max_returned_rows + 1)\n truncated = len(rows) > max_returned_rows\n rows = rows[:max_returned_rows]\n else:\n rows = cursor.fetchall()\n truncated = False\n except sqlite3.OperationalError as e:\n if e.args == ('interrupted',):\n raise InterruptedError(e)\n print(\n \"ERROR: conn={}, sql = {}, params = {}: {}\".format(\n conn, repr(sql), params, e\n )\n )\n raise\n\n if truncate:\n return rows, truncated, cursor.description\n\n else:\n return rows\n\n return await asyncio.get_event_loop().run_in_executor(\n self.executor, sql_operation_in_thread\n )", "def inference_sql(sql_dict, table_json, args, join_on_label=None):\n global globe_join_on_label_count\n globe_join_on_label_count = 0\n sql_dict['where'] = intersect_check(sql_dict['where'])\n groupby_list = []\n groupby_top = \"\"\n re_sql = \"select distinct \" if sql_dict['select'][0] else \"select \"\n orderby_sql, table_list, agg_in_order = (\"\", [], False)\n # if args.orderby_to_subquery and is_orderby_for_subquery(sql_dict): # v1.1\n # orderby_sql,table_list,agg_in_order = (\"\",[],False)\n # else:\n # orderby_sql,table_list,agg_in_order = create_order_by(sql_dict['orderBy'],sql_dict['limit'])\n\n # Get table info from select column\n for column in sql_dict['select'][1]:\n table = column[1][1][1].split('.')[0].lower()\n if not table in table_list:\n table_list.append(table)\n select_unit = select_unit_back(column)\n if not (column[0] or column[1][1][0]):\n groupby_list.append(select_unit)\n re_sql += select_unit + ' , '\n re_sql = re_sql[:-3]\n top_select_table_list = copy.deepcopy(table_list)\n # Add table info to select column\n break_idx, table_list, next_sql, sql_where, sql_having, orderby_sql_, next_table_list = get_where_column(sql_dict,\n table_list,\n 0, SQL_TOP,\n table_json,\n args)\n if break_idx < 0 or next_sql == SQL_TOP:\n orderby_sql, 
table_list_order, agg_in_order = create_order_by(sql_dict['orderBy'], sql_dict['limit'])\n for order_t in table_list_order:\n if order_t.lower() not in table_list:\n table_list.append(order_t.lower())\n\n if sql_dict['groupBy']: # V1.1:\n groupby_top = \" group by \" + col_unit_back(sql_dict['groupBy'][0])\n elif (len(groupby_list) != len(sql_dict['select'][1]) and groupby_list) or sql_having.strip() != '' or (\n agg_in_order and groupby_list) or orderby_sql_.strip():\n if args.group_for_exact_match and len(groupby_list) > 1:\n groupby_list = infer_group_for_exact_match(groupby_list, table_json)\n groupby_top = \" group by \" + \",\".join(groupby_list)\n\n orderby_sql += orderby_sql_\n from_table_net, table_fk_list = get_table_network(table_json, table_list, join_on_label)\n top_sql_list = [re_sql]\n re_sql += create_from_table(from_table_net, table_json['table_names_original'],\n table_json['table_column_names_original'], table_fk_list)\n top_sql_list.append(re_sql + sql_where + groupby_top + sql_having)\n\n if sql_dict['where']:\n while next_sql:\n table_list = next_table_list # []#V1.2\n if next_sql == SQL_TOP:\n sub_sql = \" \" + sql_dict['where'][break_idx][:-1] + \" \" + top_sql_list[0]\n table_list = top_select_table_list\n start_new_top_sql = True\n else:\n select_column = col_unit_back(sql_dict['where'][break_idx][3])\n sub_sql = \"select \" + select_column\n if sql_dict['where'][break_idx][3][1].split('.')[0].lower() not in table_list:\n table_list.append(sql_dict['where'][break_idx][3][1].split('.')[0].lower())\n start_new_top_sql = False\n\n break_idx, table_list, next_sql, sql_where, sql_having, orderby_sql_, next_table_list = get_where_column(\n sql_dict, table_list, break_idx + 1, next_sql, table_json, args)\n if args.orderby_to_subquery and not orderby_sql_:\n orderby_sql_, table_list = orderby_to_subquery(sql_dict, table_list) # v1.1\n\n # if not start_new_top_sql:\n from_table_net, table_fk_list = get_table_network(table_json, table_list, join_on_label)\n sub_sql += create_from_table(from_table_net, table_json['table_names_original'],\n table_json['table_column_names_original'], table_fk_list)\n\n # if sql_where.strip() != 'where':\n sub_sql += sql_where\n\n if not start_new_top_sql:\n # if (sql_having.strip() and select_column) or (orderby_sql_.strip() and select_column):#v1.0\n if (sql_having.strip() and select_column) or ((\n \"max(\" in orderby_sql_ or \"min(\" in orderby_sql_ or \"count(\" in orderby_sql_ or \"sum(\" in orderby_sql_ or \"avg(\" in orderby_sql_) and select_column): # v1.0\n sub_sql += \" group by \" + select_column\n else:\n if groupby_top.strip():\n sub_sql += groupby_top\n elif (sql_having.strip() != '' and groupby_list) or (orderby_sql_.strip() and groupby_list):\n sub_sql += \" group by \" + \",\".join(groupby_list)\n\n sub_sql += sql_having + orderby_sql_\n\n if start_new_top_sql:\n top_sql_list.append(sub_sql)\n else:\n top_sql_list[len(top_sql_list) - 1] = top_sql_list[len(top_sql_list) - 1].replace('@@@', sub_sql, 1)\n\n re_sql = \"\"\n for idx, sql in enumerate(top_sql_list):\n if idx > 0:\n re_sql += sql\n\n re_sql += orderby_sql\n\n return re_sql", "def process_query(query):\n tokens = query.split(' ')\n keywords = ['UNION','MINUS','CROSS']\n if any(x in query for x in keywords):\n if 'UNION' in tokens:\n for i,token in enumerate(tokens):\n if token == 'UNION':\n table1 = process_query(' '.join(tokens[:i]))\n table2 = process_query(' '.join(tokens[i+1:]))\n table1.union(table2)\n result = DBTable()\n return result\n elif 'MINUS' in 
tokens:\n for i,token in enumerate(tokens):\n if token == 'MINUS':\n table1 = process_query(' '.join(tokens[:i]))\n table2 = process_query(' '.join(tokens[i+1:]))\n table1.setDifference(table2)\n result = DBTable()\n return result\n elif 'CROSS' in tokens:\n for i,token in enumerate(tokens):\n if token == 'CROSS':\n table1 = process_query(' '.join(tokens[:i]))\n table2 = process_query(' '.join(tokens[i+1:]))\n table1.cartesianProduct(table2)\n result = DBTable()\n return result\n\n \n else:\n # Query without the keywords UNION, MINUS or CROSS\n cols = tokens[1]\n table_no = tables_dict[tokens[3]]\n if cols == '*':\n cols = tables[table_no].columnNames\n else:\n cols = cols.split(',')\n if 'WHERE' in tokens:\n for i,t in enumerate(tokens):\n if t == 'WHERE':\n c = Clause()\n c.operand1 = tokens[i+1]\n c.operator = tokens[i+2]\n c.operand2 = tokens[i+3]\n result = tables[table_no].selection(c)\n return result.projection(cols)\n return tables[table_no].projection(cols)", "def query(self, query, *variables, limit=None):\n self.cursor.execute(query, variables)\n try:\n if limit:\n return self.cursor.fetchmany(limit)\n else:\n return self.cursor.fetchall()\n except psycopg2.ProgrammingError:\n return None", "def seq_query():\n query_type = input(\n '1.Specific fragment\\n'\n '2.Specific Organism\\n'\n '3.Specific gene\\n'\n '4.All\\n'\n '5.All cds\\n'\n )\n organize = input('Organize output?(y/n)\\n')\n if query_type not in ['1', '2', '3', '4', '5']:\n raise ValueError('wrong input!\\n')\n con = sqlite3.connect('./data/DB')\n cur = con.cursor()\n if query_type == '1':\n organism = input('Organism:\\n')\n gene = input('Gene:\\n')\n frag_type = input('Fragment type(gene, cds, rRNA, tRNA, exon, intron, spacer):\\n')\n cur.execute(\n 'SELECT Taxon, Organism, Name, Type, Strand, Sequence FROM main WHERE Name LIKE ? AND Type = ? AND Organism=?',\n ('%' + gene + '%', frag_type, organism))\n result = cur.fetchall()\n elif query_type == '2':\n organism = input('Organism:\\n')\n frag_type = input('Fragment type(gene, cds, rRNA, tRNA, exon, intron, spacer, whole, fragments):\\n')\n if frag_type == 'fragments':\n cur.execute(\n 'SELECT Taxon, Organism, Name, Type, Strand, Sequence, Head FROM main WHERE Organism = ? ORDER BY Head',\n (organism,))\n else:\n cur.execute(\n 'SELECT Taxon, Organism, Name, Type, Strand, Sequence, Head FROM main WHERE Organism LIKE ? AND Type = ? ORDER BY Head',\n ('%' + organism + '%', frag_type))\n result = cur.fetchall()\n elif query_type == '3':\n gene = input('Gene:\\n')\n frag_type = input('Fragment type(gene, cds, rRNA, tRNA, exon, intron, spacer):\\n')\n cur.execute(\n 'SELECT Taxon, Organism, Name, Type, Strand, Sequence FROM main WHERE Name LIKE ? AND Type = ? 
ORDER BY Taxon',\n ('%' + gene + '%', frag_type))\n result = cur.fetchall()\n elif query_type == '4':\n cur.execute('SELECT Taxon, Organism, Name, Type, Strand, Sequence, Head FROM main ORDER BY Taxon')\n result = cur.fetchall()\n elif query_type == '5':\n cur.execute(\n 'SELECT Taxon, Organism, Name, Type, Strand, Sequence, Head FROM main WHERE type = \"cds\" ORDER BY Taxon')\n result = cur.fetchall()\n\n query_result = []\n for i in result:\n title = '{0}|{1}|{2}|{3}'.format(i[0], i[1], i[2], i[3])\n sequence = MutableSeq(i[5])\n gene = i[2]\n if i[4] == '-1':\n sequence.seq = sequence.reverse_complement()\n record = [title, gene, sequence]\n query_result.append(record)\n\n if organize == 'y':\n if not exists('output'):\n makedirs('output')\n for i in query_result:\n file_name = 'output/{0}.fasta'.format(i[1].replace('/', ''))\n with open(file_name, 'a') as output_file:\n output_file.write('>{0}\\n{1}\\n'.format(i[0], i[2]))\n else:\n output = input('Enter output filename:\\n')\n with open('{0}.fasta'.format(output), 'w') as output_file:\n for i in query_result:\n output_file.write('>{0}\\n{1}\\n'.format(i[0], i[2]))\n\n cur.close()\n con.close()\n print('Done.\\n')", "def _initiate_subquery(self, query, resource, offset, limit, sorts,\n supported, strict=True):\n record_class = resource.model\n schema = resource.make_schema()\n id_keys = schema.id_keys\n if sorts:\n order_bys = []\n for sort in sorts:\n try:\n order_bys = self._get_order_bys(\n record_class, [sort], resource.convert_key_name)\n except AttributeError:\n if strict:\n raise resource.make_error(\n \"invalid_sort_field\", field=sort.attr)\n else:\n order_bys = []\n for attr_name in id_keys:\n order_bys.append(\n getattr(\n record_class,\n attr_name).asc()\n )\n if limit is not None and limit < 0:\n if strict:\n raise resource.make_error(\"invalid_limit_value\", limit=limit)\n limit = None\n if offset is not None and offset < 0:\n if strict:\n raise resource.make_error(\"invalid_offset_value\",\n offset=offset)\n offset = None\n if limit or offset:\n if supported:\n # Use row_number to figure out which rows to pull\n row_number = func.row_number().over(\n order_by=order_bys\n ).label(\"row_number\")\n query = query.add_columns(row_number)\n # limit and offset handling\n start = 1\n if offset is not None:\n start = offset + 1\n end = None\n if limit is not None:\n end = start + limit - 1\n # Remove row_number from select list\n # NOTE - Not sure if there's a better way to do this...\n entities = []\n for col in query.column_descriptions:\n if col[\"name\"] != \"row_number\":\n entities.append(col[\"expr\"])\n # Query from self, allowing us to filter by row_number\n # Only include non row_number expressions in SELECT\n query = query.from_self(*entities)\n if start:\n query = query.filter(row_number >= start)\n if end:\n query = query.filter(row_number <= end)\n order_bys = [row_number]\n else:\n # Unable to use row_number, so unfortunately we have to\n # run an actual query with limit/offset/order applied,\n # and use those results to build our new query.\n # Super inefficient.\n temp_query = query\n for order_by in order_bys:\n temp_query = temp_query.order_by(order_by)\n if limit:\n temp_query = self.apply_limit(temp_query, limit)\n if offset:\n temp_query = self.apply_offset(temp_query, offset)\n results = temp_query.all()\n if len(id_keys) > 1:\n filters = []\n for result in results:\n conditions = []\n for id_key in id_keys:\n conditions.append(\n getattr(record_class, id_key) ==\n getattr(result, id_key)\n )\n 
filters.append(\n and_(*conditions)\n )\n if filters:\n query = query.filter(or_(*filters))\n else:\n # in condition\n id_key = id_keys[0]\n values = [getattr(r, id_keys[0]) for r in results]\n if values:\n query = query.filter(\n getattr(record_class, id_key).in_(values))\n query = query.from_self()\n for order_by in order_bys:\n query = query.order_by(order_by)\n return query", "def run(self):\n query = self.query\n\n # count before filtering\n # self.cardinality = query.add_columns(self.columns[0].sqla_expr).count()\n\n self._set_column_filter_expressions()\n self._set_global_filter_expression()\n self._set_sort_expressions()\n self._set_yadcf_data(query)\n\n # apply filters\n query = query.filter(\n *[e for e in self.filter_expressions if e is not None])\n self.filtered_query = deepcopy(query)\n\n # self.cardinality_filtered = query.add_columns(\n # self.columns[0].sqla_expr).count()\n\n # apply sorts\n query = query.order_by(\n *[e for e in self.sort_expressions if e is not None])\n\n # add paging options\n length = int(self.params.get('length'))\n if length >= 0:\n query = query.limit(length)\n elif length == -1:\n pass\n else:\n raise(ValueError(\n 'Length should be a positive integer or -1 to disable'))\n query = query.offset(int(self.params.get('start')))\n\n # add columns to query\n query = query.add_columns(\n *[c.sqla_expr for c in self.columns])\n\n self.filtered_query = self.filtered_query.add_columns(\n *[c.sqla_expr for c in self.columns])\n\n self.query = query\n # fetch the result of the queries\n column_names = [col.mData if col.mData else str(i)\n for i, col in enumerate(self.columns)]\n # self.results = [{k: v for k, v in zip(\n # column_names, row)} for row in query.all()]", "def oreOutputQuery(inputs):\n\n # TODO: This is set up to work with only ONE crop/target category; it must be changed to allow for multiple...\n\n conn = sqlite3.connect(db)\n conn.row_factory = sqlite3.Row\n c = conn.cursor()\n\n category = inputs['exp_category']\n activities = inputs['exp_scenario']['Activity']\n app_eqips = inputs['exp_scenario']['AppEquip']\n app_types = inputs['exp_scenario']['AppType']\n formulations = inputs['exp_scenario']['Formulation']\n\n params = [category]\n\n def query_generator(exp_scenario, exp_scenario_list):\n\n query_string = exp_scenario + \" = ?\" # E.g. \"Activity = ?\"\n i = 0\n while i < len(exp_scenario_list):\n params.append(exp_scenario_list[i]) # append item to params[] to pass to SQL statement\n if i > 0: # skip 1st list item bc it is handle by default in the 'query_string' string definition\n query_string += \" OR \" + exp_scenario + \" = ?\" # E.g. \"Activity = ? OR Activity = ? OR Activity = ?\"\n i += 1\n return query_string\n\n sql_query = 'SELECT * FROM OccHandlerNC WHERE Category = ? 
' \\\n 'AND (' + query_generator('Activity', activities) + ') ' \\\n 'AND (' + query_generator('AppEquip', app_eqips) + ') ' \\\n 'AND (' + query_generator('AppType', app_types) + ') ' \\\n 'AND (' + query_generator('Formulation', formulations) +')'\n\n # TreatedVal, TreatedUnit, DUESLNoG, DUESLG, DUEDLG, DUESLGCRH, DUEDLGCRH, IUENoR, IUEPF5R, IUEPF10R, IUEEC\n\n\n\n c.execute(sql_query, tuple(params))\n\n query = c.fetchall()\n conn.close() # Close 'row_factory' connection\n\n return query", "def _run_query(query, records):\n global_state = query.initial_global_state()\n params = query.derive_sample_params(global_state)\n sample_state = query.initial_sample_state(global_state, next(iter(records)))\n for record in records:\n sample_state = query.accumulate_record(params, sample_state, record)\n result, _ = query.get_query_result(sample_state, global_state)\n return result", "def _query_iterator(result, chunksize, columns, index_col=None, coerce_float=True, parse_dates=None):\n\n while True:\n data = result.fetchmany(chunksize)\n if not data:\n break\n else:\n yield _wrap_result(data, columns, index_col=index_col,\n coerce_float=coerce_float,\n parse_dates=parse_dates)", "def multi_query(db, queries):\n return pd.concat((query_to_df(db, q) for q in queries), ignore_index=True)", "def aggregate_results(output_files, agg_filename):\n\n print(file_marker + \"STARTING AGGREGATION\")\n feather_files = output_files\n\n results = []\n for i in range(len(feather_files)):\n print(file_marker + str(i))\n x = pd.read_feather(feather_files[i])\n results.append(x)\n \n overall_results = pd.concat(results, ignore_index=True, sort=False)\n opt_diff_results = overall_results\n\n opt_diff_results.reset_index(inplace=True, drop=True) \n # drop=True: column 'index' gets removed\n\n opt_diff_results.to_feather(agg_filename)\n print(file_marker + \"Aggregated results saved to: \" + agg_filename)", "def queries_iter(self, input):\n\n queries = self._split(input)\n while queries:\n for sql in queries:\n delimiter = self._delimiter\n sql = queries.pop(0)\n if sql.endswith(delimiter):\n trailing_delimiter = True\n sql = sql.strip(delimiter)\n else:\n trailing_delimiter = False\n\n yield sql\n\n # if the delimiter was changed by the last command,\n # re-split everything, and if we previously stripped\n # the delimiter, append it to the end\n if self._delimiter != delimiter:\n combined_statement = ' '.join([sql] + queries)\n if trailing_delimiter:\n combined_statement += delimiter\n queries = self._split(combined_statement)[1:]", "def _expand_query_list(session, queries, recursive=False, verbose=False):\n results = []\n\n # If no queries are supplied by the user, default to a query for the\n # current working directory\n if len(queries) == 0:\n queries = [get_cwd()]\n\n # Wildcard expansion is performed first, so it can be combined with other types\n # of expansion, such as recursive expansion of subcollections later. Each collection\n # or data object is expanded only once.\n preprocessed_queries = []\n already_expanded = {}\n for query in queries:\n # Currently only wildcards without a collection path are supported\n # e.g. 
\"*.dat\", but not \"../*.dat\" or \"*/data.dat\".\n if \"/\" not in query and (\"?\" in query or \"*\" in query):\n for d in get_dataobjects_in_collection(session, get_cwd()):\n if fnmatch(d[\"name\"],\n query) and d[\"full_name\"] not in already_expanded:\n preprocessed_queries.append(d[\"full_name\"])\n already_expanded[d[\"full_name\"]] = 1\n for c in get_direct_subcollections(session, get_cwd()):\n parent, coll = os.path.split(c[\"name\"])\n if fnmatch(coll, query) and d[\"name\"] not in already_expanded:\n preprocessed_queries.append(c[\"name\"])\n already_expanded[d[\"name\"]] = 1\n else:\n preprocessed_queries.append(query)\n\n for query in preprocessed_queries:\n absquery = convert_to_absolute_path(query)\n if collection_exists(session, absquery):\n results.append({\"original_query\": query, \"expanded_query\": absquery,\n \"expanded_query_type\": \"collection\"})\n if verbose:\n print_debug(\"Argument \\\"{}\\\" is a collection.\".format(query))\n if recursive:\n for subcollection in get_subcollections(session, absquery):\n if verbose:\n print_debug(\"Recursively adding subcollection \" +\n subcollection + \" to queries.\")\n results.append({\"original_query\": query,\n \"expanded_query\": subcollection,\n \"expanded_query_type\": \"collection\"})\n elif dataobject_exists(session, absquery):\n results.append({\"original_query\": query, \"expanded_query\": absquery,\n \"expanded_query_type\": \"dataobject\"})\n if verbose:\n print_debug(\"Argument \\\"{}\\\" is a data object.\".format(query))\n else:\n print_error(\n \"Query \\\"{}\\\" could not be resolved. Ignoring ... \".format(query))\n\n return results", "def query(self):\n self.session_status()\n try:\n if self.keyword['limit'] == None:\n query = self.session.query(self.keyword['tablename']).filter(\n self.keyword['filter']).all()\n else:\n query = self.session.query(self.keyword['tablename']).filter(\n self.keyword['filter']).limit(self.keyword['limit']).all()\n except OperationalError:\n self.retry += 1\n if self.retry > self.max_retry_time:\n raise OperationalError, 'RETRY OUT'\n time.sleep(3)\n self.session.close()\n self.query()\n\n self.session.close()\n\n if not query:\n return []\n self.retry = 0\n return query" ]
[ "0.58031404", "0.568224", "0.5665828", "0.5647314", "0.5642808", "0.5589907", "0.5557054", "0.5428827", "0.54190207", "0.5345511", "0.5322713", "0.53156805", "0.5276109", "0.52561706", "0.5253461", "0.5250765", "0.52416295", "0.5235962", "0.52301556", "0.52289045", "0.52045006", "0.5195155", "0.51877767", "0.5176706", "0.51757056", "0.5174094", "0.51739395", "0.5169162", "0.516792", "0.51664495" ]
0.7461799
0
Tests the entrypoint with data passing with newstyled KFP components. This test case emulates a scenario similar to testMainWithV1Producer, except that the inputs of this step are all provided by a newstyled KFP component.
def testMainWithV2Producer(self): # Set mocked user function. self._import_func.return_value = main.test_func2 # Set GFile read function self._mock_gcs_read.return_value = _PRODUCER_EXECUTOR_OUTPUT entrypoint.main( executor_metadata_json_file=_OUTPUT_METADATA_JSON_LOCATION, function_name='test_func2', test_param_input_param_metadata_file='gs://root/producer/executor_output_metadata.json', test_param_input_field_name='param_output', test_artifact_input_artifact_metadata_file='gs://root/producer/executor_output_metadata.json', test_artifact_input_output_name='artifact_output', test_output1_artifact_output_path='gs://root/consumer/output1', test_output2_parameter_output_path='gs://root/consumer/output2' ) self._mock_gcs_write.assert_called_with( path=_OUTPUT_METADATA_JSON_LOCATION, content=_EXPECTED_EXECUTOR_OUTPUT_1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testMainWithV1Producer(self):\n # Set mocked user function.\n self._import_func.return_value = main.test_func\n\n entrypoint.main(\n executor_metadata_json_file=_OUTPUT_METADATA_JSON_LOCATION,\n function_name='test_func',\n test_param_input_argo_param='hello from producer',\n test_artifact_input_path='gs://root/producer/output',\n test_output1_artifact_output_path='gs://root/consumer/output1',\n test_output2_parameter_output_path='gs://root/consumer/output2'\n )\n\n self._mock_gcs_write.assert_called_with(\n path=_OUTPUT_METADATA_JSON_LOCATION,\n content=_EXPECTED_EXECUTOR_OUTPUT_1)", "def run():\n \n logger = logging.getLogger(\"galah.sheep.producer\")\n \n\t# Initialize the correct producer based on the selected virtual suite.\n virtual_suite = get_virtual_suite(config[\"VIRTUAL_SUITE\"])\n producer = virtual_suite.Producer(logger)\n\n logger.info(\"Producer is starting\")\n \n # Loop until the program is shutting down\n while not universal.exiting:\n producer.produce_vm()", "def test_initialized_components(self):\n m, data = add_components_and_load_data(\n prereq_modules=IMPORTED_PREREQ_MODULES,\n module_to_test=MODULE_BEING_TESTED,\n test_data_dir=TEST_DATA_DIRECTORY,\n subproblem=\"\",\n stage=\"\",\n )\n instance = m.create_instance(data)\n\n # Load test data\n periods_df = pd.read_csv(\n os.path.join(TEST_DATA_DIRECTORY, \"inputs\", \"periods.tab\"), sep=\"\\t\"\n )\n timepoints_df = pd.read_csv(\n os.path.join(TEST_DATA_DIRECTORY, \"inputs\", \"timepoints.tab\"),\n sep=\"\\t\",\n usecols=[\n \"timepoint\",\n \"period\",\n \"number_of_hours_in_timepoint\",\n \"timepoint_weight\",\n ],\n )\n\n # PERIODS set\n expected_periods = periods_df[\"period\"].tolist()\n actual_periods = [p for p in instance.PERIODS]\n self.assertListEqual(\n expected_periods,\n actual_periods,\n msg=\"PERIODS set data does not load correctly.\",\n )\n # TODO: set index and convert to dict once\n # Param: discount_factor\n expected_discount_factor_param = periods_df.set_index(\"period\").to_dict()[\n \"discount_factor\"\n ]\n actual_discount_factor_param = {\n p: instance.discount_factor[p] for p in instance.PERIODS\n }\n self.assertDictEqual(\n expected_discount_factor_param,\n actual_discount_factor_param,\n msg=\"Data for param 'discount_factor' param \" \"not loaded correctly\",\n )\n # Param: period_start_year\n expected_period_start_year_param = periods_df.set_index(\"period\").to_dict()[\n \"period_start_year\"\n ]\n actual_period_start_year_param = {\n p: instance.period_start_year[p] for p in instance.PERIODS\n }\n self.assertDictEqual(\n expected_period_start_year_param,\n actual_period_start_year_param,\n msg=\"Data for param 'period_start_year' \" \"param not loaded correctly\",\n )\n\n # Param: period_end_year\n expected_period_end_year_param = periods_df.set_index(\"period\").to_dict()[\n \"period_end_year\"\n ]\n actual_period_end_year_param = {\n p: instance.period_end_year[p] for p in instance.PERIODS\n }\n self.assertDictEqual(\n expected_period_end_year_param,\n actual_period_end_year_param,\n msg=\"Data for param 'period_end_year' \" \"param not loaded correctly\",\n )\n\n # Param: hours_in_period_timepoints\n expected_hours_in_period_timepoints = periods_df.set_index(\"period\").to_dict()[\n \"hours_in_period_timepoints\"\n ]\n actual_hours_in_period_timepoints = {\n p: instance.hours_in_period_timepoints[p] for p in instance.PERIODS\n }\n self.assertDictEqual(\n expected_hours_in_period_timepoints,\n actual_hours_in_period_timepoints,\n msg=\"Data for param 
'hours_in_period_timepoints' \"\n \"param not loaded correctly\",\n )\n\n # Params: period\n expected_period_param = timepoints_df.set_index(\"timepoint\").to_dict()[\"period\"]\n actual_period_param = {tmp: instance.period[tmp] for tmp in instance.TMPS}\n\n self.assertDictEqual(\n expected_period_param,\n actual_period_param,\n msg=\"Data for param 'period' not loaded correctly\",\n )\n\n # Set TMPS_IN_PRD\n expected_tmp_in_p = dict()\n for tmp in timepoints_df[\"timepoint\"].tolist():\n if expected_period_param[tmp] not in expected_tmp_in_p.keys():\n expected_tmp_in_p[expected_period_param[tmp]] = [tmp]\n else:\n expected_tmp_in_p[expected_period_param[tmp]].append(tmp)\n\n actual_tmps_in_p = {\n p: sorted([tmp for tmp in instance.TMPS_IN_PRD[p]])\n for p in list(instance.TMPS_IN_PRD.keys())\n }\n self.assertDictEqual(\n expected_tmp_in_p,\n actual_tmps_in_p,\n msg=\"TMPS_IN_PRD data do not match \" \"expected.\",\n )\n\n # Param: number_years_represented\n expected_num_years_param = {}\n for p in expected_periods:\n expected_num_years_param[p] = (\n expected_period_end_year_param[p] - expected_period_start_year_param[p]\n )\n actual_num_years_param = {\n p: instance.number_years_represented[p] for p in instance.PERIODS\n }\n self.assertDictEqual(\n expected_num_years_param,\n actual_num_years_param,\n msg=\"Data for param 'number_years_represented' \"\n \"param not loaded correctly\",\n )\n\n # Param: first_period\n expected_first_period = expected_periods[0]\n actual_first_period = instance.first_period\n self.assertEqual(expected_first_period, actual_first_period)\n\n # Set: NOT_FIRST_PRDS\n expected_not_first_periods = expected_periods[1:]\n actual_not_first_periods = [p for p in instance.NOT_FIRST_PRDS]\n self.assertListEqual(expected_not_first_periods, actual_not_first_periods)\n\n # Param: prev_period\n expected_prev_periods = {\n p: expected_periods[expected_periods.index(p) - 1]\n for p in expected_not_first_periods\n }\n actual_prev_periods = {\n p: instance.prev_period[p] for p in instance.NOT_FIRST_PRDS\n }\n self.assertDictEqual(expected_prev_periods, actual_prev_periods)\n\n # Param: hours_in_subproblem_period\n timepoints_df[\"tot_hours\"] = (\n timepoints_df[\"number_of_hours_in_timepoint\"]\n * timepoints_df[\"timepoint_weight\"]\n )\n expected_hours_in_subproblem_period = (\n timepoints_df.groupby([\"period\"])[\"tot_hours\"].sum().to_dict()\n )\n\n actual_hours_in_subproblem_period = {\n p: instance.hours_in_subproblem_period[p] for p in instance.PERIODS\n }\n self.assertDictEqual(\n expected_hours_in_subproblem_period,\n actual_hours_in_subproblem_period,\n msg=\"Data for param 'hours_in_subproblem_period' \"\n \"param not loaded correctly\",\n )", "def test_launch_composition(self):\n pass", "def test_valid_user_input(self):\n\n test_name = sys._getframe().f_code.co_name\n\n log.info(\"###### TEST EXECUTION STARTED :: \" + test_name + \" ######\")\n\n user_input = data_reader.get_data(test_name, \"Text_Message\")\n\n with allure.step(\"Verify User Input\"):\n result = self.main_page.verify_valid_user_input(user_input)\n self.exe_status.mark_final(test_step=test_name, result=result)", "def setUp(self):\n _, instance_path, shared_inputs = sys.argv\n app = lnt.server.ui.app.App.create_standalone(instance_path)\n app.testing = True\n self.client = app.test_client()\n self.shared_inputs = shared_inputs", "def test_complex_io_from_package_and_offering(self):\n cwl = {\n \"cwlVersion\": \"v1.0\",\n \"class\": \"CommandLineTool\",\n \"inputs\": [\n {\n \"id\": 
\"complex_input_only_cwl_minimal\",\n \"label\": \"Complex Input Only CWL Minimal\",\n \"type\": \"File\"\n },\n {\n \"id\": \"complex_input_both_cwl_and_wps\",\n \"label\": \"Complex Input Both CWL and WPS - From CWL\",\n \"type\": \"File\"\n },\n ],\n \"outputs\": [\n {\n \"id\": \"complex_output_only_cwl_minimal\",\n \"label\": \"Complex Output Only CWL Minimal\",\n \"type\": \"File\",\n },\n {\n \"id\": \"complex_output_both_cwl_and_wps\",\n \"type\": \"File\"\n }\n ]\n }\n body = {\n \"processDescription\": {\n \"process\": {\n \"id\": self._testMethodName,\n \"title\": \"some title\",\n \"abstract\": \"this is a test\",\n \"inputs\": [\n {\n \"id\": \"complex_input_only_wps_removed\",\n },\n {\n \"id\": \"complex_input_both_cwl_and_wps\",\n \"title\": \"Extra detail for I/O both in CWL and WPS\"\n }\n ],\n \"outputs\": [\n {\n \"id\": \"complex_output_only_wps_removed\"\n },\n {\n \"id\": \"complex_output_both_cwl_and_wps\",\n \"title\": \"Additional detail only within WPS output\"\n }\n ]\n }\n },\n \"deploymentProfileName\": \"http://www.opengis.net/profiles/eoc/wpsApplication\",\n \"executionUnit\": [{\"unit\": cwl}],\n }\n desc, pkg = self.deploy_process(body, describe_schema=\"OLD\")\n proc = desc[\"process\"]\n\n assert proc[\"id\"] == self._testMethodName\n assert proc[\"title\"] == \"some title\"\n assert proc[\"description\"] == \"this is a test\"\n assert isinstance(proc[\"inputs\"], list)\n assert len(proc[\"inputs\"]) == 2\n assert proc[\"inputs\"][0][\"id\"] == \"complex_input_only_cwl_minimal\"\n assert proc[\"inputs\"][0][\"minOccurs\"] == 1\n assert proc[\"inputs\"][0][\"maxOccurs\"] == 1\n assert len(proc[\"inputs\"][0][\"formats\"]) == 1, \\\n \"Default format should be added to process definition when omitted from both CWL and WPS\"\n assert proc[\"inputs\"][0][\"formats\"][0][\"mediaType\"] == CONTENT_TYPE_TEXT_PLAIN\n assert proc[\"inputs\"][0][\"formats\"][0][\"default\"] is True\n assert proc[\"inputs\"][1][\"id\"] == \"complex_input_both_cwl_and_wps\"\n assert proc[\"inputs\"][1][\"minOccurs\"] == 1\n assert proc[\"inputs\"][1][\"maxOccurs\"] == 1\n assert len(proc[\"inputs\"][1][\"formats\"]) == 1, \\\n \"Default format should be added to process definition when omitted from both CWL and WPS\"\n assert proc[\"inputs\"][1][\"formats\"][0][\"mediaType\"] == CONTENT_TYPE_TEXT_PLAIN\n assert proc[\"inputs\"][1][\"formats\"][0][\"default\"] is True\n assert proc[\"inputs\"][1][\"title\"] == \"Extra detail for I/O both in CWL and WPS\", \\\n \"Additional details defined only in WPS matching CWL I/O by ID should be preserved\"\n assert isinstance(proc[\"outputs\"], list)\n assert len(proc[\"outputs\"]) == 2\n assert proc[\"outputs\"][0][\"id\"] == \"complex_output_only_cwl_minimal\"\n assert len(proc[\"outputs\"][0][\"formats\"]) == 1, \\\n \"Default format should be added to process definition when omitted from both CWL and WPS\"\n assert proc[\"outputs\"][0][\"formats\"][0][\"mediaType\"] == CONTENT_TYPE_TEXT_PLAIN\n assert proc[\"outputs\"][0][\"formats\"][0][\"default\"] is True\n assert proc[\"outputs\"][1][\"id\"] == \"complex_output_both_cwl_and_wps\"\n assert len(proc[\"outputs\"][1][\"formats\"]) == 1, \\\n \"Default format should be added to process definition when omitted from both CWL and WPS\"\n assert proc[\"outputs\"][1][\"formats\"][0][\"mediaType\"] == CONTENT_TYPE_TEXT_PLAIN\n assert proc[\"outputs\"][1][\"formats\"][0][\"default\"] is True\n assert proc[\"outputs\"][1][\"title\"] == \"Additional detail only within WPS output\", \\\n \"Additional 
details defined only in WPS matching CWL I/O by ID should be preserved\"\n\n assert len(pkg[\"inputs\"]) == 2\n assert pkg[\"inputs\"][0][\"id\"] == \"complex_input_only_cwl_minimal\"\n assert \"format\" not in pkg[\"inputs\"][0], \"Omitted formats in CWL and WPS I/O definitions during deployment\" \\\n \"should not add them to the generated CWL package definition\"\n assert pkg[\"inputs\"][1][\"id\"] == \"complex_input_both_cwl_and_wps\"\n # FIXME:\n # https://github.com/crim-ca/weaver/issues/31\n # https://github.com/crim-ca/weaver/issues/50\n # assert pkg[\"inputs\"][1][\"label\"] == \"Extra detail for I/O both in CWL and WPS\", \\\n # \"WPS I/O title should be converted to CWL label of corresponding I/O from additional details\"\n assert \"format\" not in pkg[\"inputs\"][1], \"Omitted formats in CWL and WPS I/O definitions during deployment\" \\\n \"should not add them to the generated CWL package definition\"\n assert len(pkg[\"outputs\"]) == 2\n assert pkg[\"outputs\"][0][\"id\"] == \"complex_output_only_cwl_minimal\"\n assert \"format\" not in pkg[\"outputs\"][0], \"Omitted formats in CWL and WPS I/O definitions during deployment\" \\\n \"should not add them to the generated CWL package definition\"\n assert pkg[\"outputs\"][1][\"id\"] == \"complex_output_both_cwl_and_wps\"\n # FIXME:\n # https://github.com/crim-ca/weaver/issues/31\n # https://github.com/crim-ca/weaver/issues/50\n # assert pkg[\"outputs\"][1][\"label\"] == \"Additional detail only within WPS output\", \\\n # \"WPS I/O title should be converted to CWL label of corresponding I/O from additional details\"\n assert \"format\" not in pkg[\"outputs\"][1], \"Omitted formats in CWL and WPS I/O definitions during deployment\" \\\n \"should not add them to the generated CWL package definition\"", "def test_multi_triggered_gbk_side_input(self):\n # TODO(https://github.com/apache/beam/issues/20065): Remove use of this\n # experiment. 
This flag is only necessary when using the multi-output\n # TestStream b/c it relies on using the PCollection output tags as the\n # PCollection output ids.\n with TestPipeline() as p:\n\n test_stream = (\n p\n | 'Mixed TestStream' >> TestStream().advance_watermark_to(\n 3,\n tag='main').add_elements(['a1'], tag='main').advance_watermark_to(\n 8, tag='main').add_elements(['a2'], tag='main').add_elements(\n [window.TimestampedValue(('k', 100), 2)], tag='side').\n add_elements([window.TimestampedValue(\n ('k', 400), 7)], tag='side').advance_watermark_to_infinity(\n tag='main').advance_watermark_to_infinity(tag='side'))\n\n main_data = (\n test_stream['main']\n | 'Main windowInto' >> beam.WindowInto(\n window.FixedWindows(5),\n accumulation_mode=trigger.AccumulationMode.DISCARDING))\n\n side_data = (\n test_stream['side']\n | 'Side windowInto' >> beam.WindowInto(\n window.FixedWindows(5),\n trigger=trigger.AfterWatermark(early=trigger.AfterCount(1)),\n accumulation_mode=trigger.AccumulationMode.DISCARDING)\n | beam.CombinePerKey(sum)\n | 'Values' >> Map(lambda k_vs: k_vs[1]))\n\n class RecordFn(beam.DoFn):\n def process(\n self,\n elm=beam.DoFn.ElementParam,\n ts=beam.DoFn.TimestampParam,\n side=beam.DoFn.SideInputParam):\n yield (elm, ts, side)\n\n records = (\n main_data\n | beam.ParDo(RecordFn(), beam.pvalue.AsList(side_data)))\n\n expected_window_to_elements = {\n window.IntervalWindow(0, 5): [\n ('a1', Timestamp(3), [100, 0]),\n ],\n window.IntervalWindow(5, 10): [('a2', Timestamp(8), [400, 0])],\n }\n\n assert_that(\n records,\n equal_to_per_window(expected_window_to_elements),\n use_global_window=False,\n label='assert per window')", "def test_literal_io_from_package_and_offering(self):\n cwl = {\n \"cwlVersion\": \"v1.0\",\n \"class\": \"CommandLineTool\",\n \"inputs\": [\n {\n \"id\": \"literal_input_only_cwl_minimal\",\n \"type\": \"string\"\n },\n {\n \"id\": \"literal_input_both_cwl_and_wps\",\n \"type\": \"string\"\n },\n ],\n \"outputs\": [\n {\n \"id\": \"literal_output_only_cwl_minimal\",\n \"type\": {\n \"type\": \"array\",\n \"items\": \"float\",\n }\n },\n {\n \"id\": \"literal_output_both_cwl_and_wps\",\n \"type\": \"float\"\n }\n ]\n }\n body = {\n \"processDescription\": {\n \"process\": {\n \"id\": self._testMethodName,\n \"title\": \"some title\",\n \"abstract\": \"this is a test\",\n \"inputs\": [\n {\n \"id\": \"literal_input_only_wps_removed\",\n },\n {\n \"id\": \"literal_input_both_cwl_and_wps\",\n \"title\": \"Extra detail for I/O both in CWL and WPS\"\n }\n ],\n \"outputs\": [\n {\n \"id\": \"literal_output_only_wps_removed\"\n },\n {\n \"id\": \"literal_output_both_cwl_and_wps\",\n \"title\": \"Additional detail only within WPS output\"\n }\n ]\n }\n },\n \"deploymentProfileName\": \"http://www.opengis.net/profiles/eoc/wpsApplication\",\n \"executionUnit\": [{\"unit\": cwl}],\n }\n desc, pkg = self.deploy_process(body, describe_schema=\"OLD\")\n proc = desc[\"process\"]\n\n assert proc[\"id\"] == self._testMethodName\n assert proc[\"title\"] == \"some title\"\n assert proc[\"description\"] == \"this is a test\"\n assert isinstance(proc[\"inputs\"], list)\n assert len(proc[\"inputs\"]) == 2\n assert proc[\"inputs\"][0][\"id\"] == \"literal_input_only_cwl_minimal\"\n assert proc[\"inputs\"][0][\"minOccurs\"] == 1\n assert proc[\"inputs\"][0][\"maxOccurs\"] == 1\n assert proc[\"inputs\"][1][\"id\"] == \"literal_input_both_cwl_and_wps\"\n assert proc[\"inputs\"][1][\"minOccurs\"] == 1\n assert proc[\"inputs\"][1][\"maxOccurs\"] == 1\n assert 
proc[\"inputs\"][1][\"title\"] == \"Extra detail for I/O both in CWL and WPS\", \\\n \"Additional details defined only in WPS matching CWL I/O by ID should be preserved\"\n assert isinstance(proc[\"outputs\"], list)\n assert len(proc[\"outputs\"]) == 2\n assert proc[\"outputs\"][0][\"id\"] == \"literal_output_only_cwl_minimal\"\n assert proc[\"outputs\"][1][\"id\"] == \"literal_output_both_cwl_and_wps\"\n assert proc[\"outputs\"][1][\"title\"] == \"Additional detail only within WPS output\", \\\n \"Additional details defined only in WPS matching CWL I/O by ID should be preserved\"\n\n assert len(pkg[\"inputs\"]) == 2\n assert pkg[\"inputs\"][0][\"id\"] == \"literal_input_only_cwl_minimal\"\n assert pkg[\"inputs\"][1][\"id\"] == \"literal_input_both_cwl_and_wps\"\n # FIXME:\n # https://github.com/crim-ca/weaver/issues/31\n # https://github.com/crim-ca/weaver/issues/50\n # assert pkg[\"inputs\"][1][\"label\"] == \"Extra detail for I/O both in CWL and WPS\", \\\n # \"WPS I/O title should be converted to CWL label of corresponding I/O from additional details\"\n assert len(pkg[\"outputs\"]) == 2\n assert pkg[\"outputs\"][0][\"id\"] == \"literal_output_only_cwl_minimal\"\n assert pkg[\"outputs\"][1][\"id\"] == \"literal_output_both_cwl_and_wps\"\n # FIXME:\n # https://github.com/crim-ca/weaver/issues/31\n # https://github.com/crim-ca/weaver/issues/50\n # assert pkg[\"outputs\"][1][\"label\"] == \"Additional detail only within WPS output\", \\\n # \"WPS I/O title should be converted to CWL label of corresponding I/O from additional details\"\n\n desc = self.describe_process(self._testMethodName, describe_schema=\"OGC\")\n assert desc[\"id\"] == self._testMethodName\n assert desc[\"title\"] == \"some title\"\n assert desc[\"description\"] == \"this is a test\"\n assert isinstance(desc[\"inputs\"], dict)\n assert len(desc[\"inputs\"]) == 2\n assert desc[\"inputs\"][\"literal_input_only_cwl_minimal\"][\"minOccurs\"] == 1\n assert desc[\"inputs\"][\"literal_input_only_cwl_minimal\"][\"maxOccurs\"] == 1\n assert desc[\"inputs\"][\"literal_input_both_cwl_and_wps\"][\"minOccurs\"] == 1\n assert desc[\"inputs\"][\"literal_input_both_cwl_and_wps\"][\"maxOccurs\"] == 1\n assert isinstance(desc[\"outputs\"], dict)\n assert len(desc[\"outputs\"]) == 2\n assert \"title\" not in desc[\"outputs\"][\"literal_output_only_cwl_minimal\"], \\\n \"No additional title provided should make the field to be omitted completely.\"\n assert desc[\"outputs\"][\"literal_output_both_cwl_and_wps\"][\"title\"] == \\\n \"Additional detail only within WPS output\", \\\n \"Additional details defined only in WPS matching CWL I/O by ID should be preserved.\"", "def test_main_required_args(self):\n args = [\n \"--layout\",\n self.layout_single_signed_path,\n \"--layout-keys\",\n self.alice_path,\n ]\n\n self.assert_cli_sys_exit(args, 0)", "def test_main_required_args(self):\n args = [\n \"--layout\",\n self.layout_single_signed_path,\n \"--layout-keys\",\n self.alice_path,\n ]\n\n self.assert_cli_sys_exit(args, 0)", "def test_main():\r\n\r\n ## Put any test code here\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"--source_svc\", required=True)\r\n parser.add_argument(\"--target_svc\", required=True)\r\n parser.add_argument(\"--host\", required=True)\r\n args=vars(parser.parse_args())\r\n\r\n ## For this test I need the disk collections\r\n sourceVdiskCollection = getvdiskcollection(args[\"source_svc\"])\r\n #targetVdiskCollection = getvdiskcollection(args[\"target_svc\"])\r\n 
remove_vdisk_from_svc(args[\"source_svc\"], sourceVdiskCollection[\"web01_d01\"])\r\n remove_vdisk_from_svc(args[\"source_svc\"], sourceVdiskCollection[\"web01_r01\"])", "def test_85_entry_point(self):\n\t\tinput = \"\"\"procedure main(pa:boolean); var a:real;x:array[1 .. 10]of real;\n\t\tbegin a:=x[sq(2)]; fuc(); end\n\t\tfunction sq(m:integer):integer; begin return m*m; end\"\"\"\n\t\texpect = \"No entry point\"\n\t\tself.assertTrue(TestChecker.test(input,expect,485))", "def test_pipeline1(self):\n\n nlp = Pipeline[DataPack]()\n reader = SentenceReader()\n nlp.set_reader(reader)\n dummy = DummyPackProcessor()\n nlp.add(dummy)\n nlp.initialize()\n data_path = data_samples_root + \"/random_texts/0.txt\"\n num_packs = 0\n for pack in nlp.process_dataset(data_path):\n types = list(pack.get_entries_by_type(NewType))\n num_packs += 1\n self.assertEqual(len(types), 1)\n self.assertEqual(types[0].value, \"[PACK]\")\n\n # check that all packs are yielded\n self.assertEqual(num_packs, reader.count)", "def test_keypoint_detection(init_env, config_file):\n run_all_steps(init_env, config_file)", "def test_main_pizza_parlour_two():\n inputs = [1, 2, 1, 1, 1, 1, 1, 2, 3, 3, 1, 2, 1, 2, 3, 2, 1, 2, 2, 2,\n 2, 1, 2, 1, 3, 1, 123, 123, 4, 1, 3, \"Corniche\", 8]\n\n mock_inputs = [str(i) for i in inputs]\n set_keyboard_input(mock_inputs)\n\n run()\n\n output = get_display_output()\n\n expected = \"\\t8. Exit the program\"\n\n assert output[-2] == expected", "def train_entry_point():", "def test_03_controlled_data(self):\n self.fc.flow_load_home_screen()\n self.home.select_settings_item()\n self.data_collection_settings.select_reset_btn()\n self.home.select_controlled_data_item()\n self.controlled_data.select_data_refresh_button()\n assert self.controlled_data.get_associated_device_product_number() == \"Missing Consent\"\n assert self.controlled_data.get_associated_device_uuid() == \"Missing Consent\"\n assert self.controlled_data.get_associated_stratus_user_uuid() == \"Missing Consent\"\n assert self.controlled_data.get_app_caid() == \"Missing Consent\"\n assert self.controlled_data.get_app_name() == \"Missing Consent\"\n self.driver.swipe(direction=\"down\")\n assert self.controlled_data.get_app_package_deployed_uuid() == \"Missing Consent\"\n assert self.controlled_data.get_app_package_id() == \"Missing Consent\"\n assert self.controlled_data.get_os_language() == \"Missing Consent\"\n self.driver.swipe(direction=\"up\")\n self.home.select_settings_item()\n self.data_collection_settings.enter_custom_printer_uuid(option=\"11111111-0000-0000-0000-000000000001\")\n self.data_collection_settings.enter_custom_app_instance_id(option=\"22222222-0000-0000-0000-000000000001\")\n self.data_collection_settings.select_save_btn()\n self.home.select_controlled_data_item()\n self.controlled_data.select_data_refresh_button()\n assert self.controlled_data.get_associated_device_product_number() != \"Missing Consent\"\n assert self.controlled_data.get_associated_device_uuid() != \"Missing Consent\"\n assert self.controlled_data.get_associated_stratus_user_uuid() != \"Missing Consent\"\n assert self.controlled_data.get_app_caid() != \"Missing Consent\"\n assert self.controlled_data.get_app_name() != \"Missing Consent\"\n self.driver.swipe(direction=\"down\")\n assert self.controlled_data.get_app_package_deployed_uuid() != \"Missing Consent\"\n assert self.controlled_data.get_app_version() != \"Missing Consent\"\n assert self.controlled_data.get_os_name() != \"Missing Consent\"\n self.driver.swipe(direction=\"up\")", "def 
_setup_vasp_inputs(self):\n\n # Set the kpoints (kpoints)\n if 'kpoints' in self.inputs:\n self.ctx.inputs.kpoints = self.inputs.kpoints\n elif 'kpoints_spacing' in self.inputs:\n kpoints = orm.KpointsData()\n kpoints.set_cell_from_structure(self.ctx.inputs.initial_structure)\n kpoints.set_kpoints_mesh_from_density(self.inputs.kpoints_spacing.value * np.pi * 2)\n self.ctx.inputs.kpoints = kpoints\n elif 'kpoints_spacing_vasp' in self.inputs:\n kpoints = orm.KpointsData()\n kpoints.set_cell_from_structure(self.ctx.inputs.initial_structure)\n kpoints.set_kpoints_mesh_from_density(self.inputs.kpoints_spacing.value)\n self.ctx.inputs.kpoints = kpoints\n else:\n raise InputValidationError(\"Must supply either 'kpoints' or 'kpoints_spacing' or 'kpoints_spacing_vasp\")\n\n # Set settings\n\n unsupported_parameters = dict(VTST_ADDITIONAL_TAGS)\n if 'settings' in self.inputs:\n self.ctx.inputs.settings = self.inputs.settings\n # Also check if the user supplied additional tags that is not in the supported file.\n try:\n unsupported_parameters = self.ctx.inputs.settings.unsupported_parameters\n except AttributeError:\n pass\n\n # Perform inputs massage to accommodate generalization in higher lying workchains\n # and set parameters.\n try:\n parameters_massager = ParametersMassage(self.inputs.parameters, unsupported_parameters)\n except Exception as exception: # pylint: disable=broad-except\n return self.exit_codes.ERROR_IN_PARAMETER_MASSAGER.format(exception=exception) # pylint: disable=no-member\n try:\n # Only set if they exists\n # Set any INCAR tags\n self.ctx.inputs.parameters = parameters_massager.parameters.incar\n # Set any dynamics input (currently only for selective dynamics, e.g. custom write to POSCAR)\n self.ctx.inputs.dynamics = parameters_massager.parameters.dynamics\n # Here we could set additional override flags, but those are not relevant for this VASP plugin\n except AttributeError:\n pass\n\n # Setup LDAU keys\n if 'ldau_mapping' in self.inputs:\n ldau_settings = self.inputs.ldau_mapping.get_dict()\n ldau_keys = get_ldau_keys(self.ctx.inputs.initial_structure, **ldau_settings)\n # Directly update the raw inputs passed to VaspCalculation\n self.ctx.inputs.parameters.update(ldau_keys)\n\n # Set settings\n if 'settings' in self.inputs:\n self.ctx.inputs.settings = self.inputs.settings\n\n # Set options\n # Options is very special, not storable and should be\n # wrapped in the metadata dictionary, which is also not storable\n # and should contain an entry for options\n if 'options' in self.inputs:\n options = {}\n options.update(self.inputs.options)\n self.ctx.inputs.metadata = {}\n self.ctx.inputs.metadata['options'] = options\n # Override the parser name if it is supplied by the user.\n parser_name = self.ctx.inputs.metadata['options'].get('parser_name')\n if parser_name:\n self.ctx.inputs.metadata['options']['parser_name'] = parser_name\n # Also make sure we specify the entry point for the\n # Set MPI to True, unless the user specifies otherwise\n withmpi = self.ctx.inputs.metadata['options'].get('withmpi', True)\n self.ctx.inputs.metadata['options']['withmpi'] = withmpi\n else:\n raise InputValidationError('`options` not supplied')\n\n # Utilise default input/output selections\n self.ctx.inputs.metadata['options']['input_filename'] = 'INCAR'\n\n # Set the CalcJobNode to have the same label as the WorkChain\n self.ctx.inputs.metadata['label'] = self.inputs.metadata.get('label', '')\n self.report(self.ctx.inputs.metadata)\n\n # Verify and set potentials (potcar)\n if not 
self.inputs.potential_family.value:\n self.report( # pylint: disable=not-callable\n 'An empty string for the potential family name was detected.'\n )\n return self.exit_codes.ERROR_NO_POTENTIAL_FAMILY_NAME # pylint: disable=no-member\n try:\n self.ctx.inputs.potential = get_data_class('vasp.potcar').get_potcars_from_structure(\n structure=self.inputs.initial_structure,\n family_name=self.inputs.potential_family.value,\n mapping=self.inputs.potential_mapping.get_dict()\n )\n except ValueError as err:\n return compose_exit_code(self.exit_codes.ERROR_POTENTIAL_VALUE_ERROR.status, str(err)) # pylint: disable=no-member\n except NotExistent as err:\n return compose_exit_code(self.exit_codes.ERROR_POTENTIAL_DO_NOT_EXIST.status, str(err)) # pylint: disable=no-member\n\n self.ctx.verbose = bool(self.inputs.get('verbose', self._verbose))\n\n return None", "def test_main_pizza_parlour_one():\n inputs = [\"1\", \"2\", \"1\", \"1\", \"1\", \"1\", \"1\", \"2\", \"3\", \"3\", \"1\", \"4\", \"1\",\n \"1\", \"8\"]\n set_keyboard_input(inputs)\n\n run()\n\n output = get_display_output()\n\n expected = \"\\t8. Exit the program\"\n assert output[-2] == expected", "def test_84_entry_point(self):\n\t\tinput = \"\"\"procedure mainn(); var a:real;x:array[1 .. 10]of real;\n\t\tbegin a:=x[sq(2)]; end\n\t\tfunction sq(m:integer):integer; begin return m*m; end\"\"\"\n\t\texpect = \"No entry point\"\n\t\tself.assertTrue(TestChecker.test(input,expect,484))", "def main():\n produce()", "def test_process_data(self):\n pass", "def test_pipeline2(self):\n\n nlp = Pipeline[DataPack]()\n reader = SentenceReader()\n nlp.set_reader(reader)\n dummy = DummmyFixedSizeBatchProcessor()\n config = {\"batcher\": {\"batch_size\": 4}}\n nlp.add(component=dummy, config=config)\n nlp.initialize()\n data_path = data_samples_root + \"/random_texts/0.txt\"\n num_packs = 0\n for pack in nlp.process_dataset(data_path):\n types = list(pack.get_entries_by_type(NewType))\n num_packs += 1\n self.assertEqual(len(types), 1)\n self.assertEqual(types[0].value, \"[BATCH]\")\n\n # check that all packs are yielded\n self.assertEqual(num_packs, reader.count)", "def run(known_args, pipeline_args, sections, jobname):\n\n if known_args.testmode:\n # In testmode, disable cloud storage backup and run on directRunner\n pipeline_args.append('--runner=DirectRunner')\n else:\n pipeline_args.append('--runner=DataflowRunner')\n\n pipeline_args.extend([\n '--project=wikidetox',\n '--staging_location=gs://wikidetox-dataflow/staging',\n '--temp_location=gs://wikidetox-dataflow/tmp',\n '--job_name=extract-edits-{}'.format(jobname),\n '--num_workers=20',\n ])\n\n pipeline_options = PipelineOptions(pipeline_args)\n pipeline_options.view_as(SetupOptions).save_main_session = True\n with beam.Pipeline(options=pipeline_options) as p:\n pcoll = (p | \"GetDataDumpList\" >> beam.Create(sections))\n cloud_storage = (\"gs://wikidetox-dataflow/edits/{lan}-{date}\".\n format(lan=known_args.language, date=known_args.dumpdate))\n if known_args.download:\n pcoll = (pcoll |\n \"DownloadDataDumps\" >> beam.ParDo(DownloadDataDumps(), known_args.bucket))\n else:\n if known_args.ingest:\n if known_args.POVpages is not None:\n selected_pages = (p | \"GetSelectedPages\" >> beam.io.ReadFromText(known_args.POVpages)\n | beam.Map(lambda x: (eval(x)['chunk_name'], eval(x)['page_id'])))\n pcoll = (pcoll | beam.Map(lambda x: (x, x)))\n pipeline_input = ({\"selected_pages\": selected_pages, \"datadump\": pcoll} | beam.CoGroupByKey())\n else:\n pipeline_input = pcoll\n pipeline_input = ( 
pipeline_input |\n \"Ingestion\" >> beam.ParDo(IngestDumps(), known_args.bucket,\n known_args.ingestFrom, known_args.all_edits) |\n \"WriteIngested\" >> beam.io.WriteToText(path.join(\"gs://wikidetox-dataflow/ingested\", 'ingested-revisions-{}'.format(jobname))))\n else:\n error_log, rejects, improvments, non_rejects, sent_revises = ( p |\n \"ReadIngested\" >> beam.io.ReadFromText(path.join(\"gs://wikidetox-dataflow/ingested\", 'ingested-revisions-{}*'.format(jobname))) |\n \"GetDiffs\" >> beam.ParDo(WriteDecompressedFile()).with_outputs(\n 'rejects', 'improvments', 'non_rejects', 'sent_revises', main = 'error_log'))\n (sent_revises | \"SentRevisesToStorage\" >>\n beam.io.WriteToText(path.join(cloud_storage, 'sent_revise-{}'.format(jobname))))\n (rejects | \"RejectedToStorage\" >>\n beam.io.WriteToText(path.join(cloud_storage, 'rejected-{}'.format(jobname))))\n (improvments | \"InsertsToStorage\" >>\n beam.io.WriteToText(path.join(cloud_storage, 'improved-{}'.format(jobname))))\n (non_rejects | \"NonRejectsToStorage\" >>\n beam.io.WriteToText(path.join(cloud_storage, 'non_rejected-{}'.format(jobname))))\n (error_log | \"ERRORlog\" >>\n beam.io.WriteToText(path.join(cloud_storage, 'error_log-{}'.format(jobname))))", "def test_11(self, test):\r\n globalConfig.test = test\r\n inputList = getInputList()\r\n if len(inputList) == 0:\r\n return test.UNCLEAR(\"Not tested. No resources found.\")\r\n for inputInstance in inputList:\r\n channels = inputInstance.getChannelList()\r\n if len(channels) == 0:\r\n return test.FAIL(\"Inputs must have at least one channel\")\r\n return test.PASS()", "def testSampleOutput(self):\n beam_width = 3\n max_decode_length = 2\n\n smart_compose_model = model.create_smart_compose_model(self.embedding_layer_param, self.empty_url, self.min_len, self.max_len,\n beam_width, max_decode_length, self.feature_type_2_name, self.min_seq_prob,\n self.length_norm_power)\n\n # {'exist_prefix': True,\n # 'predicted_scores': [[-2.7357671, -2.7361841, -2.7503903]] (could vary due to random initialization),\n # 'predicted_texts': [[b'[CLS] build is', b'[CLS] build source', b'[CLS] build token']]}\n print(smart_compose_model.prefix_aware_beam_search({\n InputFtrType.TARGET_COLUMN_NAME: tf.constant(['bui'])\n }))\n # {'exist_prefix': True,\n # 'predicted_scores': [[-2.7357671, -2.7361841, -2.7503903]] (could vary due to random initialization),\n # 'predicted_texts': [[b'[CLS] build is', b'[CLS] build source', b'[CLS] build token']]}\n print(smart_compose_model.prefix_aware_beam_search({\n InputFtrType.TARGET_COLUMN_NAME: tf.constant(['build'])\n }))\n # {'exist_prefix': True,\n # 'predicted_scores': [[-2.7357671, -2.7361841, -2.7503903]] (could vary due to random initialization),\n # 'predicted_texts': [[b'build is [PAD]', b'build source [PAD]', b'build token [PAD]']]}\n print(smart_compose_model.prefix_aware_beam_search({\n InputFtrType.TARGET_COLUMN_NAME: tf.constant(['build '])\n }))\n # {'exist_prefix': True,\n # 'predicted_scores': [[-2.711434 , -2.7171993, -2.7329462]] (could vary due to random initialization),\n # 'predicted_texts': [[b'build function token', b'build function test', b'build function is']]\n print(smart_compose_model.prefix_aware_beam_search({\n InputFtrType.TARGET_COLUMN_NAME: tf.constant(['build f'])\n }))", "def test_components(self):\n\n good_cpts = self.good.components.components.copy()\n\n for cid, cpt in self.actual.components.components.items():\n goodcpt = good_cpts.pop(cid)\n\n self.assertEqual(cpt.name, goodcpt.name)\n self.assertEqual(cpt.attributes, 
goodcpt.attributes)\n self.assertEqual(len(cpt.symbols), 1)\n self.assertEqual(len(cpt.symbols[0].bodies), 1)\n\n body = cpt.symbols[0].bodies[0]\n goodbody = goodcpt.symbols[0].bodies[0]\n\n self.assertEqual(len(body.shapes), len(goodbody.shapes))\n for shape, goodshape in zip(body.shapes, goodbody.shapes):\n self.assertEqual(shape.__class__, goodshape.__class__)\n self.assertEqual(shape.json(), goodshape.json())\n\n self.assertEqual(len(body.pins), len(goodbody.pins))\n for pin, goodpin in zip(body.pins, goodbody.pins):\n self.assertEqual(pin.__class__, goodpin.__class__)\n self.assertEqual(pin.json(), goodpin.json())\n\n self.assertEqual(good_cpts, {})", "def TestOneStep(self):\n pass", "def runTest(self):\n false_change = EtcProposalsChangeStub(False)\n true_change = EtcProposalsChangeStub()\n true_type = EtcProposalChangeType(true_change)\n false_type = EtcProposalChangeType(false_change)\n self.testbox.pack_start(false_type, False, False, 1)\n self.testbox.pack_start(true_type, False, False, 1)\n gtk.main()\n self.failIf(self.Failed, 'Test failed.')" ]
[ "0.6532168", "0.54045933", "0.5387434", "0.538001", "0.5364076", "0.5347625", "0.5322951", "0.53143555", "0.52755845", "0.52549887", "0.52549887", "0.52510643", "0.5237177", "0.5225532", "0.5202899", "0.51837075", "0.51728654", "0.5167905", "0.5163144", "0.5133356", "0.5133047", "0.5127701", "0.51223403", "0.5120141", "0.51053846", "0.51040775", "0.507759", "0.50699466", "0.5058331", "0.5051433" ]
0.63917965
1
Run colordiff if it can be found, and plain diff otherwise.
def _run_diff(oldfile, newfile):
    # TODO: It may be nicer to use the internal diff engine for this.
    # For one, this would use the correct colors set up for hg
    # diff rather than the colors set up for colordiff. It's not
    # clear to me how this can be done though, and if it is
    # worth the bother.
    _call_subprocesses("diff or colordiff",
                       ["colordiff", "-u", oldfile, newfile],
                       ["diff", "-u", oldfile, newfile])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def diff(src, perm, dest, cmds, comp, verbose=False):\n if comp != Cmp.differ:\n return\n with open(src) as s, open(dest) as d:\n srcl, destl = list(s), list(d)\n out = unified_diff(destl, srcl, dest, src)\n colordiff(out)", "def diff(ctx, input_file):\n if input_file is None:\n click.echo(diff.get_help(ctx))\n return\n\n diff_color(input_file, ctx.obj)", "def diff(ctx, files, metrics, all, detail, revision, wrap):\n config = ctx.obj[\"CONFIG\"]\n\n if not exists(config):\n handle_no_cache(ctx)\n\n if not metrics:\n metrics = get_default_metrics(config)\n logger.info(f\"Using default metrics {metrics}\")\n else:\n metrics = metrics.split(\",\")\n logger.info(f\"Using specified metrics {metrics}\")\n\n from wily.commands.diff import diff\n\n logger.debug(f\"Running diff on {files} for metric {metrics}\")\n diff(\n config=config,\n files=files,\n metrics=metrics,\n changes_only=not all,\n detail=detail,\n revision=revision,\n wrap=wrap,\n )", "def black(context):\n exec_cmd = \"black --check --diff .\"\n run_cmd(context, exec_cmd)", "def test_simple_diff(self):\n diff = (\n b'diff --git a/cfg/testcase.ini b/cfg/testcase.ini\\n'\n b'index cc18ec8..5e70b73 100644\\n'\n b'--- a/cfg/testcase.ini\\n'\n b'+++ b/cfg/testcase.ini\\n'\n b'@@ -1,6 +1,7 @@\\n'\n b'+blah blah blah\\n'\n b' [mysql]\\n'\n b' host = localhost\\n'\n b' port = 3306\\n'\n b' user = user\\n'\n b' pass = pass\\n'\n b'-db = pyunit\\n'\n b'+db = pyunit\\n'\n )\n\n parsed_files = self.tool.get_parser(diff).parse()\n self.assertEqual(len(parsed_files), 1)\n\n self.assert_parsed_diff_file(\n parsed_files[0],\n orig_filename=b'cfg/testcase.ini',\n orig_file_details=b'cc18ec8',\n modified_filename=b'cfg/testcase.ini',\n modified_file_details=b'5e70b73',\n old_unix_mode='100644',\n new_unix_mode='100644',\n insert_count=2,\n delete_count=1,\n data=diff)", "def diffcuv(ctx, input_files):\n assert len(input_files) == 2\n diff_coverage_files(input_files[0].name, input_files[1].name, ctx.obj)", "def test_filemode_diff(self):\n diff1 = (\n b'diff --git a/testing b/testing\\n'\n b'old mode 100755\\n'\n b'new mode 100644\\n'\n b'index e69de29..bcae657\\n'\n b'--- a/testing\\n'\n b'+++ b/testing\\n'\n b'@@ -0,0 +1 @@\\n'\n b'+ADD\\n'\n )\n diff2 = (\n b'diff --git a/testing2 b/testing2\\n'\n b'old mode 100644\\n'\n b'new mode 100755\\n'\n )\n diff = diff1 + diff2\n\n # NOTE: testing2 gets skipped, due to lack of changes we can\n # represent.\n parsed_files = self.tool.get_parser(diff).parse()\n self.assertEqual(len(parsed_files), 1)\n\n self.assert_parsed_diff_file(\n parsed_files[0],\n orig_filename=b'testing',\n orig_file_details=b'e69de29',\n modified_filename=b'testing',\n modified_file_details=b'bcae657',\n old_unix_mode='100755',\n new_unix_mode='100644',\n insert_count=1,\n data=diff1)", "def do_diff(sourcelist):\n for source in sourcelist:\n dc = filecmp.dircmp('output-pandoc/'+source, 'output-panzer/'+source)\n if dc.right_only or dc.left_only or dc.diff_files:\n print(pretty_title(source))\n if dc.right_only:\n print('* only in output-panzer/%s:' % source)\n for line in pretty_list(dc.right_only):\n print(' ' + line)\n if dc.left_only:\n print('* only in output-pandoc/%s:' % source)\n for line in pretty_list(dc.left_only):\n print(' ' + line)\n if dc.diff_files:\n print('* differing:')\n for line in pretty_list(dc.diff_files):\n print(' ' + line)", "def cmd_get_diff(base, target):\n return ['git', 'diff', base, target]", "def test_git(self):\n\n g_d = subprocess.check_output([\"zsh\", \"-i\", \"-c\", \"g d --help\"])\n 
self.assertIn(\n \"`git d' is aliased to \"\n \"`diff --ignore-all-space --ignore-blank-lines --word-diff=color'\",\n g_d,\n )", "def diffch(dir1,dir2,outfile=None):\n for ff in sorted(os.listdir(dir1)):\n if re.search('.c$',ff) or re.search('.h$',ff):\n f1 = dir1 + ff\n f2 = dir2 + ff\n if outfile is None:\n print 'start diff ',f1,f2\n os.system('diff %s %s' % (f1,f2))\n print 'end diff ',f1,f2\n else:\n ofp = open(outfile,'a')\n ofp.write('start diff %s %s\\n' % (f1,f2))\n ofp.close()\n os.system('diff %s %s >> %s' % (f1,f2,outfile))\n ofp = open(outfile,'a')\n ofp.write('end diff %s %s\\n' % (f1,f2))\n ofp.close()", "def test_patch(self, patch):\n self.clean()\n error = self.apply_patch(patch)\n diff = self.run(['git', 'diff', 'origin/master'])\n self.clean()\n if error != '':\n return False, error\n if diff == '':\n # No error message is returned for empty diff. The patch might be\n # empty or has been exported.\n return False, ''\n return True, ''", "def diff(ctx: \"PlanemoCliContext\", directory: str, range: str) -> List[str]:\n cmd = f\"cd '{directory}' && git diff --name-only '{range}' --\"\n stdout, _ = io.communicate(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)\n return [line.strip() for line in unicodify(stdout).splitlines() if line]", "def check_diff_as_arg(self):\n if self.args.diff is True:\n if (\n self.args.pre_snapfile is not None\n and os.path.isfile(self.args.pre_snapfile)\n ) and (\n self.args.post_snapfile is not None\n and os.path.isfile(self.args.post_snapfile)\n ):\n comp = Comparator()\n comp.compare_diff(self.args.pre_snapfile, self.args.post_snapfile, None)\n sys.exit(1)", "def colordiff(txt):\n for line in txt:\n line = line.rstrip()\n if line.startswith((\"+++ \", \"--- \")):\n ansiprint(line, fg=Color.yellow, i=True)\n continue\n if line.startswith(\"+\"):\n ansiprint(line, fg=Color.green, i=True)\n continue\n if line.startswith(\"-\"):\n ansiprint(line, fg=Color.red, i=True)\n continue\n if line.startswith(\"@@\"):\n ansiprint(line, fg=Color.magenta, i=True)\n continue\n print(line)", "def __gitExtendedDiff(self):\n self.vcs.gitExtendedDiff(self.project.getProjectPath())", "def mri_diff(file1, file2):\n cmd = 'mri_diff {} {}'.format(file1, file2)\n\n try:\n output = subprocess.call(cmd, stderr=subprocess.STDOUT, shell=True)\n return output\n except (subprocess.CalledProcessError, ValueError):\n return -1", "def check_diffs():\n process = Popen([\"git\", \"diff\", \"HEAD^\", \"--name-only\"], stdout=PIPE)\n\n diff, stderr = process.communicate()\n\n if process.returncode !=0:\n raise Exception(\"Unable to do git diff\")\n return diff.splitlines(False)", "def test_documentation_popxl_autodiff(self):\n filename = \"autodiff.py\"\n self.run_python(filename, file_dir=working_dir, working_dir=working_dir)", "def cmd_diff(self):\n if self.use_etc_tmp:\n if 'etc-tmp' in self.repo.branches:\n self.repo.checkout('etc-tmp')\n else:\n print('The etc-tmp branch does not exist')\n return\n else:\n self.repo.checkout('etc')\n\n suffixes = ['.pacnew', '.pacsave', '.pacorig']\n etc_files = list_rpaths(self.root_dir, ROOT_SUBDIR,\n suffixes=suffixes, prefixes=self.exclude_prefixes)\n repo_files = list_rpaths(self.repodir, ROOT_SUBDIR)\n print('\\n'.join(sorted(set(etc_files).difference(repo_files))))", "def diff(self, top_repo_path):\n p = Popen(\n [\"git\", \"diff\", \"--numstat\"], stdout=PIPE, stderr=PIPE, cwd=top_repo_path\n )\n my_output, my_error = p.communicate()\n if p.returncode == 0:\n result = []\n line_array = 
my_output.decode(\"utf-8\").splitlines()\n for line in line_array:\n linesplit = line.split()\n result.append(\n {\n \"insertions\": linesplit[0],\n \"deletions\": linesplit[1],\n \"filename\": linesplit[2],\n }\n )\n return {\"code\": p.returncode, \"result\": result}\n else:\n return {\"code\": p.returncode, \"message\": my_error.decode(\"utf-8\")}", "def _diff_and_commit(self, commit_msg=''):\n if not commit_msg:\n if 'commit_msg' not in self.data:\n # Ask until we get a non-empty commit message.\n while not commit_msg:\n commit_msg = utils.get_input(\n \"What is the commit message? \")\n else:\n commit_msg = self.data['commit_msg']\n\n diff_cmd = self.vcs.cmd_diff()\n diff = execute_command(diff_cmd)\n if sys.version.startswith('2.6.2'):\n # python2.6.2 bug... http://bugs.python.org/issue5170 This is the\n # spot it can surface as we show a part of the changelog which can\n # contain every kind of character. The rest is mostly ascii.\n print(\"Diff results:\")\n print(diff)\n else:\n # Common case\n logger.info(\"The '%s':\\n\\n%s\\n\", diff_cmd, diff)\n if utils.ask(\"OK to commit this\"):\n msg = commit_msg % self.data\n msg = self.update_commit_message(msg)\n commit_cmd = self.vcs.cmd_commit(msg)\n commit = execute_command(commit_cmd)\n logger.info(commit)", "def diff(request):\n if request.patch.no_base_file:\n # Can't show side-by-side diff since we don't have the base file. Show the\n # unified diff instead.\n return patch_helper(request, 'diff')\n\n patchset = request.patchset\n patch = request.patch\n\n patchsets = list(request.issue.patchsets)\n\n context = _get_context_for_user(request)\n column_width = _get_column_width_for_user(request)\n if patch.is_binary:\n rows = None\n else:\n try:\n rows = _get_diff_table_rows(request, patch, context, column_width)\n except FetchError as err:\n return HttpTextResponse(str(err), status=404)\n\n _add_next_prev(patchset, patch)\n return respond(request, 'diff.html',\n {'issue': request.issue,\n 'patchset': patchset,\n 'patch': patch,\n 'view_style': 'diff',\n 'rows': rows,\n 'context': context,\n 'context_values': models.CONTEXT_CHOICES,\n 'column_width': column_width,\n 'patchsets': patchsets,\n })", "def mri_diff(file1, file2):\n cmd = 'mri_diff %s %s' % (file1, file2)\n\n try:\n #print(cmd)\n output = subprocess.call(cmd, stderr=subprocess.STDOUT, shell=True)\n return output\n except (subprocess.CalledProcessError,ValueError):\n return -1", "def test_ddiff_v2(self):\n print \"\\n\"\n for d in ddiff_v2(a, b): print d\n self.assertEqual(d, \"+FUN\")", "def check_clang_format(project, commit, _desc, diff, options=None):\n tool = get_helper_path('clang-format.py')\n clang_format = options.tool_path('clang-format')\n git_clang_format = options.tool_path('git-clang-format')\n tool_args = (['--clang-format', clang_format, '--git-clang-format',\n git_clang_format] +\n options.args(('--style', 'file', '--commit', commit), diff))\n cmd = [tool] + tool_args\n fixup_func = _fixup_func_caller([tool, '--fix'] + tool_args)\n return _check_cmd('clang-format', project, commit, cmd,\n fixup_func=fixup_func)", "def test_complex_diff(self):\n full_diff, diffs = self._read_diff_fixture(\n 'git_complex.diff',\n expected_num_diffs=7)\n\n parsed_files = self.tool.get_parser(full_diff).parse()\n self.assertEqual(len(parsed_files), 7)\n\n self.assert_parsed_diff_file(\n parsed_files[0],\n orig_filename=b'cfg/testcase.ini',\n orig_file_details=b'5e35098',\n modified_filename=b'cfg/testcase.ini',\n modified_file_details=b'e254ef4',\n old_unix_mode='100644',\n 
new_unix_mode='100644',\n insert_count=2,\n delete_count=1,\n data=diffs[0])\n\n self.assert_parsed_diff_file(\n parsed_files[1],\n orig_filename=b'tests/models.py',\n orig_file_details=PRE_CREATION,\n modified_filename=b'tests/models.py',\n modified_file_details=b'e69de29',\n new_unix_mode='100644',\n data=diffs[1])\n\n self.assert_parsed_diff_file(\n parsed_files[2],\n orig_filename=b'tests/tests.py',\n orig_file_details=PRE_CREATION,\n modified_filename=b'tests/tests.py',\n modified_file_details=b'e279a06',\n new_unix_mode='100644',\n insert_count=2,\n data=diffs[2])\n\n self.assert_parsed_diff_file(\n parsed_files[3],\n orig_filename=b'pysvn-1.5.1.tar.gz',\n orig_file_details=PRE_CREATION,\n modified_filename=b'pysvn-1.5.1.tar.gz',\n modified_file_details=b'86b520c',\n new_unix_mode='100644',\n binary=True,\n data=diffs[3])\n\n self.assert_parsed_diff_file(\n parsed_files[4],\n orig_filename=b'readme',\n orig_file_details=b'5e35098',\n modified_filename=b'readme',\n modified_file_details=b'e254ef4',\n old_unix_mode='100644',\n new_unix_mode='100644',\n insert_count=1,\n delete_count=1,\n data=diffs[4])\n\n self.assert_parsed_diff_file(\n parsed_files[5],\n orig_filename=b'OLDFILE',\n orig_file_details=b'8ebcb01',\n modified_filename=b'OLDFILE',\n modified_file_details=b'0000000',\n old_unix_mode='100644',\n deleted=True,\n delete_count=1,\n data=diffs[5])\n\n self.assert_parsed_diff_file(\n parsed_files[6],\n orig_filename=b'readme2',\n orig_file_details=b'5e43098',\n modified_filename=b'readme2',\n modified_file_details=b'e248ef4',\n old_unix_mode='100644',\n new_unix_mode='100644',\n insert_count=1,\n delete_count=1,\n data=diffs[6])", "def test_diff_with_unicode(self):\n diff = (\n 'diff --git a/cfg/téstcase.ini b/cfg/téstcase.ini\\n'\n 'index cc18ec8..5e70b73 100644\\n'\n '--- a/cfg/téstcase.ini\\n'\n '+++ b/cfg/téstcase.ini\\n'\n '@@ -1,6 +1,7 @@\\n'\n '+blah blah blah\\n'\n ' [mysql]\\n'\n ' hóst = localhost\\n'\n ' pórt = 3306\\n'\n ' user = user\\n'\n ' pass = pass\\n'\n '-db = pyunít\\n'\n '+db = pyunít\\n'\n ).encode('utf-8')\n\n parsed_files = self.tool.get_parser(diff).parse()\n self.assertEqual(len(parsed_files), 1)\n\n self.assert_parsed_diff_file(\n parsed_files[0],\n orig_filename='cfg/téstcase.ini'.encode('utf-8'),\n orig_file_details=b'cc18ec8',\n modified_filename='cfg/téstcase.ini'.encode('utf-8'),\n modified_file_details=b'5e70b73',\n old_unix_mode='100644',\n new_unix_mode='100644',\n insert_count=2,\n delete_count=1,\n data=diff)", "def test_compute_delta(civic, diff):\n assert civic.compute_delta() == diff\n\n # Test when _updated_json is not in kwargs\n cd = Delta(MAIN_JSON, 'civic')\n cd.compute_delta()\n fn = APP_ROOT / 'data' / 'civic' / 'harvester' / \\\n f\"civic_harvester_{date.today().strftime('%Y%m%d')}.json\"\n assert fn.exists()\n os.remove(fn)\n assert not fn.exists()", "def diff(target, last=False):\n\n assert os.path.exists(target), \"%s does not exist!\" % target\n # assert diff_type in [ALL_DIFF, STAGED_DIFF, UNSTAGED_DIFF], \"diff_type is bad!\"\n git_tree = get_git_tree(target)\n results = b\"\"\n\n if git_tree is not None:\n args = [\"diff\", \"--no-color\"]\n\n if last:\n revs = getrevision(target, 2)\n\n if len(revs) == 2:\n args += [revs[1], \"--\"]\n else:\n args = None\n else:\n args += [\"HEAD\", \"--\"]\n\n # Staged only\n # elif diff_type == STAGED_DIFF:\n # args.append(\"--cached\")\n\n if args:\n results = gitopen(args + [target], git_tree)\n return results" ]
[ "0.61412245", "0.600993", "0.5725794", "0.53208023", "0.52990735", "0.5269064", "0.5248664", "0.5227579", "0.5193025", "0.5187924", "0.51672775", "0.51550215", "0.51132864", "0.5070487", "0.506192", "0.5020858", "0.5008293", "0.5007024", "0.5005436", "0.5001004", "0.49886513", "0.4985321", "0.49629328", "0.4937326", "0.4905917", "0.48920503", "0.48814023", "0.48546934", "0.48464224", "0.48456833" ]
0.7053977
0
Return all files in the working directory that match the patterns and are tracked (clean, modified or added). Ignored or unknown files are only matched when given literally. If patterns is empty, match all tracked files. Supports options['include'] and options['exclude'] which work like the include and exclude options of hg status.
def _get_files(repo, patterns, options):
    ctx = repo[None]
    match = match_func(repo, ctx, patterns, options)
    try:
        status = ctx.status(listclean=True, listignored=True, listunknown=True)
    except TypeError:
        # Compatibility with older Mercurial versions.
        status = ctx.status(clean=True, ignored=True, unknown=True)
    modified = status[0]
    added = status[1]
    unknown = status[4]
    ignored = status[5]
    clean = status[6]
    files = []
    for file_list in [clean, modified, added]:
        for filename in file_list:
            if match(filename):
                files.append(filename)
    for file_list in [ignored, unknown]:
        for filename in file_list:
            if match.exact(filename):
                files.append(filename)
    return files
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def match_files(patterns, files):\n\tall_files = files if isinstance(files, collections.Container) else list(files)\n\treturn_files = set()\n\tfor pattern in patterns:\n\t\tif pattern.include is not None:\n\t\t\tresult_files = pattern.match(all_files)\n\t\t\tif pattern.include:\n\t\t\t\treturn_files.update(result_files)\n\t\t\telse:\n\t\t\t\treturn_files.difference_update(result_files)\n\treturn return_files", "def match_any_paths(paths, included_patterns=None, excluded_patterns=None, case_sensitive=True):\n included = [\"*\"] if included_patterns is None else included_patterns\n excluded = [] if excluded_patterns is None else excluded_patterns\n\n for path in paths:\n if _match_path(path, set(included), set(excluded), case_sensitive):\n return True\n return False", "def search(self, src, exclude_pattern = [\"**/*.pyc\"], include_pattern = [\"**/*.py\"]):\n src = os.path.abspath(src)\n \n _target = Path(src)\n _target._flavour.casefold = lambda x : x # basic windows path don't distinguish upper / lower case.\n allfiles = list(_target.glob(\"**/*\"))\n \n exclude = list()\n for _ex in exclude_pattern:\n exclude += _target.glob(_ex) \n \n include = list()\n for _in in include_pattern:\n include += _target.glob(_in) \n \n _target_path = set(allfiles) - set(exclude) | set(include)\n \n _target_dir_path = sorted(list(x for x in _target_path if x.is_dir() is True))\n _target_file_path = sorted(list(x for x in _target_path if x.is_file() is True))\n \n return _target_dir_path, _target_file_path", "def filter_paths(paths, included_patterns=None, excluded_patterns=None, case_sensitive=True):\n included = [\"*\"] if included_patterns is None else included_patterns\n excluded = [] if excluded_patterns is None else excluded_patterns\n\n for path in paths:\n if _match_path(path, set(included), set(excluded), case_sensitive):\n yield path", "def grep(pattern, *files_or_paths):\n matches = []\n\n for fop in files_or_paths:\n with fileobj(fop) as fo:\n matches.extend((line for line in fo if re.match(pattern, line)))\n\n return matches", "def gather_files(path: str, patterns: list, antipatterns: list):\n curr_path = os.getcwd()\n os.chdir(path)\n positive_iterators = [glob.iglob(os.path.join('**', pattern), recursive=True) for pattern in\n patterns]\n linted_files = itertools.chain(*positive_iterators)\n linted_files = (os.path.join(path, file) for file in linted_files)\n linted_files = negative_filtering(antipatterns.copy(), linted_files)\n ret = list(linted_files)\n os.chdir(curr_path)\n return ret", "def grep(self, pattern, files=[], rev=None, no_follow=False,\n text=False, annotations=['rev', 'file', 'line'],\n ignore_case=False, match_text=True,\n include=None, exclude=None):\n\n # Normalise the input\n files = self._map_files(files)\n rev = self._map_revs(rev)\n\n # Select annotations\n user = date = file = line = changeset = type_ = False\n\n for n in annotations:\n if n == 'user':\n user = True\n elif n == 'date':\n date = True\n elif n == 'file':\n file = True\n elif n == 'line':\n line = True\n elif n == 'changeset':\n changeset = True\n elif n == 'type':\n type_ = True\n\n if not user and not date and not file and not line and not changeset \\\n and not type_ and not match_text:\n raise ValueError('you probably want either some annotations or the match text')\n\n out = self._client.execute('grep', pattern, files,\n r=rev, f=not no_follow,\n a=text, i=ignore_case,\n l=not match_text,\n n=line, u=user, d=date,\n I=include, X=exclude,\n v=True,\n print0=True)\n\n fields = 2 # filename rev\n if 
user:\n fields += 1\n if date:\n fields += 1\n if line:\n fields += 1\n if type_:\n fields += 1\n if match_text:\n fields += 1\n \n for l in every(out.split('\\0'), fields):\n if match_text:\n out = GrepString(l[-1])\n else:\n out = GrepResult()\n\n out.file = l.pop(0)\n out.rev = int(l.pop(0))\n\n if line:\n out.line = int(l.pop(0))\n if type_:\n out.type = l.pop(0)\n if user:\n out.user = l.pop(0)\n if date:\n the_date,offset = l.pop(0).rsplit(' ', 1)\n the_date = datetime.datetime.strptime(the_date,\n '%a %b %d %H:%M:%S %Y')\n ofs = int(offset[:-2]) * 60\n was_negative = offset < 0\n if was_negative:\n ofs = -ofs\n ofs += int(offset[-2:])\n if was_negative:\n ofs = -ofs\n tzinfo = SimpleTzInfo(ofs)\n out.date = the_date.replace(tzinfo=tzinfo)\n\n yield out", "def gitignore(self):\n patterns = []\n for path in ('.gitignore', '.git/info/exclude'):\n try:\n with open(pjoin(self.options.target_repo.location, path)) as f:\n patterns.extend(f)\n except FileNotFoundError:\n pass\n except IOError as e:\n logger.warning(f'failed reading {path!r}: {e}')\n return PathSpec.from_lines('gitwildmatch', patterns)", "def locate(self, patterns, rev=None, fullpath=False, include=None,\n exclude=None):\n rev = self._map_rev(rev)\n\n out = self._client.execute('locate', patterns, r=rev, print0=True,\n f=fullpath, I=include, X=exclude)\n\n return out.split('\\0')", "def _filter_patterns(self, filepath, pattern_prefix, exclude_pattern, include_pattern):\n isfilter = False\n if exclude_pattern:\n full_exclude_pattern = os.path.join(pattern_prefix, exclude_pattern)\n if fnmatch.fnmatch(filepath, full_exclude_pattern):\n isfilter = True\n if include_pattern:\n full_include_pattern = os.path.join(pattern_prefix, include_pattern)\n if fnmatch.fnmatch(filepath, full_include_pattern):\n isfilter = False\n return isfilter", "def find_files(self,start_dir=None,pattern=\"*\",file_extention=\"*.fif\",recursive=True,debug=False,abspath=False,\n ignore_case=False):\n pattern = self.update_pattern(pattern,ignore_case=ignore_case)\n \n if not isinstance(file_extention,(list)):\n s = file_extention\n file_extention = list()\n file_extention.append(s)\n \n if debug or self.debug:\n logger.debug(\"start dir : {}\\n\".format(start_dir) +\n \" -> glob pattern : {}\\n\".format(pattern) +\n \" -> file extention : {}\\n\".format(file_extention) +\n \" -> glob recursive : {}\\n\".format(recursive) +\n \" -> adding abs path: {}\\n\".format(abspath)\n )\n files_found = []\n with self.working_directory(start_dir):\n for fext in file_extention: # ToDo fext re /\\.vhdr|vmrk|eeg$/\n for f in glob.iglob(pattern + fext,recursive=recursive):\n #print(f)\n if abspath:\n files_found.append(os.path.abspath(os.path.join(start_dir,f)))\n else:\n files_found.append(f)\n \n files_found.sort()\n return files_found", "def allow_patterns(*patterns):\n\n def _ignore_patterns(path, names):\n\n files_only = [\n name for name in names if not os.path.isdir(os.path.join(path, name))\n ]\n\n allowed_files = []\n for pattern in patterns:\n allowed_files.extend(fnmatch.filter(files_only, pattern))\n\n ignore_others = set(files_only) - set(allowed_files)\n return ignore_others\n\n return _ignore_patterns", "def filter_paths(pathnames, patterns=None, ignore_patterns=None):\n result = []\n if patterns is None:\n patterns = ['*']\n if ignore_patterns is None:\n ignore_patterns = []\n for pathname in pathnames:\n if match_patterns(pathname, patterns) and not match_patterns(pathname,\n ignore_patterns):\n result.append(pathname)\n return result", "def 
find_files(self, config):\n matchers = compile_matchers(\n config.exclude_patterns[:] +\n config.exclude_trees +\n [d + \".rst\" for d in config.unused_docs] +\n [d + \".yay\" for d in config.unused_docs] +\n ['**/' + d for d in config.exclude_dirnames] +\n ['**/_sources']\n )\n self.found_docs = set(get_matching_docs(\n self.srcdir, exclude_matchers=matchers))", "def edit_files(patterns, expressions, # pylint: disable=R0913, R0914\r\n start_dir=None, max_depth=1, dry_run=True,\r\n output=sys.stdout):\r\n # Makes for a better diagnostic because str are also iterable.\r\n assert not isinstance(patterns, str), \"patterns should be a list\"\r\n assert not isinstance(expressions, str), \"expressions should be a list\"\r\n\r\n # Shortcut: if there is only one pattern, make sure we process just that.\r\n if len(patterns) == 1 and not start_dir:\r\n pattern = patterns[0]\r\n directory = os.path.dirname(pattern)\r\n if directory:\r\n patterns = [os.path.basename(pattern)]\r\n start_dir = directory\r\n max_depth = 1\r\n\r\n processed_paths = []\r\n editor = Editor(dry_run=dry_run)\r\n if expressions:\r\n editor.set_code_expr(expressions)\r\n if not start_dir:\r\n start_dir = os.getcwd()\r\n for root, dirs, files in os.walk(start_dir): # pylint: disable=W0612\r\n if max_depth is not None:\r\n relpath = os.path.relpath(root, start=start_dir)\r\n depth = len(relpath.split(os.sep))\r\n if depth > max_depth:\r\n continue\r\n names = []\r\n for pattern in patterns:\r\n names += fnmatch.filter(files, pattern)\r\n for name in names:\r\n path = os.path.join(root, name)\r\n processed_paths.append(os.path.abspath(path))\r\n diffs = editor.edit_file(path)\r\n if dry_run:\r\n output.write(\"\".join(diffs))\r\n if output != sys.stdout:\r\n output.close()\r\n return processed_paths", "def match_file(patterns, file):\n\tmatched = False\n\tfor pattern in patterns:\n\t\tif pattern.include is not None:\n\t\t\tif file in pattern.match((file,)):\n\t\t\t\tmatched = pattern.include\n\treturn matched", "def matching(pattern: str, kind: Optional[str] = None,\n dirpath: Optional[Union[str, Path]] = None, **options\n) -> List[Tuple[Path, Path]]:\n\n if dirpath or kind == 'askdirectory':\n\n # dialog for dir if none given\n dirpath = standard(kind, **options) if not dirpath else Path(dirpath)\n # separate file paths in dirpat by suffix\n filepaths = dirpath.glob('*.*')\n sorted_paths = defaultdict(list)\n for path in filepaths:\n sorted_paths[path.suffix].append(path)\n paths, others = list(sorted_paths.values())\n\n elif kind == 'askopenfilenames':\n\n # open two dialogs to user select files to match\n paths = standard(kind, title='Select File Set 1', **options)\n others = standard(kind, title='Select File Set 2', **options)\n\n else:\n\n msg = (\"matching dialog requires 'kind' argument to be one of '{}' \"\n \"or '{}' or a Path passed to the dirpath argument.\")\n raise TypeError(msg.format('askdirectory', 'askopenfilenames'))\n\n return re_match(paths, others, pattern)", "def glob_files(root_dir, includes=None, excludes=None, gcdtignore=None):\n # docu here: https://docs.python.org/3/library/pathlib.html\n if not includes:\n includes = ['**']\n else:\n # we need to iterate multiple times (iterator safeguard)\n includes = list(includes)\n\n if excludes:\n # we need to iterate multiple times (iterator safeguard)\n excludes = list(excludes)\n\n if gcdtignore:\n spec = pathspec.PathSpec.from_lines('gitwildmatch', gcdtignore)\n log.debug('gcdtignore patterns: %s', gcdtignore)\n\n while includes:\n pattern = includes.pop(0)\n # 
for compatibility with std. python Lib/glop.py:\n # >>>If recursive is true, the pattern '**' will match any files and\n # zero or more directories and subdirectories.<<<\n if pattern.endswith('**'):\n pattern += '/*'\n matches = list(Path(root_dir).glob(pattern))\n\n for m in matches:\n if m.is_dir():\n continue\n\n # some discussion on how to convert a pattern into regex:\n # http://stackoverflow.com/questions/27726545/python-glob-but-against-a-list-of-strings-rather-than-the-filesystem\n pp = PurePath(m)\n\n # check if m is contained in remaining include patterns\n # (last one wins)\n if includes and any(map(lambda p: pp.match(p), includes)):\n continue\n\n # check if m is contained in exclude pattern\n if excludes and any(map(lambda p: pp.match(p), excludes)):\n continue\n\n # check if m is contained in gcdtignore\n if gcdtignore and spec.match_file(str(m)):\n log.debug('Skipped file \\'%s\\' due to gcdtignore pattern',\n str(m.relative_to(root_dir)))\n continue\n\n yield (str(m), str(m.relative_to(root_dir)))", "def zglobs(cls, *globspecs, **kw):\r\n root = kw.pop('root', os.curdir)\r\n patterns = [(os.path.basename(spec).startswith('*'),\r\n re.compile(fnmatch_translate_extended(spec))) for spec in globspecs]\r\n\r\n def matcher(path):\r\n for no_hidden, pattern in patterns:\r\n # Ignore hidden files when globbing wildcards.\r\n if not (no_hidden and os.path.basename(path).startswith('.')):\r\n if pattern.match(path):\r\n return True\r\n return False\r\n\r\n return cls(lambda: set(cls._do_rglob(matcher, allow_dirs=True, root=root, **kw)))", "def _search_files(self, path, path_glob):\n files = glob.glob(\"%s/%s\"% (path, path_glob))\n files_filt = []\n print \"Searching for matching files in %s/:\" % path\n for f in files:\n if re.search(self._info['pattern'], os.path.basename(f)) is not None:\n files_filt.append(f)\n if len(files_filt) == 0:\n print \"None found.\"\n return files_filt", "def search(regex, paths, args, ignore_case=False, verbose=False):\n printer = MultiLinePrinter()\n for path in paths:\n if os.path.isdir(path):\n for dirname, subdirs, files in os.walk(path):\n for filename in files:\n if not KNOWN_TYPES or any([filename.endswith(suffix) for\n suffix in KNOWN_TYPES]):\n search_file(os.path.join(dirname, filename), regex,\n ignore_case, args.undefined, printer)\n else:\n search_file(path, regex, ignore_case, args.undefined, printer)", "def analyse_files_against_regex_pattern(files, pattern):\n # boolean to check if docs are to be generated\n comments_for_jsdoc_exists = False\n # reading file content and comparing with pattern\n for filepath in files:\n if comments_for_jsdoc_exists:\n break\n with open(filepath, encoding='utf-8') as code:\n file_content = code.read()\n matches = pattern.search(file_content)\n if matches:\n comments_for_jsdoc_exists = True\n break\n\n return comments_for_jsdoc_exists", "def get_file_list(work_dir, match_flag='*.*'):\n matches = []\n for root, dir, files in os.walk(work_dir):\n for items in fnmatch.filter(files, match_flag):\n matches.append(os.path.realpath(os.path.join(root, items)))\n\n return matches", "def find_files(path, include=None, exclude=None):\n # If no extension is selected, use the wild card.\n if include is None:\n include = '*'\n # Make sure it is an iterable,\n include = assert_is_iter(include)\n # Find files and flatten.\n files = [glob.glob(f'{path}/**/*.{ext}', recursive=True) for ext in include]\n # The return of deep_flatten is an generator.\n files = list(morsels.deep_flatten(files))\n # Exclude files that the 
user does not want.\n if exclude is not None:\n # Make sure it is an iterable,\n exclude = assert_is_iter(exclude)\n # The slice is used to remove the dot from the beginning of the extension.\n files = [file for file in files if not os.path.splitext(file)[-1][1:] in exclude]\n return files", "def grep(directory, include, findwhat, recursive=True, ignorecase=False, regexp=False, display=None, reversed=False):\n\tfrom os import walk\n\tfrom os.path import join\n\tfrom fnmatch import fnmatchcase\n\tfrom io import open\n\t\n\tdef __search(findwhat, content, ignorecase, regexp):\n\t\t\"\"\" Search in content string \"\"\"\n\t\tfrom re import search, IGNORECASE\n\t\tif regexp:\n\t\t\tif ignorecase:\n\t\t\t\tflag = IGNORECASE\n\t\t\telse:\n\t\t\t\tflag = 0\n\t\t\tif search(findwhat, content, flag):\n\t\t\t\treturn True\n\t\telse:\n\t\t\tif ignorecase:\n\t\t\t\tcontent = content.lower()\n\t\t\t\tfindwhat = findwhat.lower()\n\t\t\t\t\n\t\t\tif content.find(findwhat) != -1:\n\t\t\t\treturn True\n\t\treturn False\n\n\tdef __grep(findwhat, filename, ignorecase, regexp):\n\t\t\"\"\" Grep string in filename \"\"\"\n\t\tresult = []\n\t\ttry:\n\t\t\tencoding = \"utf8\"\n\t\t\tcontent = open(filename,\"r\", encoding=encoding).read()\n\t\texcept FileNotFoundError:\n\t\t\treturn result\n\t\texcept UnicodeDecodeError:\n\t\t\tencoding = \"latin-1\"\n\t\t\tcontent = open(filename,\"r\", encoding=encoding).read()\n\t\t\t\n\t\tif __search(findwhat, content, ignorecase, regexp):\n\t\t\tlines = open(filename,\"r\", encoding=encoding).readlines()\n\t\t\tlineNumber = 1\n\t\t\tfor line in lines:\n\t\t\t\tif __search(findwhat, line, ignorecase, regexp):\n\t\t\t\t\tresult.append((filename, lineNumber, line.strip()))\n\t\t\t\tlineNumber += 1\n\t\treturn result\n\n\tresult = []\n\tfilesPatterns = include.split(\";\")\n\tfor dirpath, dummy, filenames in walk(directory):\n\t\tfor filename in filenames:\n\t\t\tfor filePattern in filesPatterns:\n\t\t\t\tif recursive or (recursive == False and dirpath == directory):\n\t\t\t\t\tif fnmatchcase(filename, filePattern):\n\t\t\t\t\t\tfilename = join(dirpath,filename)\n\t\t\t\t\t\tfounds = __grep(findwhat, filename, ignorecase, regexp)\n\t\t\t\t\t\tresult += founds\n\t\t\t\t\t\tif display != None:\n\t\t\t\t\t\t\tif reversed == False:\n\t\t\t\t\t\t\t\tfor filename, line, content in founds:\n\t\t\t\t\t\t\t\t\tif type(display) == type(True):\n\t\t\t\t\t\t\t\t\t\tif display:\n\t\t\t\t\t\t\t\t\t\t\tprint(\"%s:%d:%s\"%(filename, line, content))\n\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\tdisplay(filename, line, content)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tif founds == []:\n\t\t\t\t\t\t\t\t\tif type(display) == type(True):\n\t\t\t\t\t\t\t\t\t\tif display:\n\t\t\t\t\t\t\t\t\t\t\tprint(\"%s:0:not found\"%(filename, line, content))\n\t\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\t\tdisplay(filename, 0, \"not found\")\n\t\t\t\t\t\t\t\t\t\n\treturn result", "def get_files_patterns(m_type, pattern, wdir, Ldir=False, Linverse=False, Lparents=None):\n Lshow = False\n matched_files=[]\n ### Codes for prefix\n dir_files = os.listdir(wdir)\n i=0\n for fname in dir_files:\n for patt in pattern:\n if m_type == 'p':\n if re.match(patt, fname):\n if Ldir or not os.path.isdir(fname):\n matched_files.append(fname)\n #print (patt, fname)\n ### for suffix\n elif m_type == 's':\n if fname.endswith(patt):\n #if not Linverse:\n if Ldir or not os.path.isdir(fname):\n matched_files.append(fname)\n ### included parents and directories\n if Lparents:\n #relative_files = get_relatives_suff(fname, dir_files)\n fnlist = 
re.split('\\.', fname)\n if os.path.isdir(fnlist[0]):\n rel_dir = fnlist[0]\n print(f\"{i:02d}: relative files {rel_dir}\")\n matched_files.append(rel_dir)\n i += 1\n ### for search\n elif m_type == 'm':\n if re.search(patt, fname):\n ### if it is dir skip\n if Ldir or not os.path.isdir(fname):\n matched_files.append(fname)\n if Lshow:\n print(f\"detect {fname}\") # in {match} {matches}\")\n \n #elif Linverse:\n # if not os.path.isdir(fname):\n # matched_files.append(fname)\n return matched_files", "def find_files(directory, patterns):\n for root, dirs, files in os.walk(directory):\n for basename in files:\n if \".pyc\" not in basename and \"__pycache__\" not in basename:\n for pattern in patterns:\n if fnmatch.fnmatch(basename, pattern):\n filename = os.path.join(root, basename)\n yield filename", "def glob(self, pathname, with_matches=False):\r\n return list(self.iglob(pathname, with_matches))", "def filesInDir(self, path=None, pattern=None):\n if path is None:\n path = self.myDir\n if os.path.isfile(path):\n fileList = [path]\n else:\n fileList = os.listdir(path)\n if pattern is None:\n return fileList\n results = []\n for fileName in fileList:\n if pattern in fileName:\n results.append(fileName)\n return results", "def ignore_patterns(*patterns):\n def _ignore_patterns(path, names):\n ignored_names = []\n for pattern in patterns:\n ignored_names.extend(fnmatch.filter(names, pattern))\n return set(ignored_names)\n return _ignore_patterns" ]
[ "0.66072595", "0.5907133", "0.57880306", "0.57451385", "0.5728306", "0.5712232", "0.5626746", "0.557874", "0.5535921", "0.5524648", "0.54892516", "0.54789233", "0.5470777", "0.5459771", "0.5456681", "0.54244196", "0.5411096", "0.5399698", "0.5313874", "0.5283738", "0.5249157", "0.5247507", "0.524675", "0.52367395", "0.5229557", "0.52262765", "0.5207992", "0.5205714", "0.5195271", "0.5189374" ]
0.6985878
0
Run uncrustify on the specified files or directories. If no files are specified, operates on the whole working directory.
def uncrustify(ui, repo, *patterns, **options):
    if options["diff"] and options["modify"]:
        raise util.Abort("cannot specify --diff and --modify at the same time")

    if options["diff"]:
        mode = "diff"
    elif options["modify"]:
        mode = "modify"
    else:
        mode = "status"

    no_backup = options["no_backup"]
    show_clean = options["show_clean"]

    paths = [path for path in _get_files(repo, patterns, options)
             if path.endswith((".cc", ".h"))]

    uncrustify_cfg = repo.pathto(".uncrustify.cfg")
    relpaths = [repo.pathto(path) for path in paths]
    if not os.path.exists(uncrustify_cfg):
        raise util.Abort("could not find .uncrustify.cfg in repository root")
    _run_uncrustify(uncrustify_cfg, relpaths)

    ctx = repo[None]

    for path in paths:
        relpath = repo.pathto(path)
        uncr_path = path + SUFFIX
        uncr_relpath = relpath + SUFFIX
        have_changes = (ctx[path].data() != ctx[uncr_path].data())

        if have_changes:
            if mode == "status":
                ui.write("M %s\n" % relpath, label="status.modified")
                util.unlink(uncr_relpath)
            elif mode == "diff":
                _run_diff(relpath, uncr_relpath)
                util.unlink(uncr_relpath)
            elif mode == "modify":
                if not no_backup:
                    util.rename(relpath, relpath + ".crusty")
                util.rename(uncr_relpath, relpath)
                if not ui.quiet:
                    ui.write("%s uncrustified\n" % relpath)
        else:
            if show_clean:
                if mode == "status":
                    ui.write("C %s\n" % relpath, label="status.clean")
                elif mode == "modify":
                    ui.write("%s is clean\n" % relpath)
            util.unlink(uncr_relpath)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove(args):\n files = []\n for path in args.files:\n if os.path.isdir(path):\n ft = filetree(path)\n files.extend(ft.filelist())\n else:\n files.append(path)\n for path in files:\n relpath = os.path.normpath(os.path.relpath(path, args.base))\n if relpath in args.cache:\n del args.cache[args.cache.index(relpath)]\n if args.delete and os.path.exists(path):\n os.remove(path)\n args.update = True\n return", "def scan( # pylint: disable=too-many-locals, too-many-branches\n self, package: Package, level: str\n ) -> Optional[List[Issue]]:\n if \"make_targets\" not in package and \"headers\" not in package:\n return []\n\n if self.plugin_context is None:\n return None\n\n uncrustify_bin = \"uncrustify\"\n if self.plugin_context.args.uncrustify_bin is not None:\n uncrustify_bin = self.plugin_context.args.uncrustify_bin\n\n flags: List[str] = []\n flags += self.get_user_flags(level)\n\n files: List[str] = []\n if \"make_targets\" in package:\n for target in package[\"make_targets\"]:\n files += target[\"src\"]\n if \"headers\" in package:\n files += package[\"headers\"]\n\n total_output: List[str] = []\n\n try:\n format_file_name = self.plugin_context.resources.get_file(\"uncrustify.cfg\")\n\n for src in files:\n cmd = [uncrustify_bin, \"-c\", format_file_name, \"-f\", src]\n output = subprocess.check_output(\n cmd, # type: ignore\n stderr=subprocess.STDOUT,\n universal_newlines=True,\n )\n src_cmd = [\"cat\", src]\n src_output = subprocess.check_output(\n src_cmd, stderr=subprocess.STDOUT, universal_newlines=True\n )\n diff = difflib.context_diff(\n output.splitlines(), src_output.splitlines()\n )\n found_diff = False\n output = output.split(\"\\n\", 1)[1]\n for line in diff:\n if (\n line.startswith(\"---\")\n or line.startswith(\"***\")\n or line.startswith(\"! Parsing\")\n or src in line\n or line.isspace()\n ):\n continue\n # This is a bug I can't figure out yet.\n if \"#ifndef\" in line or \"#define\" in line:\n continue\n found_diff = True\n if found_diff:\n total_output.append(src)\n\n except subprocess.CalledProcessError as ex:\n output = ex.output\n logging.warning(\"uncrustify failed! Returncode = %d\", ex.returncode)\n logging.warning(\"%s exception: %s\", self.get_name(), ex.output)\n return None\n\n except OSError as ex:\n logging.warning(\"Couldn't find uncrustify executable! 
(%s)\", ex)\n return None\n\n for output in total_output:\n logging.debug(\"%s\", output)\n\n if self.plugin_context and self.plugin_context.args.output_directory:\n with open(self.get_name() + \".log\", \"w\", encoding=\"utf8\") as fid:\n for output in total_output:\n fid.write(output)\n\n issues: List[Issue] = self.parse_output(total_output, package)\n return issues", "def main():\r\n parser = CommonArgParser(__file__)\r\n parser.add_argument('src_dir', help='Source directory')\r\n parser.add_argument(\r\n 'out_dir',\r\n default='.',\r\n help=\"\"\"The directory the files to be extracted.\r\n (Default: Current directoty\"\"\")\r\n args = parser.parse_all()\r\n for f in next_file(args.src_dir, ['*.tgz', '*.tar.gz']):\r\n untgz(f, args.out_dir)", "def clean_filesystem(files=[]):\n remove_files(files + find_cache_files())", "def cleanFiles(a_file_list):\n for entry in a_file_list:\n cmd = 'sudo rm ' + entry\n os.system(cmd)", "def remove_cruft_files(cls, files):\n valid_files = []\n for changes_file in files:\n if cls.is_changes(changes_file):\n LOG.debug(\"Checking: {c}\".format(c=changes_file))\n try:\n with mini_buildd.misc.open_utf8(changes_file) as cf:\n for fd in debian.deb822.Changes(cf).get(\"Files\", []):\n valid_files.append(fd[\"name\"])\n LOG.debug(\"Valid: {c}\".format(c=fd[\"name\"]))\n\n valid_files.append(os.path.basename(changes_file))\n except BaseException as e:\n mini_buildd.config.log_exception(LOG, \"Invalid changes file: {f}\".format(f=changes_file), e, logging.WARNING)\n\n for f in files:\n if os.path.basename(f) not in valid_files:\n # Be sure to never ever fail, just because cruft removal fails (instead log accordingly)\n try:\n if os.path.isdir(f):\n shutil.rmtree(f)\n else:\n os.remove(f)\n LOG.warning(\"Cruft file (not in any changes file) removed: {f}\".format(f=f))\n except BaseException as e:\n mini_buildd.config.log_exception(LOG, \"Can't remove cruft from incoming: {f}\".format(f=f), e, logging.CRITICAL)", "def clean(c):\n\n for sp_ns in ns_foreach_task_subdir(c):\n try:\n sp_ns.tasks.clean(c)\n except UnexpectedExit:\n pass", "def pyccel_clean_command():\n parser = ArgumentParser(description='Tool for removing files generated by pyccel')\n\n parser.add_argument('folders', metavar='N', type=str, nargs='*',\n help='The folders to be cleaned (default is the current folder')\n parser.add_argument('-n', '--not-recursive', action='store_false',\n help='Only run pyccel-clean in the current directory. Do not recurse into other folders')\n parser.add_argument('-s', '--remove-libs', action='store_true',\n help='Also remove any libraries generated by Python from the folder. Beware this may remove shared libraries generated by tools other than pyccel')\n parser.add_argument('-p', '--remove-programs', action='store_true',\n help='Also remove any programs from the folder. 
Beware this may remove programs unrelated to pyccel')\n args = parser.parse_args()\n\n folders = args.folders\n recursive = args.not_recursive\n remove_libs = args.remove_libs\n remove_programs = args.remove_programs\n\n if len(folders)==0:\n pyccel_clean(None, recursive, remove_libs, remove_programs)\n else:\n for f in folders:\n pyccel_clean(f, recursive, remove_libs, remove_programs)", "def run(self):\n # Call to the method of the parent to\n # clean common files and directories\n Clean.run(self)\n\n # Remove C and C++ files if the current working directory\n # is not a source distribution, since the source files\n # are needed by the package in release mode\n cwd = abspath(dirname(__file__))\n remove_c_files = not exists(join(cwd, \"PKG-INFO\"))\n\n if exists(\"build\"):\n shutil.rmtree(\"build\")\n\n for (dirpath, dirnames, filenames) in walk(MODNAME):\n for filename in filenames:\n extension = splitext(filename)[1]\n if filename.endswith((\".so\", \".pyd\", \".dll\", \".pyc\")):\n unlink(join(dirpath, filename))\n elif remove_c_files and extension in {\".c\", \".cpp\"}:\n pyx_file = str.replace(filename, extension, \".pyx\")\n # Remove the C and C++ files only when they are\n # generated from a Cython extension, because in\n # any other case, they really correspond to the\n # source code\n if exists(join(dirpath, pyx_file)):\n unlink(join(dirpath, filename))\n for ddirname in dirnames:\n if ddirname in {\"__pycache__\"}:\n shutil.rmtree(join(dirpath, ddirname))", "def remove_cruft(cls):\n cls.remove_cruft_files([\"{p}/{f}\".format(p=mini_buildd.config.INCOMING_DIR, f=f) for f in os.listdir(mini_buildd.config.INCOMING_DIR)])", "def scrub():\n\n\tlocal(\"rm -fr dist build\")\n\tlocal(\"find . -name \\\"*.pyc\\\" -exec rm '{}' ';'\")", "def cleanup(*args, **kwargs):\n for file in args:\n if exists(file):\n remove(file)\n for file in kwargs:\n if exists(file):\n remove(file)", "def cleanup_intermediate_files():\n\n dirs = (DIR_PAGE, DIR_SRGB, DIR_VTI, DIR_TIFF, DIR_BACK, DIR_TEXT)\n map(lambda dir: shutil.rmtree(os.path.join(cwd, dir)) , dirs)", "def cleanUpTemporaryFiles(options):\n os.system(\"rm \"+options.output_directory_per_run+\"/*.abundance\")\n os.system(\"rm \"+options.output_directory_per_run+\"/*.phasing_score\")\n os.system(\"rm \"+options.output_directory_per_run+\"/*regionsOfInterest*\")\n os.system(\"mv \"+options.output_directory_per_run+\"/* \"+options.output_directory_per_run+\"/../\")\n os.system(\"rm -rf \"+options.output_directory_per_run)", "def main():\n for root, dirs, list in os.walk(\"/home/congcongchen/Desktop/grader/hw4/2014-10-12-123254\"): \n for i in list: \n dir = os.path.join(root, i) \n if i.endswith(\".zip\"):#unzip the file\n print dir\n\ttry:\n \tunzip(root,dir)\n\texcept:#catch all exception\n\t\tprint \"Error\"+dir \n\n\n for root, dirs, list in os.walk(\"/home/congcongchen/Desktop/grader/hw4/2014-10-12-123254\"): \n for i in list: \n dir = os.path.join(root, i) \n if i.endswith(\".cpp\"):#change the name of the file\n remove(root,i)", "def removeRedundantFiles(workdir, outputfiles=[]):\n\n logger.info(\"Removing redundant files prior to log creation\")\n\n workdir = os.path.abspath(workdir)\n\n dir_list = [\"AtlasProduction*\",\n \"AtlasPoint1\",\n \"AtlasTier0\",\n \"buildJob*\",\n \"CDRelease*\",\n \"csc*.log\",\n \"DBRelease*\",\n \"EvgenJobOptions\",\n \"external\",\n \"fort.*\",\n \"geant4\",\n \"geomDB\",\n \"geomDB_sqlite\",\n \"home\",\n \"o..pacman..o\",\n \"pacman-*\",\n \"python\",\n \"runAthena*\",\n \"share\",\n \"sources.*\",\n 
\"sqlite*\",\n \"sw\",\n \"tcf_*\",\n \"triggerDB\",\n \"trusted.caches\",\n \"workdir\",\n \"*.data*\",\n \"*.events\",\n \"*.py\",\n \"*.pyc\",\n \"*.root*\",\n \"JEM\",\n \"tmp*\",\n \"*.tmp\",\n \"*.TMP\",\n \"MC11JobOptions\",\n \"scratch\",\n \"jobState-*-test.pickle\",\n \"*.writing\",\n \"pwg*\",\n \"pwhg*\",\n \"*PROC*\",\n \"madevent\",\n \"HPC\",\n \"objectstore*.json\",\n \"saga\",\n \"radical\",\n \"ckpt*\"]\n\n # remove core and pool.root files from AthenaMP sub directories\n try:\n cleanupAthenaMP(workdir, outputfiles)\n except Exception, e:\n print(\"Failed to execute cleanupAthenaMP(): %s\" % (e))\n\n # explicitly remove any soft linked archives (.a files) since they will be dereferenced by the tar command (--dereference option)\n matches = []\n import fnmatch\n for root, dirnames, filenames in os.walk(workdir):\n for filename in fnmatch.filter(filenames, '*.a'):\n matches.append(os.path.join(root, filename))\n for root, dirnames, filenames in os.walk(os.path.dirname(workdir)):\n for filename in fnmatch.filter(filenames, 'EventService_premerge_*.tar'):\n matches.append(os.path.join(root, filename))\n if matches != []:\n for f in matches:\n remove(f)\n # else:\n # print(\"Found no archive files\")\n\n # note: these should be partitial file/dir names, not containing any wildcards\n exceptions_list = [\"runargs\", \"runwrapper\", \"jobReport\", \"log.\"]\n\n to_delete = []\n for _dir in dir_list:\n files = glob(os.path.join(workdir, _dir))\n exclude = []\n\n if files:\n for exc in exceptions_list:\n for f in files:\n if exc in f:\n exclude.append(os.path.abspath(f))\n\n _files = []\n for f in files:\n if not f in exclude:\n _files.append(os.path.abspath(f))\n to_delete += _files\n\n exclude_files = []\n for of in outputfiles:\n exclude_files.append(os.path.join(workdir, of))\n for f in to_delete:\n if not f in exclude_files:\n remove(f)\n\n # run a second pass to clean up any broken links\n broken = []\n for root, dirs, files in os.walk(workdir):\n for filename in files:\n path = os.path.join(root, filename)\n if os.path.islink(path):\n target_path = os.readlink(path)\n # Resolve relative symlinks\n if not os.path.isabs(target_path):\n target_path = os.path.join(os.path.dirname(path), target_path)\n if not os.path.exists(target_path):\n broken.append(path)\n else:\n # If it's not a symlink we're not interested.\n continue\n\n if broken:\n for p in broken:\n remove(p)\n\n return 0", "def unnormalize_files():\n mdir = mw.col.media.dir()\n try:\n # A quirk of certain Pythons is that some os commands give\n # different results when you put in a unicode object rather\n # than a str.\n mdir = unicode(mdir, sys.getfilesystemencoding())\n except TypeError:\n # Already unicode.\n pass\n media_in_col = mw.col.media.allMedia()\n # Filter the files on disk. Drop all files that do not contain\n # combining characters. Those should be no problem. (The Unicode\n # web page describes a \"quick test\", we do an even quicker test.)\n problem_files = []\n try:\n for f in progress(os.listdir(mdir), _(u\"Checking files on disk.\"),\n _(u\"Stop that!\")):\n for c in f:\n if unicodedata.combining(c):\n # We just assume that f is NFD-normalized. 
If not\n # we will just waste time later.\n problem_files.append(f)\n break\n except StopIteration:\n return\n try:\n for m in progress(media_in_col, _(u\"Unicode unnormalizing files.\"),\n _(u\"Stop that!\")):\n m_n = unicodedata.normalize('NFD', m)\n if m == m_n:\n continue\n if m_n in problem_files:\n shutil.move(os.path.join(mdir, m_n), os.path.join(mdir, m))\n except StopIteration:\n return", "def run(self):\n super(CleanUp, self).run()\n\n for dir_ in CleanUp.CLEANFOLDERS:\n if exists(dir_):\n print(\"Removing: {}\".format(dir_))\n if not self.dry_run and exists(dir_):\n rmtree(dir_)\n\n for dir_ in CleanUp.CLEANFOLDERSRECURSIVE:\n for pdir in self.dfind(dir_, \".\"):\n print(\"Remove folder {}\".format(pdir))\n rmtree(pdir)\n\n for fil_ in CleanUp.CLEANFILESRECURSIVE:\n for pfil in self.ffind(fil_, \".\"):\n print(\"Remove file {}\".format(pfil))\n os.unlink(pfil)", "def main():\n processSetOfCerFiles(sys.argv[1:])", "def clean(working_directory=None, args=None):\n\n from .cleanme import main\n if args is None:\n args = []\n return main(working_directory, args)", "def clean():\n for dirpath, dirnames, filenames in os.walk('.'):\n for filename in filenames:\n if filename.endswith('.pyc') or filename.endswith('.pyo'):\n full_pathname = os.path.join(dirpath, filename)\n click.echo('Removing {}'.format(full_pathname))\n os.remove(full_pathname)", "def do_maint (self):\n self.log.info (\"cleanup\")\n current = glob.glob (os.path.join (self.infodir, self.infomask))\n removed = set(self.infomap) - set(current)\n for fn in removed:\n self.log.debug (\"forgetting file %s\", fn)\n del self.infomap[fn]\n self.log.info (\"current: %i, removed: %i\", len(current), len(removed))\n self.maint_timer = threading.Timer (self.maint_period, self.do_maint)\n self.maint_timer.start()", "def unpackage():\n\n zipfileLoc = hou.ui.selectFile(title=\"please select a zipFile created by the package function\", pattern=\"*.zip\")\n if not zipfileLoc: \n \n return\n \n file_ = zipfile.ZipFile(hou.expandString(zipfileLoc), \"r\")\n\n isOke = False\n \n for name in file_.namelist():\n \n if name.endswith(\".hip\") or name.endswith(\".hipnc\"):\n \n isOke = True\n break\n \n if not isOke: \n \n return\n \n unpackLoc = hou.expandString(hou.ui.selectFile(title=\"please select a directory you wish to use to unpack the files to.\"))\n \n if not unpackLoc or not os.path.isdir(unpackLoc): \n \n return\n \n unzip(file_, unpackLoc)\n unpackageDir = os.path.dirname(file_.namelist()[0])\n otlsfiles = glob.glob(os.path.join(unpackLoc, unpackageDir, \"otls\", \"*\"))\n hipfile = glob.glob(os.path.join(unpackLoc, unpackageDir, \"*.hip*\"))\n \n if len(hipfile) != 1: \n \n return\n \n hou.hipFile.load(hipfile[0])\n \n for otl in otlsfiles:\n\n hou.hda.installFile(otl)", "def clean():\n clean_files()", "def remove_files(self, files: Set[str]) -> None:\n for f in files:\n src = os.path.join(self.get_directory(), f)\n os.remove(src)", "def clean_files(files):\n\n def _decorator(f):\n @wraps(f)\n def _wraps(*args, **kwargs):\n clean_filesystem(files)\n try:\n f(*args, **kwargs)\n finally:\n clean_filesystem(files)\n\n return _wraps\n\n return _decorator", "def clean_data():\n for clean_file in clean_files:\n file_list = [f for f in os.listdir(\".\") if f.endswith(clean_file)]\n for f in file_list:\n os.remove(f)", "def image_undistort():\n # read test images\n all_test_images = os.listdir('test_images')\n test_images = []\n for name in all_test_images:\n if name.endswith(\".jpg\"):\n test_images.append(name)\n # apply distortion 
correction on test images\n undistort_images(test_images, './camera_calib_dist_pickle.p')\n print(\"DONE: undistorted test-images saved\")", "def clean(files):\n\tfor file in files:\n\t\ttry:\n\t\t\tos.remove(file)\n\t\texcept Exception as e:\n\t\t\tprint(e)", "def dev_clean():\n clean_files(\"csv\", True)\n clean_files(\"jsontxt\", True)" ]
[ "0.57448184", "0.5654019", "0.5597988", "0.54939944", "0.54929745", "0.54213905", "0.53769475", "0.53547657", "0.53084433", "0.5301621", "0.5282862", "0.52690095", "0.5243867", "0.522146", "0.52008116", "0.51682776", "0.5164985", "0.5161235", "0.5154976", "0.51509416", "0.514135", "0.51310194", "0.51255035", "0.51204455", "0.51179254", "0.50974214", "0.50528675", "0.5041839", "0.50279593", "0.49979812" ]
0.72186744
0
This creates the minimal debian source control tree structure based on the current buildout folder. It also copies the buildout DEBIAN folder into the package
def init_structure(self): dest = os.path.join(self.cwd, 'build', 'debian') self.mkdir_p(dest) struct = os.path.join(dest, self.cwd) self.mkdir_p(struct) # copytree_src = os.path.join(self.cwd, 'DEBIAN') # self.copytree(copytree_src, dest, symlinks=False, ignore=None) new_dest = os.path.join(dest, self.cwd[1:]) self.copytree( self.cwd, new_dest, symlinks=False, ignore=self.ignore )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_package(cwd):\n args = ['dpkg-buildpackage', '-us', '-uc']\n arch = parse_debian_control(cwd)['Architecture']\n if arch != 'all':\n args += ['--host-arch', arch]\n\n output, returncode = shell(args, cwd=cwd)\n logger.debug(output)\n if returncode:\n logger.error('failed to build package ☹')\n\n return returncode", "def move_files_into_build():\n build_root = os.path.join(template_path, 'build')\n create_python_package(build_root)\n\n build_buildbot = os.path.join(template_path, 'build', 'buildbot')\n create_python_package(build_buildbot)\n\n pythonify('runtests', [], ['build']) \n pythonify('master.cfg', ['buildbot'], ['build', 'buildbot'])", "def create_deb(ctx, target):\n skip_if_up_to_date(\n '{dist_dir}/{codename}'.format(\n dist_dir=ctx.build_dir.dist_dir,\n codename=target['codename']))(_build_debian_package)(ctx, target)", "def build(self, latest_branch_version):\n self.upstream_model.clone()\n self.upstream_model.checkout(\n ref=f\"tags/v{str(latest_branch_version)}\",\n force=True,\n cwd=self.upstream_model.name,\n )\n with tempfile.TemporaryDirectory() as tmpdir:\n self.log(f\"Building {self.deb_model.name} debian package\")\n self.deb_model.base.clone(cwd=tmpdir)\n self.deb_model.base.checkout(\n ref=f\"v{str(latest_branch_version)}\",\n force=True,\n cwd=f\"{tmpdir}/{self.deb_model.name}\",\n )\n self.bump_revision(cwd=f\"{tmpdir}/{self.deb_model.name}\")\n self.write_debversion(\n semver.VersionInfo.parse(latest_branch_version),\n src_path=Path(tmpdir) / self.deb_model.name,\n )\n cmd_ok(\n f\"cp -a {tmpdir}/{self.deb_model.name}/* {self.upstream_model.name}/.\",\n shell=True,\n )\n self.source(cwd=self.upstream_model.name)\n self.deb_model.base.add(\n [\"debian/changelog\"], cwd=f\"{tmpdir}/{self.deb_model.name}\"\n )\n self.deb_model.base.commit(\n \"Automated Build\", cwd=f\"{tmpdir}/{self.deb_model.name}\"\n )\n self.deb_model.base.push(\n ref=f\"v{str(latest_branch_version)}\",\n cwd=f\"{tmpdir}/{self.deb_model.name}\",\n )", "def build(root):", "def _create_package_dir(self):\n\n recursive = True if self.format == 'src' else False\n return self.create_dir(self.packageDir, recursive=recursive)", "def build(ctx):\n ctx.run(\"vsce package\", replace_env=False)", "def build(target_dir):\n prepare_demo_site(target_dir)\n\n patch_config(\n target_dir, (\"# CREATE_FULL_ARCHIVES = False\", \"CREATE_FULL_ARCHIVES = True\")\n )\n\n with cd(target_dir):\n __main__.main([\"build\"])", "def build_code(self):\n if not path.isfile(path.join(self.tmpdir, 'requirements.txt')):\n return\n\n oldpwd = os.getcwd()\n os.chdir(self.tmpdir)\n\n call('pip install --requirement requirements.txt --target .'.split(' '))\n\n # We need to create a __init__.py per code directory without it.\n # This is required to not create a PYTHONPATH with all directories.\n #\n for (current_dir, directories, files) in os.walk('.', topdown=False):\n if current_dir.endswith('.dist-info'):\n # This is a python metadirectory, we can skip it\n continue\n namespacer = path.join(current_dir, '__init__.py')\n if not path.isfile(namespacer):\n print(\"Creating file {0}\".format(namespacer))\n with open(namespacer, 'w') as nmf:\n nmf.write('# File Generated by lambdamanager packager')\n\n os.chdir(oldpwd)", "def finish(buildout):\n do = buildout['buildout'].get('debian-source-control', False)\n if not do:\n LOG.info('debian-source-control=False')\n return\n dsc = DebianSourceControl(buildout)\n dsc.init_structure()", "def build(session: nox.Session) -> None:\n\n dist_dir = DIR.joinpath(\"dist\")\n if 
dist_dir.exists():\n shutil.rmtree(dist_dir)\n\n session.install(\".[dev]\")\n session.run(\"flit\", \"build\")", "def generate_debootstrap_rootfs(self):\n\n logging.info(\"starting to generate debootstrap rootfs\")\n\n # Generate the base debootstrap command\n debootstrap_command = \"sudo debootstrap --no-check-gpg\"\n\n # Add the foreign and arch only if they are different from host, and\n # thus if use_qemu_static is True\n if self.use_qemu_static:\n logging.info(\"running debootstrap stage 1\")\n debootstrap_command += \" --foreign --arch=\" + self.project.target_arch\n else:\n logging.info(\"running debootstrap\")\n\n # Add the target, mount point and repository url to the debootstrap command\n debootstrap_command += \" \" + self.project.target_version + \" \"\n debootstrap_command += self.project.rootfs_mountpoint + \" \"\n debootstrap_command += self.project.project_definition[\"project-definition\"][\"debootstrap-repository\"]\n\n # Finally run the subprocess\n self.execute_command(debootstrap_command)\n\n # Check if we are working with foreign arch, then ...\n if self.use_qemu_static:\n # QEMU is used, and we have to install it into the target\n self.setup_qemu()\n\n # And second stage must be run\n logging.info(\"doing debootstrap stage 2\")\n debootstrap_command = \"LANG=C sudo chroot \" + self.project.rootfs_mountpoint\n debootstrap_command += \" /debootstrap/debootstrap --second-stage\"\n self.execute_command(debootstrap_command)\n\n\n # Mount bind /proc into the rootfs mountpoint\n sudo_command = \"sudo mount --bind --make-rslave /proc \" + self.project.rootfs_mountpoint + \"/proc\"\n self.execute_command(sudo_command)\n self.proc_is_mounted = True\n\n # Mount bind /dev/pts into the rootfs mountpoint\n sudo_command = \"sudo mount --bind --make-rslave /dev/pts \" + self.project.rootfs_mountpoint + \"/dev/pts\"\n self.execute_command(sudo_command)\n self.devpts_is_mounted = True\n\n # Mount bind /dev/shm into the rootfs mountpoint\n sudo_command = \"sudo mount --bind --make-rslave /dev/shm \" + self.project.rootfs_mountpoint + \"/dev/shm\"\n self.execute_command(sudo_command)\n self.devshm_is_mounted = True\n\n # Update the APT sources\n self.generate_apt_sources_configuration()\n\n # Then update the list of packages\n apt_command = \"sudo chroot \" + self.project.rootfs_mountpoint + \" /usr/bin/apt-get update\"\n self.execute_command(apt_command)\n\n # Install extra packages into the chroot\n apt_command = \"sudo chroot \" + self.project.rootfs_mountpoint + \" /usr/bin/apt-get install --no-install-recommends --yes --allow-unauthenticated apt-utils ansible\"\n self.execute_command(apt_command)\n\n # Generate a unique build timestamp into /etc/dft_version\n self.generate_build_number()", "def pkgbuildContentPackage( self, pars, directory ):\n\n return \"\"\"\\\n # Assemble your package in ${pkgdir}\n\"\"\"", "def pkgbuildContentBuild( self, pars, directory ):\n\n return \"\"\"\\\n # If your package requires compilation, insert your build code here\n cd \"${srcdir}/${pkgname}-${pkgver}\"\n echo Building ...\\\n\"\"\"", "def do_stage(self, mirror_only=False):\n super().do_stage(mirror_only)\n stsrc = self.stage.source_path\n srcpath = os.path.join( stsrc, self.build_directory )\n ppath = ancestor (srcpath)\n shutil.move(stsrc, stsrc+\"_old\")\n mkdirp(ppath)\n shutil.move(stsrc+\"_old\",srcpath)", "def tweak_new_filesystem(root_dir):\n\n # create a symlink for insserv\n force_symlink('../usr/lib/insserv/insserv',\n os.path.join(root_dir, 'sbin/insserv'))\n\n # create a symlink for 
awk\n force_symlink('mawk', os.path.join(root_dir, 'usr/bin/awk'))\n\n # Nvidia keeps packaging up a broken post-install script for their cudnn\n # deb. Freaking nvidia\n cudnn_postinst_path = 'var/lib/dpkg/info/libcudnn6-dev.postinst'\n cudnn_postinst_path = os.path.join(root_dir, cudnn_postinst_path)\n\n if os.path.exists(cudnn_postinst_path):\n with open(cudnn_postinst_path, 'r') as infile:\n content = infile.read()\n if not content.startswith(\"#!\"):\n with open(cudnn_postinst_path, 'w') as outfile:\n outfile.write('#! /bin/sh\\n')\n outfile.write(content)\n\n # NOTE(josh): patch the base-packages post-install hook so it doesn't\n # complain about files in /var/run\n basefiles_path = os.path.join(root_dir,\n 'var/lib/dpkg/info/base-files.postinst')\n if os.path.exists(basefiles_path):\n apply_patch_text(BASE_FILES_PATCH, root_dir)\n\n # NOTE(josh): ifupdown should depend on initscripts, but it doesn't\n status_path = os.path.join(root_dir, 'var/lib/dpkg/status')\n tempfile_path = status_path + '.tmp'\n with open(tempfile_path, 'wb') as outfile:\n with open(status_path, 'rb') as infile:\n for line in infile:\n outfile.write(line)\n if line.strip() == 'Package: ifupdown':\n break\n\n for line in infile:\n if line.startswith('Depends: '):\n line = ', '.join(line.strip().split(', ') + ['initscripts']) + '\\n'\n outfile.write(line)\n break\n else:\n outfile.write(line)\n\n for line in infile:\n outfile.write(line)\n os.rename(tempfile_path, status_path)\n\n # NOTE(josh): resolvconf tries to a write a file in this directory\n try:\n target_path = os.path.join(root_dir, 'run/resolvconf/interface')\n os.makedirs(target_path)\n except OSError:\n if not os.path.isdir(target_path):\n raise\n\n # NOTE(josh): Can't postinst makedev without CAP_MKNOD\n if os.getuid() != 0:\n makedev_postinst = os.path.join(root_dir,\n 'var/lib/dpkg/info/makedev.postinst')\n if os.path.exists(makedev_postinst):\n os.rename(makedev_postinst, makedev_postinst + '.bak')\n\n # remove temporary/boostrap files\n files_to_remove = ['etc/apt/sources.list.d/bootstrap.list']\n\n for filename in files_to_remove:\n file_path = os.path.join(root_dir, filename)\n if os.path.exists(file_path):\n os.remove(file_path)", "def build_deb():\n subprocess.check_call(['dpkg-deb', '--build', 'build', 'qastetray.deb'],\n stdout=subprocess.DEVNULL)", "def make_dir_structure(self, out):\n program_folder = os.path.join(out, self.out)\n self.make_output_dir(program_folder)\n self.make_config_dirs(program_folder)\n return None", "def convert(self):\n with TemporaryDirectory(prefix='py2deb-build-') as build_directory:\n\n # Prepare the absolute pathname of the Python interpreter on the\n # target system. 
This pathname will be embedded in the first line\n # of executable scripts (including the post-installation and\n # pre-removal scripts).\n python_executable = '/usr/bin/%s' % python_version()\n\n # Unpack the binary distribution archive provided by pip-accel inside our build directory.\n build_install_prefix = os.path.join(build_directory, self.converter.install_prefix.lstrip('/'))\n self.converter.pip_accel.bdists.install_binary_dist(\n members=self.transform_binary_dist(python_executable),\n prefix=build_install_prefix,\n python=python_executable,\n virtualenv_compatible=False,\n )\n\n # Determine the directory (at build time) where the *.py files for\n # Python modules are located (the site-packages equivalent).\n if self.has_custom_install_prefix:\n build_modules_directory = os.path.join(build_install_prefix, 'lib')\n else:\n # The /py*/ pattern below is intended to match both /pythonX.Y/ and /pypyX.Y/.\n dist_packages_directories = glob.glob(os.path.join(build_install_prefix, 'lib/py*/dist-packages'))\n if len(dist_packages_directories) != 1:\n msg = \"Expected to find a single 'dist-packages' directory inside converted package!\"\n raise Exception(msg)\n build_modules_directory = dist_packages_directories[0]\n\n # Determine the directory (at installation time) where the *.py\n # files for Python modules are located.\n install_modules_directory = os.path.join('/', os.path.relpath(build_modules_directory, build_directory))\n\n # Execute a user defined command inside the directory where the Python modules are installed.\n command = self.converter.scripts.get(self.python_name.lower())\n if command:\n execute(command, directory=build_modules_directory, logger=logger)\n\n # Determine the package's dependencies, starting with the currently\n # running version of Python and the Python requirements converted\n # to Debian packages.\n dependencies = [python_version()] + self.debian_dependencies\n\n # Check if the converted package contains any compiled *.so files.\n object_files = find_object_files(build_directory)\n if object_files:\n # Strip debugging symbols from the object files.\n strip_object_files(object_files)\n # Determine system dependencies by analyzing the linkage of the\n # *.so file(s) found in the converted package.\n dependencies += find_system_dependencies(object_files)\n\n # Make up some control file fields ... 
:-)\n architecture = self.determine_package_architecture(object_files)\n control_fields = unparse_control_fields(dict(package=self.debian_name,\n version=self.debian_version,\n maintainer=self.debian_maintainer,\n description=self.debian_description,\n architecture=architecture,\n depends=dependencies,\n priority='optional',\n section='python'))\n\n # Automatically add the Mercurial global revision id when available.\n if self.vcs_revision:\n control_fields['Vcs-Hg'] = self.vcs_revision\n\n # Apply user defined control field overrides from `stdeb.cfg'.\n control_fields = self.load_control_field_overrides(control_fields)\n\n # Create the DEBIAN directory.\n debian_directory = os.path.join(build_directory, 'DEBIAN')\n os.mkdir(debian_directory)\n\n # Generate the DEBIAN/control file.\n control_file = os.path.join(debian_directory, 'control')\n logger.debug(\"Saving control file fields to %s: %s\", control_file, control_fields)\n with open(control_file, 'wb') as handle:\n control_fields.dump(handle)\n\n # Lintian is a useful tool to find mistakes in Debian binary\n # packages however Lintian checks from the perspective of a package\n # included in the official Debian repositories. Because py2deb\n # doesn't and probably never will generate such packages some\n # messages emitted by Lintian are useless (they merely point out\n # how the internals of py2deb work). Because of this we silence\n # `known to be irrelevant' messages from Lintian using overrides.\n if self.converter.lintian_ignore:\n overrides_directory = os.path.join(\n build_directory, 'usr', 'share', 'lintian', 'overrides',\n )\n overrides_file = os.path.join(overrides_directory, self.debian_name)\n os.makedirs(overrides_directory)\n with open(overrides_file, 'w') as handle:\n for tag in self.converter.lintian_ignore:\n handle.write('%s: %s\\n' % (self.debian_name, tag))\n\n # Find the alternatives relevant to the package we're building.\n alternatives = set((link, path) for link, path in self.converter.alternatives\n if os.path.isfile(os.path.join(build_directory, path.lstrip('/'))))\n\n # Generate post-installation and pre-removal maintainer scripts.\n self.generate_maintainer_script(filename=os.path.join(debian_directory, 'postinst'),\n python_executable=python_executable,\n function='post_installation_hook',\n package_name=self.debian_name,\n alternatives=alternatives,\n modules_directory=install_modules_directory,\n namespaces=self.namespaces)\n self.generate_maintainer_script(filename=os.path.join(debian_directory, 'prerm'),\n python_executable=python_executable,\n function='pre_removal_hook',\n package_name=self.debian_name,\n alternatives=alternatives,\n modules_directory=install_modules_directory,\n namespaces=self.namespaces)\n\n # Enable a user defined Python callback to manipulate the resulting\n # binary package before it's turned into a *.deb archive (e.g.\n # manipulate the contents or change the package metadata).\n if self.converter.python_callback:\n logger.debug(\"Invoking user defined Python callback ..\")\n self.converter.python_callback(self.converter, self, build_directory)\n logger.debug(\"User defined Python callback finished!\")\n\n return build_package(directory=build_directory,\n check_package=self.converter.lintian_enabled,\n copy_files=False)", "def generate(env):\n## doxyfile_scanner = env.Scanner(## DoxySourceScan,\n## \"DoxySourceScan\",\n## scan_check = DoxySourceScanCheck,\n##)\n\n if targz.exists(env):\n srcdist_builder = targz.makeBuilder(srcDistEmitter)\n\n env['BUILDERS']['SrcDist'] = 
srcdist_builder", "def build():\n click.secho('Creating package ...')\n pkg = utils.create_package()\n click.secho('Package created: {}'.format(pkg), fg='green')\n click.secho('Creating wheel...')\n wheel_path = utils.create_wheel()\n click.secho('Wheel created in {}'.format(wheel_path), fg='green')", "def build(buildout=None, newest=False):\n n = 'n' if newest else 'N'\n with cd(path()):\n if not buildout: # then use the deployment config\n buildout = '../{buildout}'.format(buildout=deploy_cfg())\n if not remote_exists(buildout, use_sudo=True):\n buildout = buildout[3:]\n sudo('bin/buildout -{n} -c {buildout}'.format(buildout=buildout, n=n),\n user=env.account)", "def install_project_structure():\n from .project import static_base, use_static\n\n with sudo():\n info('Install application directory structure')\n\n create_app_root()\n\n if use_static():\n # Create static web paths\n static_path = os.path.join(static_base(), 'static')\n media_path = os.path.join(static_base(), 'media')\n debian.mkdir(static_path, group='www-data', mode=1775)\n debian.mkdir(media_path, group='www-data', mode=1775)", "def package():\n call([sys.executable, \"setup.py\", \"clean\", \"--all\", \"bdist_egg\"], cwd=\"src\")\n call([sys.executable, \"setup.py\", \"clean\", \"--all\", \"bdist_wheel\"], cwd=\"src\")", "def build_and_deploy():\n\n with shell_env(TZ=_get_timezone()):\n _create_output_branch()\n _build_html()\n _git_commit_all()\n _git_push(_get_output_branch())", "def task_prepare_build():\n\n import sys\n\n python_path = sys.executable.split(os.sep)\n venv_path = str(Path(os.sep.join(python_path[:-2])))\n\n def get_dst_path():\n import platform\n\n print(f\"Going on with {venv_path} as the virtual environment exclusively used for using pyinstaller.\")\n arch = platform.system()\n if arch == \"Windows\":\n return Path(venv_path) / \"Lib/site-packages/mad_gui/qt_designer/build/\"\n if arch in [\"Linux\", \"Darwin\"]:\n python_dirs = os.listdir(Path(venv_path) / \"lib/\")\n warnings.warn(\n f\"dodo.py: Assuming your python 3.7 installation is in {Path(venv_path)}/lib/{python_dirs[0]}\"\n )\n return Path(venv_path) / \"lib\" / python_dirs[0] / \"site-packages/mad_gui/qt_designer/build/\"\n raise ValueError(\"What operating system is this?!\")\n\n def set_up_paths():\n if not os.path.exists(get_dst_path().parent):\n raise FileNotFoundError(\n \"Apparently mad_gui is not installed in this environemnt. Use `pip install . ` to do so.\"\n )\n dst_path = get_dst_path()\n os.makedirs(dst_path, exist_ok=True)\n\n def convert_ui_to_py():\n dst_path = get_dst_path()\n ui_files = [file for file in os.listdir(dst_path.parent) if \".ui\" in file]\n print(\"\\n\")\n for file in ui_files:\n print(f\"Converting from: {dst_path.parent}{os.sep}{file}\")\n print(f\"To: {dst_path}{os.sep}{file.split('.')[0]}.py\\n\")\n os.popen(f\"pyside2-uic -o {dst_path}{os.sep}{file.split('.')[0]}.py {dst_path.parent}{os.sep}{file}\")\n\n print(\n \"Info: These conversion should take place in the virutal environment you are going to use with \"\n \"pyinstaller.\"\n )\n\n return {\n \"actions\": [set_up_paths, convert_ui_to_py],\n \"verbosity\": 2,\n }", "def bundle(self):\n\n try:\n self.build_directory.mkdir(parents=True)\n except FileExistsError:\n logger.warning('Directory already exists: %s', self.build_directory)\n decision = input(\n f'{self.build_directory} already exists. Overwrite? 
Y/[N]: '\n )\n if decision.strip().upper() == 'Y':\n logger.info('Deleting old build directory: %s', self.build_directory)\n shutil.rmtree(self.build_directory)\n self.build_directory.mkdir(parents=True)\n else:\n return\n\n with cd(self.app_directory):\n self._install_dependencies()\n self._handle_supplemental_data()\n self._cleanup_files()\n if self.make_zip:\n self._zip_files()", "def copy_files():\n os.makedirs('build/usr/lib/python3/dist-packages', exist_ok=True)\n os.makedirs('build/usr/share/doc', exist_ok=True)\n\n shutil.copytree('applications', 'build/usr/share/applications')\n shutil.copytree('doc', 'build/usr/share/doc/qastetray')\n shutil.copytree('icons', 'build/usr/share/icons')\n shutil.copytree('locale', 'build/usr/share/locale')\n shutil.copytree('qastetray',\n 'build/usr/lib/python3/dist-packages/qastetray')", "def _build(self, notebook: None = None) -> None:\n packages = self._write_templates(notebook=notebook)\n\n if not os.path.isfile(os.path.join(_DIRECTORY, 'package.json')):\n packagejson = os.path.join(self._package_dir, 'src/package.json')\n shutil.copy(packagejson, _DIRECTORY)\n\n if not os.path.isfile(os.path.join(_DIRECTORY, 'webpack.prod.json')):\n webpackprod = os.path.join(self._package_dir, 'src/webpack.prod.js')\n shutil.copy(webpackprod, _DIRECTORY)\n\n if not os.path.isfile(os.path.join(_DIRECTORY, 'webpack.dev.json')):\n webpackdev = os.path.join(self._package_dir, 'src/webpack.dev.js')\n shutil.copy(webpackdev, _DIRECTORY)\n\n if run(['yarn', '--ignore-engines', 'install'], notebook=notebook) > 1:\n raise YarnError('Error installing node packages')\n\n if packages:\n installed = installed_packages()\n new_packages = [x for x in packages if x.split('@')[0] not in installed]\n\n if new_packages:\n retval = run(['yarn', '--ignore-engines', 'add'] + new_packages, notebook=notebook)\n if retval > 1:\n raise YarnError('Error installing node packages')\n elif retval == 1:\n print('Yarn error but trying to continue build')\n retval = run([_WEBPACK, '--config', 'webpack.dev.js'], notebook=notebook)\n if retval != 0:\n raise WebpackError('Error building with webpack')", "def _generate_build_source_package(self):\n # Workflow sources for cloud executor must all be under same workdir root\n for filename in self.workflow_sources:\n if self.workdir not in filename:\n raise WorkflowError(\n \"All source files must be present in the working directory, \"\n \"{workdir} to be uploaded to a build package that respects \"\n \"relative paths, but {filename} was found outside of this \"\n \"directory. 
Please set your working directory accordingly, \"\n \"and the path of your Snakefile to be relative to it.\".format(\n workdir=self.workdir, filename=filename\n )\n )\n\n # We will generate a tar.gz package, renamed by hash\n tmpname = next(tempfile._get_candidate_names())\n targz = os.path.join(tempfile.gettempdir(), \"snakemake-%s.tar.gz\" % tmpname)\n tar = tarfile.open(targz, \"w:gz\")\n\n # Add all workflow_sources files\n for filename in self.workflow_sources:\n arcname = filename.replace(self.workdir + os.path.sep, \"\")\n tar.add(filename, arcname=arcname)\n\n tar.close()\n\n # Rename based on hash, in case user wants to save cache\n sha256 = get_file_hash(targz)\n hash_tar = os.path.join(\n self.workflow.persistence.aux_path, \"workdir-{}.tar.gz\".format(sha256)\n )\n\n # Only copy if we don't have it yet, clean up if we do\n if not os.path.exists(hash_tar):\n shutil.move(targz, hash_tar)\n else:\n os.remove(targz)\n\n # We will clean these all up at shutdown\n self._build_packages.add(hash_tar)\n\n return hash_tar" ]
[ "0.6261107", "0.6206168", "0.6162178", "0.6102843", "0.60703856", "0.60544455", "0.602594", "0.60136557", "0.600986", "0.60017514", "0.59842265", "0.5976109", "0.59176093", "0.59169763", "0.5903615", "0.589208", "0.5881746", "0.5874955", "0.5868495", "0.58510756", "0.58366024", "0.58254963", "0.581019", "0.58000493", "0.5796538", "0.57830304", "0.5778544", "0.57156265", "0.57154566", "0.5714249" ]
0.7112945
0
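For readers skimming the init_structure document above: the layout it produces is build/debian/<absolute-path-of-cwd> nested inside the buildout folder itself. Below is a minimal standalone sketch of that step using only the standard library; the helper name and the ignore list are assumptions rather than part of the recipe, and the recipe's own mkdir_p/copytree wrappers are replaced with os.makedirs and shutil.copytree (Python 3.8+ for dirs_exist_ok).

import os
import shutil

def sketch_init_structure(cwd, ignore_names=("build", ".git")):
    # Recreate the nesting used by the recipe: build/debian/<absolute cwd path>.
    dest = os.path.join(cwd, "build", "debian")
    new_dest = os.path.join(dest, cwd.lstrip(os.sep))
    os.makedirs(dest, exist_ok=True)
    # Copy the buildout folder into the nested destination; ignoring "build"
    # keeps the copy from recursing into the tree it is creating.
    shutil.copytree(
        cwd,
        new_dest,
        symlinks=False,
        ignore=shutil.ignore_patterns(*ignore_names),
        dirs_exist_ok=True,
    )
    return new_dest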
Test that parsing a config produces the expected ModuleConfig object.
def test_parse_config(self): user_config = {"weighted_display_name_like": "testabc [SoMeThInG]"} _, _, module_config = create_user_directory_search_module_with_config( user_config ) # Check that the generated config contains what we expect self.assertEqual( module_config.weighted_display_name_like, user_config["weighted_display_name_like"], )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_config_class():\n assert config is not None", "def test_parser():\n\n parser = configparser.RawConfigParser()\n version = '1.2.3'\n string = 'string-value'\n bool = 'False'\n literal = \"['a', 'b', 'c']\"\n literal2 = '1.23'\n section = 'dashboard'\n\n parser.add_section(section)\n parser.set(section, 'APP_VERSION', version)\n parser.set(section, 'string', string)\n parser.set(section, 'bool', bool)\n parser.set(section, 'literal', literal)\n parser.set(section, 'literal2', literal2)\n\n assert parse_version(parser, section, 'default') == version\n assert parse_string(parser, section, 'string', 'default') == string\n assert not parse_bool(parser, section, 'bool', 'True')\n assert parse_literal(parser, section, 'literal', 'default') == ['a', 'b', 'c']\n assert parse_literal(parser, section, 'literal2', 'default') == 1.23", "def test_basic_parse(config, expected):\n if isinstance(expected, dict):\n raw_dict = call(config)\n assert expected == raw_dict\n elif issubclass(expected, ValueError):\n with pytest.raises(ValueError):\n raw_dict = call(config)\n elif issubclass(expected, FileNotFoundError):\n with pytest.raises(FileNotFoundError):\n raw_dict = call(config)\n elif issubclass(expected, TypeError):\n with pytest.raises(TypeError):\n raw_dict = call(config)\n elif issubclass(expected, KeyError):\n with pytest.raises(KeyError):\n raw_dict = call(config)\n else:\n raise ValueError(f\"expected {expected} not accounted for\")", "def test_basic_parse(config, expected):\n if isinstance(expected, dict):\n raw_dict = call(config)\n assert expected == raw_dict\n elif issubclass(expected, ValueError):\n with pytest.raises(ValueError):\n raw_dict = call(config)\n elif issubclass(expected, FileNotFoundError):\n with pytest.raises(FileNotFoundError):\n raw_dict = call(config)\n elif issubclass(expected, TypeError):\n with pytest.raises(TypeError):\n raw_dict = call(config)\n elif issubclass(expected, KeyError):\n with pytest.raises(KeyError):\n raw_dict = call(config)\n else:\n raise ValueError(f\"expected {expected} not accounted for\")", "def test_basic_parse(config, expected):\n if isinstance(expected, dict):\n raw_dict = call(config)\n assert expected == raw_dict\n elif expected is None:\n raw_dict = call(config)\n assert expected == raw_dict\n elif issubclass(expected, ValueError):\n with pytest.raises(ValueError):\n raw_dict = call(config)\n elif issubclass(expected, FileNotFoundError):\n with pytest.raises(FileNotFoundError):\n raw_dict = call(config)\n elif issubclass(expected, TypeError):\n with pytest.raises(TypeError):\n raw_dict = call(config)\n elif issubclass(expected, KeyError):\n with pytest.raises(KeyError):\n raw_dict = call(config)\n else:\n raise ValueError(f\"expected {expected} not accounted for\")", "def test_config_object():\n assert isinstance(CFG, Configuration)", "def test_config_from_file(self):\n parser = Parser()\n args = parser.parser.parse_args(['-c'])\n if args.config:\n config = Config()\n config.config_file = \"./config\"\n config.config = test_config\n config.config_from_file()\n self.assertTrue(config.config)\n os.remove(config.config_file)", "def test_config_to_dict_py2(self):\n if PYTHON_VERSION > 2:\n return\n\n from ConfigParser import ConfigParser\n fixture = ConfigParser()\n fixture.add_section('something')\n fixture.set('something', 'value', 'stuff')\n\n self.assertEqual({ 'something': { 'value': 'stuff' } }, config_to_dict(fixture))", "def test_config(self, mocked_callable_loader, mocked_load_config):\n config_filename = 'aconfigfile'\n finder = 
importer.Finder(config_filename)\n mocked_load_config.assert_called_once_with(config_filename)\n\n module_config = finder.config.get('fake_package.fake_module')\n self.assertTrue(module_config is not None)\n self.assertTrue('callable' in module_config)\n self.assertTrue('config' in module_config)", "def test_config_to_dict_py3(self):\n if PYTHON_VERSION < 3:\n return\n\n from configparser import ConfigParser\n fixture = ConfigParser()\n fixture['something'] = { 'value': 'stuff' }\n\n self.assertEqual({ 'something': { 'value': 'stuff' } }, config_to_dict(fixture))", "def test_expected_config(expectedconfig):\n expected = expectedconfig.read_text()\n config = CONFIGSDIR / expectedconfig.name\n\n assert dumpconfig(config) == expected", "def test_loads_a_config_file(self):\n from test.resources import config\n self.assertIsInstance(config, type(sys))\n self.assertIsNotNone(config.example)\n self.assertEqual(config.example.config_option, 'config-value')", "def parse_config(config: Dict) -> ModuleConfig:\n return ModuleConfig(\n weighted_display_name_like=config.get(\"weighted_display_name_like\")\n )", "def test_config_is_loaded(config):\n assert config[\"DEBUG\"] is False", "def assert_python_module(config: Config) -> Config:\n spec = importlib.util.spec_from_file_location(\n config.name,\n config.path\n )\n\n if spec:\n config.module = importlib.util.module_from_spec(spec)\n spec.loader.exec_module(config.module)\n\n return config\n\n raise Exception(\"{} is not a valid python module\".format(plugin))", "def test_parse_config(self):\n config_file = os.path.join('top', 'conf', 'top.conf')\n\n self._c.set_config_file(config_file)\n self._c.parse_config()\n\n received = self._c.adp_loop\n expected = 30\n msg = 'AdpB2CConfig.adp_loop error'\n self.assertEqual(received, expected, msg)\n\n received = self._c.adp_dirs\n expected = ['/var/ftp/pub/nparcel/adp/in']\n msg = 'AdpB2CConfig.adp_dirs error'\n self.assertListEqual(received, expected, msg)\n\n received = self._c.archive_dir\n expected = '/data/top/archive'\n msg = 'AdpB2CConfig.archive_dir error'\n self.assertEqual(received, expected, msg)\n\n received = self._c.adp_file_formats\n expected = []\n msg = 'AdpB2CConfig.adp_file_formats error'\n self.assertListEqual(received, expected, msg)\n\n # For the default configuration file the [db] section is blank\n received = self._c.db_kwargs()\n msg = 'AdpB2CConfig.db_kwargs error'\n self.assertIsNone(received, msg)\n\n received = self._c.code_header\n expected = 'TP Code'\n msg = 'AdpB2CConfig.code_header error'\n self.assertEqual(received, expected, msg)\n\n received = self._c.adp_headers\n expected = {'agent.code': 'TP Code',\n 'agent.dp_code': 'DP Code',\n 'agent.name': 'ADP Name',\n 'agent.address': 'Address',\n 'agent.suburb': 'Suburb',\n 'agent.state': 'State',\n 'agent.postcode': 'Postcode',\n 'agent.opening_hours': 'Opening Hours',\n 'agent.notes': 'Notes',\n 'agent.parcel_size_code': 'ADP Accepts Parcel Size',\n 'agent.phone_nbr': 'Phone',\n 'agent.contact_name': 'Contact',\n 'agent.email': 'Email',\n 'agent.fax_nbr': 'Fax',\n 'agent.latitude': 'Latitude',\n 'agent.longitude': 'Longitude',\n 'agent.status': 'Active',\n 'delivery_partner.id': 'DP Id',\n 'login_account.username': 'Username'}\n msg = 'AdpB2CConfig.adp.headers error'\n self.assertDictEqual(received, expected, msg)\n\n received = self._c.delivery_partners\n expected = ['Nparcel', 'ParcelPoint', 'Toll', 'National Storage']\n msg = 'AdpB2CConfig.adp.delivery_partners error'\n self.assertListEqual(received, expected, msg)\n\n 
received = self._c.adp_default_passwords\n expected = {'nparcel': 'aaaa',\n 'parcelpoint': 'bbbb',\n 'toll': 'cccc',\n 'national storage': 'dddd'}\n msg = 'AdpB2CConfig.adp_default_passwords error'\n self.assertDictEqual(received, expected, msg)", "def test_config():\n args = Namespace(molecule=\"nucleotide\", verbose=False)\n config = core.Config.from_args(args)\n assert config.verbose is False\n assert config.molecule == 'nucleotide'\n assert config.extended_validation == 'none'\n\n args = Namespace(molecule=\"protein\", verbose=True)\n config = core.Config.from_args(args)\n assert config.verbose is True\n assert config.molecule == 'protein'", "def testConfigA(self):\n assert type(self.config) == dict, \"Read setting not returning a dictionary\"", "def test_init_from(config):\n\n config.init_from()\n config.init_from(file='../../config.cfg')", "def __parse(self, to_parse):\n path = Path(to_parse)\n if not path.exists():\n raise FileNotFoundError(f\"Configuration file {path.absolute()} not found.\")\n else:\n with path.open() as file:\n data = json.load(file)\n\n if \"name\" not in data:\n raise AssertionError(f\"Missing fundamental parameter: name\")\n Validator(\n [\n (data[\"name\"], str),\n (data[\"include\"] if \"include\" in data else {}, dict),\n (data[\"modules\"] if \"modules\" in data else [], list),\n ]\n )\n # Input parsing\n if \"include\" in data:\n name = data[\"name\"]\n data = self.validate_include(data[\"include\"])\n data[\"name\"] = name\n elif \"modules\" not in data:\n raise AssertionError(\"No modules neither includes are defined.\")\n\n if \"args\" in data:\n Validator().dict(data[\"args\"])\n self.__get_modules(data)", "def test_node_config() -> None:\n node = MyNode()\n node.configure(\n MyConfig(\n int_field=5,\n str_field=\"hello\",\n float_field=0.5,\n int_enum_field=MyIntEnum.B,\n str_enum_field=MyStrEnum.A,\n bool_field=True,\n )\n )\n node.setup()", "def test_load_config(self):\n config = copyclipper.LoadConfig()\n self.assertTrue(len(config) > 0)", "def test_collect_configuration(self):\n sample_config = \"\"\"[dyndnsc]\nconfigs = testconfig\n\n[testconfig]\nuse_preset = testpreset\nupdater-userid = bob\nupdater-password = XYZ\n# test overwriting a preset value:\ndetector-url = http://myip.example.com/\n\n[preset:testpreset]\nupdater = fubarUpdater\nupdater-url = https://update.example.com/nic/update\nupdater-moreparam = some_stuff\ndetector = webcheck4\ndetector-family = INET\ndetector-url = http://ip.example.com/\ndetector-parser = plain\n \"\"\"\n p = configparser.ConfigParser()\n p.readfp(StringIO(sample_config)) # XXX readfp() is deprecated since py 3.2\n config = collect_config(p)\n self.assertEqual(dict, type(config))\n self.assertTrue('testconfig' in config)\n self.assertTrue('detector' in config['testconfig'])\n self.assertTrue(isinstance(config['testconfig']['detector'], list))\n self.assertEqual(1, len(config['testconfig']['detector']))\n detector, detector_opts = config['testconfig']['detector'][-1]\n self.assertEqual(detector, \"webcheck4\") # from the preset\n self.assertEqual(detector_opts['url'], \"http://myip.example.com/\") # from the user conf\n self.assertTrue('updater' in config['testconfig'])\n self.assertTrue(isinstance(config['testconfig']['updater'], list))\n self.assertEqual(1, len(config['testconfig']['updater']))\n updater = config['testconfig']['updater'][0]\n self.assertEqual(\"fubarUpdater\", updater[0])\n self.assertTrue(\"url\" in updater[1])\n self.assertTrue(\"moreparam\" in updater[1])\n self.assertEqual(\"some_stuff\", 
updater[1][\"moreparam\"])", "def test_config_from_json(self):\n\n # Make json-file\n path = self.write_temp_file(\"\"\"\n{\n \"section1\": {\n \"string1\": \"\",\n \"string2\": \"string2\",\n \"int1\": 0,\n \"int2\": 1,\n \"float1\": 0.0,\n \"float2\": 1.1,\n \"boolean1\": false,\n \"boolean2\": true\n },\n \"section2\": {\n \"string2\": \"string2\",\n \"int2\": 2,\n \"float2\": 2.2,\n \"boolean2\": false\n }\n}\n\"\"\")\n\n for namespace in [None, 'namespace']:\n config = Config()\n config.load_from_json(path, namespace=namespace)\n\n namespace_prefix = '%s.' % namespace if namespace is not None else ''\n\n # Test section 1\n self.assert_equal_deep(8, len(config('%ssection1' % namespace_prefix)))\n self.assert_equal_deep('', config('%ssection1.string1' % namespace_prefix))\n self.assert_equal_deep('string2', config('%ssection1.string2' % namespace_prefix))\n self.assert_equal_deep(0, config('%ssection1.int1' % namespace_prefix))\n self.assert_equal_deep(1, config('%ssection1.int2' % namespace_prefix))\n self.assert_equal_deep(0.0, config('%ssection1.float1' % namespace_prefix))\n self.assert_equal_deep(1.1, config('%ssection1.float2' % namespace_prefix))\n self.assert_equal_deep(False, config('%ssection1.boolean1' % namespace_prefix))\n self.assert_equal_deep(True, config('%ssection1.boolean2' % namespace_prefix))\n\n # Test section 2\n self.assert_equal_deep(4, len(config('%ssection2' % namespace_prefix)))\n self.assert_equal_deep('string2', config('%ssection2.string2' % namespace_prefix))\n self.assert_equal_deep(2, config('%ssection2.int2' % namespace_prefix))\n self.assert_equal_deep(2.2, config('%ssection2.float2' % namespace_prefix))\n self.assert_equal_deep(False, config('%ssection2.boolean2' % namespace_prefix))\n\n # Test section 3\n self.assert_equal(None, config('%ssection3' % namespace_prefix))", "def test_config_ok_config(self):\n test_data = (\"[gnupg]\\n\"\n \"recipients = [email protected]\\n\"\n \"signer = [email protected]\\n\"\n \"\\n\"\n \"[amazon-s3]\\n\"\n \"access_key = ACCESSKEY\\n\"\n \"secret_access_key = SECRETACCESSKEY\\n\"\n \"\\n\"\n \"[data]\\n\"\n \"\\n\"\n \"bucket = DATABUCKET\\n\"\n \"[metadata]\\n\"\n \"bucket = METADATABUCKET\\n\"\n \"\\n\")\n if os.path.isfile(\"test_config.conf\"):\n os.remove(\"test_config.conf\")\n file(\"test_config.conf\", \"wb\").write(test_data)\n config = Config(\"test_config.conf\")\n self.assertIn(\"gnupg\", config.config.sections())\n self.assertIn(\"amazon-s3\", config.config.sections())\n self.assertEqual(config.config.get(\n \"gnupg\", \"recipients\"), \"[email protected]\")\n self.assertEqual(config.config.get(\n \"gnupg\", \"signer\"), \"[email protected]\")\n self.assertEqual(config.config.get(\n \"amazon-s3\", \"access_key\"), \"ACCESSKEY\")\n self.assertEqual(config.config.get(\n \"amazon-s3\", \"secret_access_key\"), \"SECRETACCESSKEY\")\n self.assertEqual(config.config.get(\n \"data\", \"bucket\"), \"DATABUCKET\")\n self.assertEqual(config.config.get(\n \"metadata\", \"bucket\"), \"METADATABUCKET\")\n os.remove(\"test_config.conf\")", "def testParse(self):\n config_path = GetTestFilePath('valid/config.yaml')\n lab_config_pb = None\n with open(config_path, 'r') as f:\n lab_config_pb = lab_config.Parse(f)\n\n self.assertEqual('lab1', lab_config_pb.lab_name)\n self.assertEqual('lab_user1', lab_config_pb.host_login_name)\n self.assertEqual(['lab_user1', 'user1'], lab_config_pb.owners)\n self.assertEqual('tfc_url', lab_config_pb.control_server_url)\n self.assertEqual('lab_docker_image', lab_config_pb.docker_image)\n 
self.assertEqual('docker_server_1', lab_config_pb.docker_server)\n self.assertEqual('AStringToRepresentApiKey', lab_config_pb.engprod_api_key)\n self.assertTrue(lab_config_pb.enable_stackdriver)\n self.assertTrue(lab_config_pb.enable_autoupdate)\n self.assertTrue(lab_config_pb.enable_ui_update)\n self.assertEqual(lab_config_pb2.ON_PREMISE, lab_config_pb.operation_mode)\n self.assertEqual('path/to/key.json',\n lab_config_pb.service_account_json_key_path)\n self.assertEqual('lab_sv_key',\n lab_config_pb.service_account_key_secret_id)\n self.assertEqual('secret_project_id',\n lab_config_pb.secret_project_id)\n self.assertEqual('[email protected]',\n lab_config_pb.service_account)\n self.assertEqual(\n '-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null '\n '-F /path/to/ssh/config -C',\n lab_config_pb.ssh_arg)\n self.assertEqual(2, len(lab_config_pb.cluster_configs))\n cluster = lab_config_pb.cluster_configs[0]\n self.assertEqual('cluster1', cluster.cluster_name)\n self.assertEqual('user1', cluster.host_login_name)\n self.assertEqual(['user1', 'user2'], cluster.owners)\n self.assertEqual('path/to/config.xml', cluster.tf_global_config_path)\n self.assertEqual('tfc_url', cluster.control_server_url)\n self.assertTrue(cluster.graceful_shutdown)\n self.assertEqual(600, cluster.shutdown_timeout_sec)\n self.assertTrue(cluster.enable_stackdriver)\n self.assertTrue(cluster.enable_autoupdate)\n self.assertEqual(['--arg1', 'value1'], cluster.extra_docker_args)\n self.assertEqual('gcr.io/dockerized-tradefed/tradefed:golden',\n cluster.docker_image)\n self.assertEqual('docker_server_2', cluster.docker_server)\n self.assertEqual(2, len(cluster.tmpfs_configs))\n self.assertEqual('/atmpfs', cluster.tmpfs_configs[0].path)\n self.assertEqual(1000, cluster.tmpfs_configs[0].size)\n self.assertEqual('/btmpfs', cluster.tmpfs_configs[1].path)\n self.assertEqual(3, len(cluster.host_configs))\n self.assertEqual(20, cluster.max_concurrent_update_percentage)\n host = cluster.host_configs[0]\n self.assertEqual('host1', host.hostname)\n self.assertEqual(5, host.max_local_virtual_devices)\n self.assertEqual(1, len(host.tmpfs_configs))\n self.assertEqual('/atmpfs', host.tmpfs_configs[0].path)\n self.assertEqual(2000, host.tmpfs_configs[0].size)\n self.assertEqual('750', host.tmpfs_configs[0].mode)\n host = cluster.host_configs[1]\n self.assertEqual('host2', host.hostname)\n self.assertTrue(host.enable_ui_update)\n host = cluster.host_configs[2]\n self.assertEqual('host3', host.hostname)\n self.assertEqual('path/to/new/config.xml',\n host.tf_global_config_path)\n\n cluster = lab_config_pb.cluster_configs[1]\n self.assertEqual('cluster2', cluster.cluster_name)\n self.assertEqual('path/to/config.xml', cluster.tf_global_config_path)\n self.assertEqual(0, len(list(cluster.extra_docker_args)))\n self.assertEqual(2, len(cluster.host_configs))\n self.assertEqual('host4', cluster.host_configs[0].hostname)\n self.assertEqual('host5', cluster.host_configs[1].hostname)\n self.assertEqual(3600, cluster.shutdown_timeout_sec)\n self.assertTrue(cluster.enable_ui_update)", "def test_module_in_config(self, mocked_callable_loader,\n mocked_loader, mocked_config):\n config_filename = 'aconfigfile'\n finder = importer.Finder(config_filename)\n\n fullname = 'fake_package.fake_module'\n path = None\n finder.find_module(fullname, path)\n mocked_loader.assert_called_once_with(path, 'fake_callable',\n mock_callable_config)", "def test_read_config_option(self):\n # set up config\n config.set_config_file(os.path.join(path_to_module, 
\"test_config.conf\"))\n config.setup()\n # Test that all the parameters loaded from file are correct\n self.assertEqual(config.read_config_option('client_id'), 'uploader')\n self.assertEqual(config.read_config_option('client_secret'), 'secret')\n self.assertEqual(config.read_config_option('username'), 'admin')\n self.assertEqual(config.read_config_option('password'), 'password1')\n self.assertEqual(config.read_config_option('base_url'), 'http://localhost:8080/irida-latest/api/')\n self.assertEqual(config.read_config_option('parser'), 'miseq')\n self.assertEqual(config.read_config_option('readonly', bool), False)", "def testConfigC(self):\n assert type(self.config['debug']) == bool, \"Not parsing string to boolean correctly\"", "def load_test_config(root_fs) -> Config:\n with root_fs.open('config.yaml') as fd:\n return Config.parse(fd)" ]
[ "0.6836559", "0.6791693", "0.6766081", "0.6766081", "0.6679079", "0.6621784", "0.65958804", "0.6574629", "0.6568781", "0.65193504", "0.64859515", "0.64847076", "0.6438674", "0.639707", "0.63916653", "0.63844085", "0.63468117", "0.6346496", "0.6330521", "0.6291646", "0.62800074", "0.62205815", "0.61985356", "0.61711127", "0.61669636", "0.6163579", "0.6150345", "0.61191905", "0.6112584", "0.6080399" ]
0.7162671
0
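As a side note on the record above, the mapping the test exercises is tiny: a plain dict goes in, a one-field ModuleConfig comes out. A minimal self-contained sketch follows; the dataclass shape is an assumption — only the weighted_display_name_like field and the config.get lookup are confirmed by the snippets in this record.

from dataclasses import dataclass
from typing import Dict, Optional

@dataclass
class ModuleConfig:
    weighted_display_name_like: Optional[str] = None

def parse_config(config: Dict) -> ModuleConfig:
    # Mirrors the parse_config shown among the negatives: unknown keys are
    # ignored, and a missing key simply yields None.
    return ModuleConfig(
        weighted_display_name_like=config.get("weighted_display_name_like")
    )

# The same check the test makes, without the module plumbing.
cfg = {"weighted_display_name_like": "testabc [SoMeThInG]"}
assert parse_config(cfg).weighted_display_name_like == cfg["weighted_display_name_like"]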
Tests UserDirectorySearchModule.get_search_query_ordering return values
def test_get_search_query_ordering(self): user_config = {"weighted_display_name_like": "[Modernisation]"} module, _, _ = create_user_directory_search_module_with_config(user_config) # Check postgres # Check the generated SQL and arguments of the above config when using postgres sql, args = module.get_search_query_ordering(PostgresEngine) # We don't care too much about the specifics of the SQL, just that our injected # CASE is present self.assertIn("display_name like ?", sql.lower()) # Check that the returned arguments match our config expected_args = ("%" + user_config["weighted_display_name_like"] + "%",) self.assertEqual(args, expected_args) # Check sqlite # Check the generated SQL and arguments of the above config when using sqlite sql, args = module.get_search_query_ordering(Sqlite3Engine) # We don't do anything different from Synapse's default SQL self.assertGreater(len(sql), 0) # Nor do we return any extra arguments expected_args = () self.assertEqual(args, expected_args)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_search_order(self, post):\n return 'website_published desc, %s' % \\\n self.order_by.get(post.get('order', ''), { 'query': 'website_sequence desc' })['query']", "def get_search_query_ordering(\n self, database_engine_type: BaseDatabaseEngine,\n ) -> Tuple[str, Tuple]:\n if database_engine_type == PostgresEngine:\n # We order by rank and then if a user has profile info.\n # This ranking algorithm is hand tweaked for \"best\" results. Broadly\n # the idea is that a higher weight is given to exact matches.\n # The array of numbers are the weights for the various part of the\n # search: (domain, _, display name, localpart)\n sql = \"\"\"\n (CASE WHEN d.user_id IS NOT NULL THEN 4.0 ELSE 1.0 END)\n \"\"\"\n\n args = ()\n if self.weighted_display_name_like is not None:\n sql += \"\"\"\\\n * (CASE WHEN display_name LIKE ? THEN 2.0 ELSE 1.0 END)\\\n \"\"\"\n args += (\"%\" + self.weighted_display_name_like + \"%\",)\n\n sql += \"\"\"\n * (CASE WHEN avatar_url IS NOT NULL THEN 1.2 ELSE 1.0 END)\n * (\n 3 * ts_rank_cd(\n '{0.1, 0.1, 0.9, 1.0}',\n vector,\n to_tsquery('simple', ?),\n 8\n )\n + ts_rank_cd(\n '{0.1, 0.1, 0.9, 1.0}',\n vector,\n to_tsquery('simple', ?),\n 8\n )\n )\n DESC,\n display_name IS NULL,\n avatar_url IS NULL\n \"\"\"\n return sql, args\n elif database_engine_type == Sqlite3Engine:\n # We order by rank and then if a user has profile info.\n return (\n \"\"\"\n rank(matchinfo(user_directory_search)) DESC,\n display_name IS NULL,\n avatar_url IS NULL\n \"\"\",\n (),\n )\n else:\n raise Exception(\"Received an unrecognized database engine\")", "def assertSearchFindsInOrder(self, query, ids):\n ctool = self.portal.portal_catalog\n result_ids = [b.getId for b in ctool.unrestrictedSearchResults(**query)]\n self.assertListEqual(result_ids, ids)", "def test_query_sort_default_sort_order(self):\n doc_count = 10\n field_to_be_sorted_by = \"data\"\n prefix = get_rand_string()\n\n data = [prefix + \"-\" + str(x) for x in range(10)]\n\n # Same user_id for all documents\n user_id = get_rand_string()\n\n for datum in data:\n self.conn.add(id=get_rand_string(), user_id=user_id, data=datum)\n self.conn.commit()\n\n results = self.conn.query(q=\"user_id:\" + user_id, sort=\"data\").results\n\n self.assertEquals(len(results), doc_count,\n \"There should be %d documents returned, got:%d, results:%s\" % (\n doc_count, len(results), results))\n\n query_data = [doc[\"data\"] for doc in results]\n\n for idx, datum in enumerate(sorted(data)):\n self.assertEquals(datum, query_data[idx],\n \"Expected %s instead of %s on position %s in query_data:%s\" % (\n datum, query_data[idx], idx, query_data))", "def test_query_sort_nondefault_sort_order(self):\n doc_count = 10\n field_to_be_sorted_by = \"data\"\n prefix = get_rand_string()\n\n data = [prefix + \"-\" + str(x) for x in range(10)]\n\n # Same user_id for all documents\n user_id = get_rand_string()\n\n for datum in data:\n self.conn.add(id=get_rand_string(), user_id=user_id, data=datum)\n self.conn.commit()\n\n results = self.conn.query(q=\"user_id:\" + user_id, sort=\"data\",\n sort_order=\"desc\").results\n\n self.assertEquals(len(results), doc_count,\n \"There should be %d documents returned, got:%d, results:%s\" % (\n doc_count, len(results), results))\n\n query_data = [doc[\"data\"] for doc in results]\n\n for idx, datum in enumerate(reversed(sorted(data))):\n self.assertEquals(datum, query_data[idx],\n \"Expected %s instead of %s on position %s in query_data:%s\" % (\n datum, query_data[idx], idx, query_data))", "def test_order(self):\n\n 
# issue a valid query\n # Assure proper execution, and get results from quilt_history\n o = str(quilt_test_core.call_quilt_script('quilt_submit.py', [\n '-y', 'out_of_order']))\n\n o = self.check_query_and_get_results3(o)\n\n # Check results\n # assure that results are in order\n l = []\n for i in xrange(1, 6):\n searchStr = \"{'timestamp': \" + str(i) + '}'\n index = o.find(searchStr)\n logging.debug(\"looking for string: \" + searchStr)\n self.assertTrue(index != -1)\n l.append(index)\n\n isSorted = all(l[i] <= l[i + 1] for i in xrange(len(l) - 1))\n self.assertTrue(isSorted)", "def test_sort_order(self):\n obj = self.conn.search(self.basedn, 2, attrlist=['uidNumber'],\n sort_order=[\"-uidNumber\"])\n sort = [o['uidNumber'][0] for o in obj if 'uidNumber' in o]\n self.assertTrue((all(sort[i] >= sort[i+1]\n for i in range(len(sort)-1))), \"Not sorted\")", "def get_sort_query(self, kind, order, is_number):\n pass", "def order_agent_results(self, agent_search_order):\n raise errors.Unimplemented()", "def test_order_direction(self):\n threads = [make_minimal_cs_thread()]\n self.register_get_user_response(self.user)\n self.register_get_threads_response(threads, page=1, num_pages=1)\n self.client.get(\n self.url,\n {\n \"course_id\": str(self.course.id),\n \"order_direction\": \"desc\",\n }\n )\n self.assert_last_query_params({\n \"user_id\": [str(self.user.id)],\n \"course_id\": [str(self.course.id)],\n \"sort_key\": [\"activity\"],\n \"page\": [\"1\"],\n \"per_page\": [\"10\"],\n })", "def test_sorting_name2(self):\n # Name Decreasing sorting criteria is selected\n sorting_criteria = \"name_increasing\"\n # Search string that returns all of the users\n search_string = \"''\"\n # Search For all users\n resp = SearchTest.client.get('/api/search/',{\"token\":SearchTest.valid_token,\"search_string\":search_string,\"sorting_criteria\":sorting_criteria})\n search_result = json.loads(resp.content)\n for i in range(len(search_result)-1):\n self.assertLessEqual(search_result[i][\"name\"],search_result[i+1][\"name\"],\"{} sorting criteria doesn't work\".format(sorting_criteria))", "def test_scroll_query_sort_safe(self):\n self._validate_scroll_search_params({\"sort\": \"_id\"}, {\"sort\": \"_id\"})", "def sort(self, *order_fields):\n return MockSearch(\n self, self._query, self.nested_filter_calls, order_fields,\n self._script_fields\n )", "def test_order_by(self, http_query, cc_query):\n threads = [make_minimal_cs_thread()]\n self.register_get_user_response(self.user)\n self.register_get_threads_response(threads, page=1, num_pages=1)\n self.client.get(\n self.url,\n {\n \"course_id\": str(self.course.id),\n \"order_by\": http_query,\n }\n )\n self.assert_last_query_params({\n \"user_id\": [str(self.user.id)],\n \"course_id\": [str(self.course.id)],\n \"page\": [\"1\"],\n \"per_page\": [\"10\"],\n \"sort_key\": [cc_query],\n })", "def order(self, searcher, docnums, reverse = False):\n raise NotImplementedError", "def test_sortby_invalid(self):\n qs = {'a': 1, 'w': 4, 'format': 'json', 'sortby': ''}\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(200, response.status_code)", "def order_log_results(self, log_search_order):\n raise errors.Unimplemented()", "def order_log_entry_results(self, log_entry_search_order):\n raise errors.Unimplemented()", "def order(self, searcher, docnums, reverse = False):\n return docnums", "def get_query(self,q,request):\n kwargs = { \"%s__icontains\" % search_field : q }\n return model.objects.filter(**kwargs).order_by(search_field)", "def 
test_sorting_name(self):\n # Name Decreasing sorting criteria is selected\n sorting_criteria = \"name_decreasing\"\n # Search string that returns all of the users\n search_string = \"''\"\n # Search For all users\n resp = SearchTest.client.get('/api/search/',{\"token\":SearchTest.valid_token,\"search_string\":search_string,\"sorting_criteria\":sorting_criteria})\n search_result = json.loads(resp.content)\n for i in range(len(search_result)-1):\n self.assertGreaterEqual(search_result[i][\"name\"],search_result[i+1][\"name\"],\"{} sorting criteria doesn't work\".format(sorting_criteria))", "def test_valid_search_order(self) -> None:\n\n # fetch the most recent first, largest timestamp\n channel = self.make_request(\n \"GET\",\n self.url + \"?dir=b\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(channel.json_body[\"total\"], 20)\n self.assertEqual(len(channel.json_body[\"event_reports\"]), 20)\n report = 1\n while report < len(channel.json_body[\"event_reports\"]):\n self.assertGreaterEqual(\n channel.json_body[\"event_reports\"][report - 1][\"received_ts\"],\n channel.json_body[\"event_reports\"][report][\"received_ts\"],\n )\n report += 1\n\n # fetch the oldest first, smallest timestamp\n channel = self.make_request(\n \"GET\",\n self.url + \"?dir=f\",\n access_token=self.admin_user_tok,\n )\n\n self.assertEqual(200, channel.code, msg=channel.json_body)\n self.assertEqual(channel.json_body[\"total\"], 20)\n self.assertEqual(len(channel.json_body[\"event_reports\"]), 20)\n report = 1\n while report < len(channel.json_body[\"event_reports\"]):\n self.assertLessEqual(\n channel.json_body[\"event_reports\"][report - 1][\"received_ts\"],\n channel.json_body[\"event_reports\"][report][\"received_ts\"],\n )\n report += 1", "def search_orders(phrase):\n try:\n order_id = int(phrase.strip())\n return Order.objects.filter(id=order_id)\n except ValueError:\n pass\n\n sv = (\n SearchVector('user__first_name', weight='B') +\n SearchVector('user__last_name', weight='B') +\n SearchVector(\n 'user__default_shipping_address__first_name', weight='B') +\n SearchVector('user__default_shipping_address__last_name', weight='B') +\n SearchVector('user__email', weight='A'))\n rank = SearchRank(sv, SearchQuery(phrase))\n return Order.objects.annotate(rank=rank).filter(\n rank__gte=0.2).order_by('-rank')", "def get_ordering(self, request, queryset, view):\n ordering = []\n params = get_datatables_ordering(request.query_params)\n if params:\n fields = [param.strip() for param in params.split(',')]\n ordering = self.remove_invalid_fields(queryset, fields, view, request)\n if ordering:\n return ordering\n\n # No ordering was included, or all the ordering fields were invalid\n return self.get_default_ordering(view)", "def test_sort(self):\n sort_field = MoveSearchForm.sort\n for value, label in sort_field.kwargs['choices']:\n response = self.do_search(id=u'1', sort=value)\n self.assert_(\n response.tmpl_context.results,\n \"\"\"Sort by {0} doesn't crash\"\"\".format(value)\n )", "def optimize_query_order(queries):\n pass #TODO later when/as needed, now returns queries as-is\n return queries", "def get_search_results(query):\n global index, doc_names\n result = ranked = list()\n doc_list = set(doc_names.keys())\n flag = 0\n for word in query:\n if word in index:\n flag = 1\n doc_list = doc_list.intersection(index[word].keys())\n else:\n return []\n\n if flag != 0:\n for doc_id in doc_list:\n positions = list()\n for word in query:\n 
positions.append(index[word][doc_id])\n doc_result = [(doc_id, x) for x in position_merge(positions)]\n result += doc_result\n ranked = sorted(result, key=lambda x: (x[0], x[1]))\n return ranked", "def orderby():\n pass", "def test_sorting_surname2(self):\n # Name Decreasing sorting criteria is selected\n sorting_criteria = \"surname_decreasing\"\n # Search string that returns all of the users\n search_string = \"''\"\n # Search For all users\n resp = SearchTest.client.get('/api/search/',{\"token\":SearchTest.valid_token,\"search_string\":search_string,\"sorting_criteria\":sorting_criteria})\n search_result = json.loads(resp.content)\n for i in range(len(search_result)-1):\n self.assertGreaterEqual(search_result[i][\"name\"],search_result[i+1][\"name\"],\"{} sorting criteria doesn't work\".format(sorting_criteria))", "def sort_results(self):\n pass" ]
[ "0.6698615", "0.66707766", "0.6650491", "0.6406054", "0.6366886", "0.6198293", "0.59946716", "0.5983538", "0.59773964", "0.5948387", "0.5940092", "0.5885196", "0.5882724", "0.58709025", "0.58667475", "0.58603793", "0.585851", "0.5854477", "0.5850439", "0.5764674", "0.57599866", "0.5753389", "0.57465523", "0.5725255", "0.5720841", "0.5719713", "0.5709116", "0.57060343", "0.56860816", "0.5673841" ]
0.8195143
0
Assumes x and epsilon are positive floats & epsilon < 1 Returns a y such that y*y is within epsilon of x
def squareRootExhaustive(x, epsilon): step = epsilon**2 ans = 0.0 while abs(ans**2 - x) >= epsilon and ans*ans <= x: # The ans*ans <= x check is there because of floating point arithmetic I think. ans += step if ans*ans > x: raise ValueError return ans
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def finf(lhs, rhs, epsilon=0.00001):\n return rhs-lhs > epsilon", "def epsilon():\n return _EPSILON", "def squareRootBi(x, epsilon):\n low = 0.0\n high = max(1.0, x)\n ans = (high + low)/2.0\n while abs(ans**2 - x) >= epsilon:\n # Where as here the ans*ans <= x isn't required because every value in\n # the bisection search is by nature some factor of 2. Clever. \n if ans**2 < x:\n low = ans\n else:\n high = ans\n ans = (high + low) / 2.0\n return ans", "def epsilonEffective(epsilon1=0.9, epsilon2=0.9):\n result=1/(1/epsilon1+1/epsilon2-1)\n return result", "def is_almost_equal(self, x ,y ,epsilon=1*10**(-8)):\n \treturn abs(x-y) <= epsilon", "def approx_gradient(f, x, epsilon):\n n = len(x)\n g = np.zeros(n)\n ei = np.zeros(n)\n for i in range(n):\n ei[i] = .5 * epsilon\n g[i] = (f(x + ei) - f(x - ei)) / epsilon\n ei[i] = 0\n return g", "def IsApproximatelyEqual(x, y, epsilon):\n # Check absolute precision.\n if -epsilon <= x - y <= epsilon:\n return True\n\n # Is x or y too close to zero?0.\n if -epsilon <= x <= epsilon or -epsilon <= y <= epsilon:\n return False\n\n # Check relative precision.\n return (-epsilon <= (x - y) / x <= epsilon\n or -epsilon <= (x - y) / y <= epsilon)", "def findRoot(x, power, epsilon):\n\n if x < 0 and power % 2 == 0:\n return None\n\n low = min(-1.0, x)\n high = max(1.0, x)\n ans = (high + low) / 2.0\n while abs(ans**power - x) >= epsilon:\n if ans ** power < x:\n low = ans\n else:\n high = ans\n ans = (high + low) / 2.0\n return ans", "def epsilon_insensitive(y_true,y_pred, epsilon):\n loss = T.maximum(T.abs_(y_true-y_pred)-epsilon,0)\n return loss", "def find_reasonable_epsilon(theta0, grad0, logp0, f):\n epsilon = 1.\n r0 = np.random.normal(0., 1., len(theta0))\n\n # Figure out what direction we should be moving epsilon.\n _, rprime, gradprime, logpprime = leapfrog(theta0, r0, grad0, epsilon, f)\n # brutal! This trick make sure the step is not huge leading to infinite\n # values of the likelihood. This could also help to make sure theta stays\n # within the prior domain (if any)\n k = 1.\n while np.isinf(logpprime) or np.isinf(gradprime).any():\n k *= 0.5\n _, rprime, _, logpprime = leapfrog(theta0, r0, grad0, epsilon * k, f)\n\n epsilon = 0.5 * k * epsilon\n\n # acceptprob = np.exp(logpprime - logp0 - 0.5 * (np.dot(rprime, rprime.T) - np.dot(r0, r0.T)))\n # a = 2. * float((acceptprob > 0.5)) - 1.\n logacceptprob = logpprime-logp0-0.5*(np.dot(rprime, rprime)-np.dot(r0,r0))\n a = 1. if logacceptprob > np.log(0.5) else -1.\n # Keep moving epsilon in that direction until acceptprob crosses 0.5.\n # while ( (acceptprob ** a) > (2. ** (-a))):\n while a * logacceptprob > -a * np.log(2):\n epsilon = epsilon * (2. 
** a)\n _, rprime, _, logpprime = leapfrog(theta0, r0, grad0, epsilon, f)\n # acceptprob = np.exp(logpprime - logp0 - 0.5 * ( np.dot(rprime, rprime.T) - np.dot(r0, r0.T)))\n logacceptprob = logpprime-logp0-0.5*(np.dot(rprime, rprime)-np.dot(r0,r0))\n\n print(\"find_reasonable_epsilon=\", epsilon)\n\n return epsilon", "def myTwistFunctionAirliner(Epsilon):\n return -(6.53*Epsilon*Epsilon - 14.1*Epsilon + 4.24)", "def close(a,b):\n return abs(a-b) < epsilon", "def isinsidepointXY(x,p):\n \n return dist(x,p) < epsilon", "def epsilon_delta(self):", "def calc_epsilon(y_true, y_pred, weights):\n return float(np.dot(weights, y_pred == y_true))", "def epsilon(current_episode, num_episodes):\n # return 1 - (current_episode/num_episodes)\n return .5 * .9**current_episode", "def IsApproximatelyEqual(x, y, epsilon = 1e-6):\n # Check absolute precision.\n if -epsilon <= x - y <= epsilon:\n return True\n\n # Is x or y too close to zero?\n if -epsilon <= x <= epsilon or -epsilon <= y <= epsilon:\n return False\n\n # Check relative precision.\n return (-epsilon <= (x - y) / x <= epsilon or -epsilon <= (x - y) / y <= epsilon)", "def is_equal_approx(x, y, epsilon=1e-6):\r\n # Check absolute precision.\r\n if -epsilon <= x - y <= epsilon:\r\n return True\r\n\r\n # Is x or y too close to zero?\r\n if -epsilon <= x <= epsilon or -epsilon <= y <= epsilon:\r\n return False\r\n\r\n # Check relative precision.\r\n return (-epsilon <= (x - y) / x <= epsilon\r\n or -epsilon <= (x - y) / y <= epsilon)", "def grad_checker(X, y, theta, epsilon=0.01, tolerance=1e-4):\n true_gradient = compute_square_loss_gradient(X, y, theta) #The true gradient\n num_features = theta.shape[0]\n approx_grad = np.zeros(num_features) #Initialize the gradient we approximate\n #TODO\n e_i = np.zeros(num_features)\n for k in range(num_features):\n e_i[k] = 1\n approx_grad[k] = (compute_square_loss(X, y, theta+epsilon*e_i)-compute_square_loss(X, y, theta-epsilon*e_i))/(2*epsilon) \n e_i[k] = 0\n\n return np.sqrt(sum((true_gradient-approx_grad)**2)) < tolerance", "def derivative(f, x, epsilon = 1e-10):\n\n x_ = x + epsilon\n value = (f(x_) - f(x)) / epsilon\n\n return value", "def grad_checker(X, y, theta, epsilon=0.01, tolerance=1e-4):\n true_gradient = compute_square_loss_gradient(X, y, theta) #the true gradient\n num_features = theta.shape[0]\n \n e = np.eye(num_features)\n denominator = np.float(2*epsilon)\n numerator = np.array([ compute_square_loss(X_train,y_train,theta+epsilon*e[i]) - compute_square_loss(X_train,y_train,theta-epsilon*e[i]) for i in range(num_features) ] )\n diff = (true_gradient - numerator/denominator)\n \n return (diff.dot(diff) < tolerance)", "def _define_epsilon(n,T,a=1):\n\n return np.sqrt(np.log(n)/T)*a", "def test_validate_epsilon():\n with pytest.raises(ValueError):\n validate_epsilon([0.1], 2)\n with pytest.raises(ValueError):\n validate_epsilon([-0.1, 1], 2)\n\n assert (validate_epsilon(0.1, 2) == np.array([0.1, 0.1])).all()\n assert (validate_epsilon([0.1, 0.1], 2) == np.array([0.1, 0.1])).all()\n assert (validate_epsilon(np.array([0.1, 0.1]), 2) == np.array([0.1, 0.1])).all()", "def epsilon(self):\n return self._epsilon", "def feq(x, y, precision=0.0000005):\n x = np.asanyarray(x)\n y = np.asanyarray(y)\n boolean = abs(x-y) <= (abs(x+y)*precision)\n return boolean", "def approx_eq(x, y, tolerance=1e-15):\n return abs(x - y) < tolerance", "def epsilon(self):\n return self.__epsilon", "def decay_epsilon(self, epsilon, MIN_EPSILON,\r\n EPSILON_DECAY: float) -> float:\r\n if epsilon > MIN_EPSILON:\r\n epsilon *= 
EPSILON_DECAY\r\n epsilon = max(MIN_EPSILON, epsilon)\r\n return epsilon", "def has_converged(x, y, epsilon=EPSILON):\n for a, b in itertools.izip(x, y):\n if abs(a - b) > epsilon:\n return False\n return True", "def has_converged(x, y, epsilon=EPSILON):\n for a, b in itertools.izip(x, y):\n if abs(a - b) > epsilon:\n return False\n return True" ]
[ "0.65840507", "0.6458624", "0.6209346", "0.62057513", "0.6190196", "0.6165473", "0.6135637", "0.6084486", "0.6045431", "0.6017484", "0.59838367", "0.5971854", "0.5954014", "0.5921355", "0.5895756", "0.58770144", "0.58325094", "0.5807985", "0.5804738", "0.580202", "0.5789465", "0.57743174", "0.57688755", "0.5766774", "0.57630914", "0.57477176", "0.5745242", "0.57398313", "0.5733702", "0.5733702" ]
0.6803677
0
Test fetching samples to prepare.
def test_samples_to_prepare(sample_store): # GIVEN a store with sample in a mix of states assert len(sample_store._get_query(table=Sample).all()) > 1 assert ( len( [sample for sample in sample_store._get_query(table=Sample).all() if sample.prepared_at] ) >= 1 ) # WHEN finding which samples are in queue to be prepared prepare_samples: List[Sample] = sample_store.get_samples_to_prepare() # THEN samples should be a list of samples assert isinstance(prepare_samples, list) assert isinstance(prepare_samples[0], Sample) # THEN it should list the received sample assert len(prepare_samples) == 1 assert prepare_samples[0].name == "received"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prepare(params, samples):\r\n return", "def test_get(self):\n obs = self.tester.get('1.SKM7.640188')\n exp = PrepSample('1.SKM7.640188', self.tester)\n self.assertEqual(obs, exp)", "def prepareSamples(username, testType):\r\n \r\n #userSamps = DBController().getUserSamples(username, testType)", "def test_init(self):\n sample = PrepSample(self.sample_id, self.prep_template)\n # Check that the internal id have been correctly set\n self.assertEqual(sample._id, '1.SKB8.640193')\n # Check that the internal template have been correctly set\n self.assertEqual(sample._md_template, self.prep_template)\n # Check that the internal dynamic table name have been correctly set\n self.assertEqual(sample._dynamic_table, \"prep_1\")", "def test_get_sample_ids(self):\n obs = self.tester._get_sample_ids(self.conn_handler)\n self.assertEqual(obs, self.exp_sample_ids)", "def test_get_sample_ids(self):\n obs = self.tester._get_sample_ids(self.conn_handler)\n self.assertEqual(obs, self.exp_sample_ids)", "def _fetch_data(self, samples):\n pass", "def test_api_samples(self):\n # load api base\n r = requests.get('{server}/api/0.1/'.format(\n server=self.get_server_url())).json()\n # load samples from url specified in api base\n r = requests.get(r['samples']).json()\n self.assertIn('count', r)\n self.assertIn('next', r)\n self.assertIn('prev', r)\n self.assertIn('samples', r)", "def test_get_all_samples(self):\n self.login()\n\n page_size = 20\n\n # hit the API endpoint for both pages\n for page in range(1, 3):\n\n data = {'page': page,\n 'page_size': page_size}\n response = self.client.get(reverse('searchsamples'), data, format='json')\n\n expected = Sample.objects.all().order_by(\"-received\")\n\n paginator = Paginator(expected, page_size)\n res = paginator.page(page)\n\n # format queryset into json for returning\n serializer = SampleSerializer(res, many=True)\n\n context = {\n 'data': serializer.data,\n 'more': (page == 1)\n }\n\n self.assertEqual(response.json(), context)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_acquire_sample(self):\n self.assert_initialize_driver()\n self.assert_particle_generation(ProtocolEvent.ACQUIRE_SAMPLE, DataParticleType.METBK_PARSED, self.assert_data_particle_sample)", "def test_00_setup(self):\n with mock_api(magento_base_responses):\n import_batch(self.session, 'magento.website', self.backend_id)\n import_batch(self.session, 'magento.store', self.backend_id)\n import_batch(self.session, 'magento.storeview', self.backend_id)\n import_record(self.session, 'magento.res.partner.category',\n self.backend_id, 1)", "def test_intent_classifier_get_testing_samples(self):\n pass", "def setUp(self):\n self.dataset = get_test_dataset()", "def test_get_scenarios(self):\n pass", "def setUp(self):\n self.aoSamples = [ModelDataBase(),];", "async def test_all_samples(self):\n response = await self.collect(get_request_json_return_value=self.JMETER_JSON)\n self.assert_measurement(response, value=\"248\", entities=[])", "def test_batch(self):\n pass", "def test_intent_classifier_get_training_samples(self):\n pass", "def test_api_sample(self):\n # load api base\n r = requests.get('{server}/api/0.1/'.format(\n server=self.get_server_url())).json()\n # load samples from url specified in api base\n r = requests.get(r['samples']).json()\n # load a sample\n r = requests.get(r['samples'][0]['url']).json()\n self.assertIn('datetime', r)\n self.assertIn('value', r)\n self.assertIn('id', r)\n self.assertIn('url', r)\n self.assertIn('sensor', r)", "async def 
test_successful_samples(self):\n self.set_source_parameter(\"test_result\", [\"success\"])\n response = await self.collect(get_request_json_return_value=self.JMETER_JSON)\n self.assert_measurement(response, value=\"242\", entities=[])", "def test_0():\n sync.gen_multi_fake_data()#default is only one randomly selected data set\n sync.main(testing=True)", "def prepare(self):\n super(Test200SmartSanityDownload004, self).prepare()\n\n self.logger.info('Preconditions:')\n self.logger.info('1. Open Micro/WINr; ')\n self.logger.info('2. Set up connection with PLC;')\n self.logger.info('3. Create a project which has OB,DB,SDB;')\n self.MicroWIN.test_prepare('ob_db_sdb_01.smart')\n self.PROJECT.project_open('ob_db_sdb_02.smart')", "def test_get_results(self):\n pass", "def test_text_classifier_get_testing_samples(self):\n pass", "def setup(self):\n self.rows = test_helpers.fetch_sample_teradata_rows()\n self.csv_path = 'not/a/real/path'", "def test_info_after_download_and_prepare(self):\n for config in self.builder.BUILDER_CONFIGS:\n with self._subTest(config.name):\n builder = self._make_builder(config=config)\n self._download_and_prepare_as_dataset(builder)\n self.assertEqual(\n builder.info.metadata['synsets']['07491708']['name'],\n '__enjoyment_NN_1',\n )\n self.assertEqual(\n builder.info.metadata['synsets']['08769179']['definition'],\n (\n 'an area in Germany around the upper Elbe river; '\n 'the original home of the Saxons'\n ),\n )", "def test_context_data(self):\n response = self.client.get(self.get_url(self.study.pk))\n context = response.context\n self.assertIn('study', context)\n self.assertIn('trait_count', context)\n self.assertIn('dataset_count', context)\n self.assertEqual(context['study'], self.study)\n self.assertEqual(context['trait_count'], '{:,}'.format(len(self.source_traits)))\n dataset_count = models.SourceDataset.objects.filter(source_study_version__study=self.study).count()\n self.assertEqual(context['dataset_count'], '{:,}'.format(dataset_count))", "def test_context_data(self):\n response = self.client.get(self.get_url(self.study.pk))\n context = response.context\n self.assertIn('study', context)\n self.assertIn('trait_count', context)\n self.assertIn('dataset_count', context)\n self.assertEqual(context['study'], self.study)\n self.assertEqual(context['trait_count'], '{:,}'.format(len(self.source_traits)))\n dataset_count = models.SourceDataset.objects.filter(source_study_version__study=self.study).count()\n self.assertEqual(context['dataset_count'], '{:,}'.format(dataset_count))", "def run_sample(self):\n # there will be validation failures for sample data\n self.validate_req(ignore_failure=True)\n runner_fn = self.model_runner.execute_model_for_sample_data\n return self.do_handle_request(runner_fn)", "def test_training(self):\n\t\tpass" ]
[ "0.70553994", "0.688015", "0.67418826", "0.6723766", "0.66855913", "0.66855913", "0.66639364", "0.6653668", "0.6629881", "0.6424612", "0.64179444", "0.6364231", "0.6348621", "0.6331313", "0.6270328", "0.6239057", "0.62186223", "0.62135476", "0.62124485", "0.6156397", "0.61346513", "0.61080694", "0.610062", "0.60926574", "0.60764325", "0.60543084", "0.60517406", "0.60517406", "0.6023513", "0.602283" ]
0.68960553
1
Test fetching a sample by entry id.
def test_get_sample_by_entry_id(sample_store, entry_id=1): # GIVEN a store with a sample assert len(sample_store._get_query(table=Sample).all()) > 1 # WHEN finding a sample by entry id sample: Sample = sample_store.get_sample_by_entry_id(entry_id=entry_id) # THEN samples should be a list of samples assert isinstance(sample, Sample) # THEN it should return the sample assert sample.id == entry_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_sample_by_id():\n sample_id = demisto.getArg('id')\n r = req('GET', SUB_API + 'samples/' + sample_id)\n sample = sample_to_readable(r.json().get('data'))\n md = tableToMarkdown('ThreatGrid - Sample', [sample], [\n 'ID', 'Filename', 'State', 'Status', 'MD5', 'SHA1', 'SHA256', 'OS', 'SubmittedAt', 'StartedAt', 'CompletedAt'\n ])\n demisto.results({\n 'Type': entryTypes['note'],\n 'EntryContext': {'ThreatGrid.Sample(val.ID == obj.ID)': sample},\n 'HumanReadable': md,\n 'ContentsFormat': formats['json'],\n 'Contents': r.json()\n })", "def test_sample_one_id(self):\r\n self.assertEqual(self.test_sample.id, 1)", "def test_get_sample_by_internal_id(sample_store, internal_id=\"test_internal_id\"):\n # GIVEN a store with a sample\n assert len(sample_store._get_query(table=Sample).all()) > 1\n\n # WHEN finding a sample by internal id\n sample: Sample = sample_store.get_sample_by_internal_id(internal_id=internal_id)\n\n # THEN samples should be a list of samples\n assert isinstance(sample, Sample)\n\n # THEN it should return the sample\n assert sample.internal_id == internal_id", "def get_sample_state_by_id():\n ids = [] # type: list\n if demisto.getArg('ids'):\n ids += argToList(demisto.getArg('ids'))\n if demisto.getArg('id'):\n ids.append(demisto.getArg('id'))\n response = get_sample_state_helper(ids)\n md = tableToMarkdown('ThreatGrid - Sample state', response['samples'], ['ID', 'State'])\n demisto.results({\n 'Type': entryTypes['note'],\n 'EntryContext': {'ThreatGrid.Sample(val.ID == obj.ID)': response['samples']},\n 'HumanReadable': md,\n 'ContentsFormat': formats['json'],\n 'Contents': response['requests']\n })", "def test_get(self):\n obs = self.tester.get('1.SKM7.640188')\n exp = Sample('1.SKM7.640188', self.tester)\n self.assertEqual(obs, exp)", "def test_get_case_by_id(self):\n pass", "def test_getitem(self):\n obs = self.tester['1.SKM7.640188']\n exp = Sample('1.SKM7.640188', self.tester)\n self.assertEqual(obs, exp)", "def test_get_sample_ids(self):\n obs = self.tester._get_sample_ids(self.conn_handler)\n self.assertEqual(obs, self.exp_sample_ids)", "def test_get_sample_ids(self):\n obs = self.tester._get_sample_ids(self.conn_handler)\n self.assertEqual(obs, self.exp_sample_ids)", "def test_get_recipe_by_id(self):\n recipe = self.request_mgr.get_recipe_by_id(35354)\n self.assertIn(\"Guinness\", recipe.get('title'))", "def test_drugs_id_get(self):\n pass", "def test_sounds_id_get(self):\n pass", "def test_get(self):\n obs = self.tester.get('1.SKM7.640188')\n exp = PrepSample('1.SKM7.640188', self.tester)\n self.assertEqual(obs, exp)", "def test_fetch_one():\n sample_uuid = get_sample_id()\n response = requests.get(f'http://localhost:5000/api/persons/{sample_uuid}')\n data = response.json()\n\n assert response.status_code == 200\n for field in FIELDS:\n assert field in data", "def sample(self, sample_id: str):\n\n class LimsSample:\n \"\"\" A mock class for a sample coming from LIMS. 
It only needs a comment \"\"\"\n\n def __init__(self, sample_id):\n self.sample_id = sample_id\n self.sample_data = {\"comment\": \"a comment in LimsSample\"}\n\n def get(self, key):\n \"\"\" only here to get the sample.get('comment') \"\"\"\n return self.sample_data.get(key, \"not found\")\n\n # haha, it's a factory!\n if not self.lims_sample:\n self.lims_sample = LimsSample(sample_id)\n\n return self.lims_sample", "def test_detail_by_id(self):\n responses.add(\n responses.Response(\n method='GET',\n url=('https://connection.keboola.com/v2/storage/buckets/'\n 'in.c-ga'),\n json=detail_response\n )\n )\n bucket_id = 'in.c-ga'\n bucket_detail = self.buckets.detail(bucket_id)\n assert bucket_detail['id'] == 'in.c-ga'", "def test_getitem(self):\n obs = self.tester['1.SKM7.640188']\n exp = PrepSample('1.SKM7.640188', self.tester)\n self.assertEqual(obs, exp)", "def test_getitem_id_column(self):\n self.assertEqual(self.tester['required_sample_info_status'],\n 'completed')", "def test_getSampleMetadata_bad_sample_id(self):\r\n # Nonexistent sample ID.\r\n self.assertRaises(KeyError, self.overview_map.getSampleMetadata,\r\n 'PC.000')\r\n self.assertRaises(KeyError, self.no_metadata.getSampleMetadata,\r\n 'PC.000')\r\n # Integer sample ID.\r\n self.assertRaises(KeyError, self.overview_map.getSampleMetadata, 42)\r\n # Sample ID of type None.\r\n self.assertRaises(KeyError, self.overview_map.getSampleMetadata, None)\r\n\r\n # Sample ID on empty map.\r\n self.assertRaises(KeyError, self.empty_map.getSampleMetadata, 's1')\r\n # Integer sample ID on empty map.\r\n self.assertRaises(KeyError, self.empty_map.getSampleMetadata, 1)\r\n # Sample ID of None on empty map.\r\n self.assertRaises(KeyError, self.empty_map.getSampleMetadata, None)", "def test_brains_id_get(self):\n pass", "def test_get_by_id(db):\n thing = Thing(id=1, name=\"Thing A\")\n db.session.query(Thing).delete()\n db.session.commit()\n\n db.session.add(thing)\n db.session.commit()\n\n retrieved = Thing.query.get(thing.id)\n assert retrieved == thing\n assert repr(retrieved) == \"<Thing 'Thing A'>\"", "def test_getSampleMetadata_bad_sample_id(self):\n # Nonexistent sample ID.\n self.assertRaises(KeyError, self.overview_map.getSampleMetadata,\n 'PC.000')\n self.assertRaises(KeyError, self.no_metadata.getSampleMetadata,\n 'PC.000')\n # Integer sample ID.\n self.assertRaises(KeyError, self.overview_map.getSampleMetadata, 42)\n # Sample ID of type None.\n self.assertRaises(KeyError, self.overview_map.getSampleMetadata, None)\n\n # Sample ID on empty map.\n self.assertRaises(KeyError, self.empty_map.getSampleMetadata, 's1')\n # Integer sample ID on empty map.\n self.assertRaises(KeyError, self.empty_map.getSampleMetadata, 1)\n # Sample ID of None on empty map.\n self.assertRaises(KeyError, self.empty_map.getSampleMetadata, None)", "def test_sample_one_patient_id(self):\r\n self.assertEqual(self.test_sample.patientID.id, 2)", "def test_get_article_by_id():\n article = Article(\n author = '[email protected]',\n title = 'New Article',\n content = 'Super extra awesome article'\n ).save()\n\n query = GetArticleByIDQuery(\n id = article.id\n )\n\n assert query.execute().id == article.id", "def test_data_source_soaps_id_get(self):\n pass", "def test_get_sample_ids(self):\r\n self.assertEqual(get_sample_ids(self.map_data, self.map_headers,\r\n parse_metadata_state_descriptions('Study:Twin')), [])\r\n self.assertEqual(get_sample_ids(self.map_data, self.map_headers,\r\n parse_metadata_state_descriptions('Study:Dog')), ['a', 'b'])\r\n 
self.assertEqual(get_sample_ids(self.map_data, self.map_headers,\r\n parse_metadata_state_descriptions('Study:*,!Dog')), ['c', 'd', 'e'])\r\n self.assertEqual(get_sample_ids(self.map_data, self.map_headers,\r\n parse_metadata_state_descriptions('Study:*,!Dog;BodySite:Stool')), ['e'])\r\n self.assertEqual(get_sample_ids(self.map_data, self.map_headers,\r\n parse_metadata_state_descriptions('BodySite:Stool')), ['a', 'b', 'e'])", "def test_get_sample_ids(self):\n self.assertEqual(get_sample_ids(self.map_data, self.map_headers,\\\n parse_metadata_state_descriptions('Study:Twin')), [])\n self.assertEqual(get_sample_ids(self.map_data, self.map_headers,\\\n parse_metadata_state_descriptions('Study:Dog')), ['a','b'])\n self.assertEqual(get_sample_ids(self.map_data, self.map_headers,\\\n parse_metadata_state_descriptions('Study:*,!Dog')), ['c','d','e'])\n self.assertEqual(get_sample_ids(self.map_data, self.map_headers,\\\n parse_metadata_state_descriptions('Study:*,!Dog;BodySite:Stool')), ['e'])\n self.assertEqual(get_sample_ids(self.map_data, self.map_headers,\\\n parse_metadata_state_descriptions('BodySite:Stool')), ['a','b','e'])", "def test_api_sample(self):\n # load api base\n r = requests.get('{server}/api/0.1/'.format(\n server=self.get_server_url())).json()\n # load samples from url specified in api base\n r = requests.get(r['samples']).json()\n # load a sample\n r = requests.get(r['samples'][0]['url']).json()\n self.assertIn('datetime', r)\n self.assertIn('value', r)\n self.assertIn('id', r)\n self.assertIn('url', r)\n self.assertIn('sensor', r)", "def test_retrieve_specific_ingredient(self):\n ingredient = create_sample_ingredient(user=self.user, name='Honey')\n\n url = detail_url(ingredient.id)\n response = self.client.get(url)\n\n serializer = IngredientSerializer(ingredient)\n\n self.assertEqual(response.data, serializer.data)", "def test_plays_id_get(self):\n pass" ]
[ "0.72672176", "0.6994822", "0.6854643", "0.665835", "0.6641262", "0.66313267", "0.6487416", "0.64807725", "0.64807725", "0.64692223", "0.64609057", "0.64068794", "0.636549", "0.6269254", "0.6269249", "0.62489676", "0.6215826", "0.62132645", "0.61510736", "0.6108318", "0.61035985", "0.6102699", "0.6097807", "0.60724926", "0.60533077", "0.6043813", "0.6038219", "0.5999402", "0.5991173", "0.59708" ]
0.85014653
0
Test fetching a sample by internal id.
def test_get_sample_by_internal_id(sample_store, internal_id="test_internal_id"): # GIVEN a store with a sample assert len(sample_store._get_query(table=Sample).all()) > 1 # WHEN finding a sample by internal id sample: Sample = sample_store.get_sample_by_internal_id(internal_id=internal_id) # THEN samples should be a list of samples assert isinstance(sample, Sample) # THEN it should return the sample assert sample.internal_id == internal_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_sample_by_entry_id(sample_store, entry_id=1):\n # GIVEN a store with a sample\n assert len(sample_store._get_query(table=Sample).all()) > 1\n\n # WHEN finding a sample by entry id\n sample: Sample = sample_store.get_sample_by_entry_id(entry_id=entry_id)\n\n # THEN samples should be a list of samples\n assert isinstance(sample, Sample)\n\n # THEN it should return the sample\n assert sample.id == entry_id", "def test_sample_one_id(self):\r\n self.assertEqual(self.test_sample.id, 1)", "def test_get(self):\n obs = self.tester.get('1.SKM7.640188')\n exp = Sample('1.SKM7.640188', self.tester)\n self.assertEqual(obs, exp)", "def test_get(self):\n obs = self.tester.get('1.SKM7.640188')\n exp = PrepSample('1.SKM7.640188', self.tester)\n self.assertEqual(obs, exp)", "def get_sample_by_id():\n sample_id = demisto.getArg('id')\n r = req('GET', SUB_API + 'samples/' + sample_id)\n sample = sample_to_readable(r.json().get('data'))\n md = tableToMarkdown('ThreatGrid - Sample', [sample], [\n 'ID', 'Filename', 'State', 'Status', 'MD5', 'SHA1', 'SHA256', 'OS', 'SubmittedAt', 'StartedAt', 'CompletedAt'\n ])\n demisto.results({\n 'Type': entryTypes['note'],\n 'EntryContext': {'ThreatGrid.Sample(val.ID == obj.ID)': sample},\n 'HumanReadable': md,\n 'ContentsFormat': formats['json'],\n 'Contents': r.json()\n })", "def test_get_sample_ids(self):\n obs = self.tester._get_sample_ids(self.conn_handler)\n self.assertEqual(obs, self.exp_sample_ids)", "def test_get_sample_ids(self):\n obs = self.tester._get_sample_ids(self.conn_handler)\n self.assertEqual(obs, self.exp_sample_ids)", "def sample(self, sample_id: str):\n\n class LimsSample:\n \"\"\" A mock class for a sample coming from LIMS. It only needs a comment \"\"\"\n\n def __init__(self, sample_id):\n self.sample_id = sample_id\n self.sample_data = {\"comment\": \"a comment in LimsSample\"}\n\n def get(self, key):\n \"\"\" only here to get the sample.get('comment') \"\"\"\n return self.sample_data.get(key, \"not found\")\n\n # haha, it's a factory!\n if not self.lims_sample:\n self.lims_sample = LimsSample(sample_id)\n\n return self.lims_sample", "def test_getitem(self):\n obs = self.tester['1.SKM7.640188']\n exp = Sample('1.SKM7.640188', self.tester)\n self.assertEqual(obs, exp)", "def test_sounds_id_get(self):\n pass", "def test_get_case_by_id(self):\n pass", "def test_api_sample(self):\n # load api base\n r = requests.get('{server}/api/0.1/'.format(\n server=self.get_server_url())).json()\n # load samples from url specified in api base\n r = requests.get(r['samples']).json()\n # load a sample\n r = requests.get(r['samples'][0]['url']).json()\n self.assertIn('datetime', r)\n self.assertIn('value', r)\n self.assertIn('id', r)\n self.assertIn('url', r)\n self.assertIn('sensor', r)", "def test_sample_one_patient_id(self):\r\n self.assertEqual(self.test_sample.patientID.id, 2)", "def test_getitem(self):\n obs = self.tester['1.SKM7.640188']\n exp = PrepSample('1.SKM7.640188', self.tester)\n self.assertEqual(obs, exp)", "def test_data_source_soaps_id_get(self):\n pass", "def test_drugs_id_get(self):\n pass", "def test_brains_id_get(self):\n pass", "def get_sample_state_by_id():\n ids = [] # type: list\n if demisto.getArg('ids'):\n ids += argToList(demisto.getArg('ids'))\n if demisto.getArg('id'):\n ids.append(demisto.getArg('id'))\n response = get_sample_state_helper(ids)\n md = tableToMarkdown('ThreatGrid - Sample state', response['samples'], ['ID', 'State'])\n demisto.results({\n 'Type': entryTypes['note'],\n 'EntryContext': 
{'ThreatGrid.Sample(val.ID == obj.ID)': response['samples']},\n 'HumanReadable': md,\n 'ContentsFormat': formats['json'],\n 'Contents': response['requests']\n })", "def test_get_record(self):\n pass", "def test_fetch_one():\n sample_uuid = get_sample_id()\n response = requests.get(f'http://localhost:5000/api/persons/{sample_uuid}')\n data = response.json()\n\n assert response.status_code == 200\n for field in FIELDS:\n assert field in data", "def test_getitem_id_column(self):\n self.assertEqual(self.tester['required_sample_info_status'],\n 'completed')", "def test_data_source_soaps_id_exists_get(self):\n pass", "def test_solareclipses_id_get(self):\n pass", "def test_plays_id_get(self):\n pass", "def test_get_sdb_by_id(self, mock_get, mock_sdb_id):\n\n mock_resp = self._mock_response(content=json.dumps(self.sdb_data))\n mock_get.return_value = mock_resp\n\n details = self.client.get_sdb_by_id(\"5f0-99-414-bc-e5909c\")\n\n assert_equals(details, self.sdb_data)\n assert_in('X-Cerberus-Client', self.client.HEADERS)\n mock_get.assert_called_with(\n self.cerberus_url + '/v2/safe-deposit-box/5f0-99-414-bc-e5909c',\n headers=self.client.HEADERS\n )", "def test_get_analysis_with_id(analysis_store: MockStore):\n # GIVEN a store with an analysis\n existing_analysis: Analysis = analysis_store.get_query(table=Analysis).first()\n\n # WHEN accessing it by ID\n analysis: Analysis = analysis_store.get_analysis_with_id(analysis_id=existing_analysis.id)\n\n # THEN it should return the same analysis\n assert analysis == existing_analysis", "def test_get(client, example_records, h, prefix):\n id_ = example_records[0].id\n\n res = client.get(f'{prefix}{id_}', headers=h)\n assert res.status_code == 200\n assert res.json['id'] == id_\n # Test links\n assert res.json['links'] == {\n 'self': 'https://127.0.0.1:5000/api/vocabularies/licenses/cc-by'\n }", "def fixture_microbial_sample_id():\n return \"microbial_sample_test\"", "def test_get_device_by_id(self):\n pass", "def test_get_measure_parameters_by_id(self):\n pass" ]
[ "0.7371811", "0.7272965", "0.71735644", "0.696735", "0.69627017", "0.69409776", "0.69409776", "0.68801206", "0.67857003", "0.66956204", "0.6688522", "0.6670793", "0.66263884", "0.6589909", "0.65849036", "0.6567184", "0.6541756", "0.6528299", "0.6515762", "0.6508632", "0.63984716", "0.6384312", "0.6381696", "0.6366511", "0.6357047", "0.6275855", "0.6241891", "0.6239974", "0.6220896", "0.6220401" ]
0.8107025
0
Test fetching samples to deliver.
def test_get_samples_to_deliver(sample_store): # GIVEN a store with a sample assert len(sample_store._get_query(table=Sample).all()) > 1 # WHEN finding samples to deliver samples = sample_store.get_samples_to_deliver() # THEN samples should be a list of samples assert isinstance(samples, list) assert isinstance(samples[0], Sample) # THEN it should return the samples that are sequenced but not delivered assert len(samples) == 2 assert {sample.name for sample in samples} == set(["to-deliver", "sequenced"])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _fetch_data(self, samples):\n pass", "def test_get_sample_ids(self):\n obs = self.tester._get_sample_ids(self.conn_handler)\n self.assertEqual(obs, self.exp_sample_ids)", "def test_get_sample_ids(self):\n obs = self.tester._get_sample_ids(self.conn_handler)\n self.assertEqual(obs, self.exp_sample_ids)", "def test_get(self):\n obs = self.tester.get('1.SKM7.640188')\n exp = PrepSample('1.SKM7.640188', self.tester)\n self.assertEqual(obs, exp)", "async def test_successful_samples(self):\n self.set_source_parameter(\"test_result\", [\"success\"])\n response = await self.collect(get_request_json_return_value=self.JMETER_JSON)\n self.assert_measurement(response, value=\"242\", entities=[])", "async def test_all_samples(self):\n response = await self.collect(get_request_json_return_value=self.JMETER_JSON)\n self.assert_measurement(response, value=\"248\", entities=[])", "def test_get(self):\n obs = self.tester.get('1.SKM7.640188')\n exp = Sample('1.SKM7.640188', self.tester)\n self.assertEqual(obs, exp)", "def test_get_samples_to_receive_internal(sample_store):\n # GIVEN a store with samples in a mix of states\n assert len(sample_store._get_query(table=Sample).all()) > 1\n assert (\n len(\n [sample for sample in sample_store._get_query(table=Sample).all() if sample.received_at]\n )\n > 1\n )\n\n # WHEN finding which samples are in queue to receive\n assert len(sample_store.get_samples_to_receive()) == 3\n first_sample = sample_store.get_samples_to_receive()[0]\n\n # THEN samples should be a sample\n assert isinstance(first_sample, Sample)\n\n assert first_sample.application_version.application.is_external is False\n assert first_sample.received_at is None", "def test_get_all_samples(self):\n self.login()\n\n page_size = 20\n\n # hit the API endpoint for both pages\n for page in range(1, 3):\n\n data = {'page': page,\n 'page_size': page_size}\n response = self.client.get(reverse('searchsamples'), data, format='json')\n\n expected = Sample.objects.all().order_by(\"-received\")\n\n paginator = Paginator(expected, page_size)\n res = paginator.page(page)\n\n # format queryset into json for returning\n serializer = SampleSerializer(res, many=True)\n\n context = {\n 'data': serializer.data,\n 'more': (page == 1)\n }\n\n self.assertEqual(response.json(), context)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_api_samples(self):\n # load api base\n r = requests.get('{server}/api/0.1/'.format(\n server=self.get_server_url())).json()\n # load samples from url specified in api base\n r = requests.get(r['samples']).json()\n self.assertIn('count', r)\n self.assertIn('next', r)\n self.assertIn('prev', r)\n self.assertIn('samples', r)", "def test_samples_to_receive_external(sample_store: Store, helpers: StoreHelpers):\n # GIVEN a store with a mixture of samples\n assert len(sample_store._get_query(table=Sample).all()) > 1\n\n # WHEN finding external samples to receive\n samples: List[Sample] = sample_store.get_samples_to_receive(external=True)\n\n # THEN samples should be a list of samples\n assert isinstance(samples, list)\n assert isinstance(samples[0], Sample)\n\n # THEN assert that only the external sample is returned\n assert len(samples) == 1\n\n first_sample = samples[0]\n # THEN assert that the sample is external in database\n assert first_sample.application_version.application.is_external is True\n # THEN assert that the sample is does not have a received at stamp\n assert first_sample.received_at is None", "def test_get(self):\n log.info(\"START INTEG TEST GET\")\n\n # 
Start sampling.\n self.clear_sample_data()\n self.driver.start_sampling()\n self.clear_async_data()\n\n # From sample file A0000010.DEC:\n # Flag record, first and last velocity record, time record.\n log.info(\"FIRST FILE A0000002 INTEG TEST GET\")\n self.create_sample_data('valid_A0000002.DEC', \"A0000002.DEC\")\n self.assert_data(None, 'valid_A0000002.yml', \n count=3, timeout=10)\n\n # From sample file A0000010.DEC:\n # Flag record, first and last velocity records twice, time record.\n log.info(\"SECOND FILE A0000004 INTEG TEST GET\")\n self.clear_async_data()\n self.create_sample_data('valid_A0000004.DEC', \"A0000004.DEC\")\n self.assert_data(None, 'valid_A0000004.yml', \n count=5, timeout=10)\n\n # Made-up data with all flags set to True.\n # Field values may not be realistic.\n log.info(\"THIRD FILE A0000003 INTEG TEST GET\")\n self.clear_async_data()\n self.create_sample_data('all_A0000003.DEC', \"A0000003.DEC\")\n self.assert_data(None, 'all_A0000003.yml', \n count=4, timeout=10)\n log.info(\"END INTEG TEST GET\")", "def test_api_sample(self):\n # load api base\n r = requests.get('{server}/api/0.1/'.format(\n server=self.get_server_url())).json()\n # load samples from url specified in api base\n r = requests.get(r['samples']).json()\n # load a sample\n r = requests.get(r['samples'][0]['url']).json()\n self.assertIn('datetime', r)\n self.assertIn('value', r)\n self.assertIn('id', r)\n self.assertIn('url', r)\n self.assertIn('sensor', r)", "def test_intent_classifier_get_testing_samples(self):\n pass", "def sample(self):\n timestamp = time.time()\n try:\n res = requests.get(self.url)\n except requests.exceptions.ConnectionError as error:\n LOG.warning(\"%s %s\", self, error)\n return\n if 199 < res.status_code < 300:\n self.data.append((timestamp, res.json()))\n LOG.debug(\"%s appended data sample\", self)\n else:\n LOG.warning(\"Error %s loading data from %s\", res.status_code, self)\n self.data = self.data[-self.max_samples:]", "def test_products_get(mocker, data):\n mocker.patch(\"sps.request.fetch\", autospec=True)\n request.fetch.return_value = data\n assert products.get(None, \"fake-file-name\", False, False)\n request.fetch.assert_called_with(\"https://scc.suse.com/api/package_search/products\")", "def test_retrieve_publishers(self):\n sample_publisher()\n sample_publisher()\n\n res = self.client.get(reverse('publishers'))\n publishers = Publisher.objects.all()\n serializer = PublisherSerializer(publishers, many=True)\n self.assertEqual(res.data, serializer.data)\n self.assertEqual(res.status_code, status.HTTP_200_OK)", "def test_get_results(self):\n pass", "def test_autosample(self):\n \n # Start data subscribers.\n #self._start_data_subscribers(6)\n #self.addCleanup(self._stop_data_subscribers) \n \n # Set up a subscriber to collect error events.\n #self._start_event_subscriber('ResourceAgentResourceStateEvent', 7)\n #self.addCleanup(self._stop_event_subscriber) \n \n state = self._ia_client.get_agent_state()\n self.assertEqual(state, ResourceAgentState.UNINITIALIZED)\n \n cmd = AgentCommand(command=ResourceAgentEvent.INITIALIZE)\n retval = self._ia_client.execute_agent(cmd)\n state = self._ia_client.get_agent_state()\n self.assertEqual(state, ResourceAgentState.INACTIVE)\n\n cmd = AgentCommand(command=ResourceAgentEvent.GO_ACTIVE)\n retval = self._ia_client.execute_agent(cmd)\n state = self._ia_client.get_agent_state()\n self.assertEqual(state, ResourceAgentState.IDLE)\n\n cmd = AgentCommand(command=ResourceAgentEvent.RUN)\n retval = self._ia_client.execute_agent(cmd)\n 
state = self._ia_client.get_agent_state()\n self.assertEqual(state, ResourceAgentState.COMMAND)\n\n cmd = AgentCommand(command=SBE37ProtocolEvent.START_AUTOSAMPLE)\n retval = self._ia_client.execute_resource(cmd)\n \n gevent.sleep(15)\n \n cmd = AgentCommand(command=SBE37ProtocolEvent.STOP_AUTOSAMPLE)\n retval = self._ia_client.execute_resource(cmd)\n \n cmd = AgentCommand(command=ResourceAgentEvent.RESET)\n retval = self._ia_client.execute_agent(cmd)\n state = self._ia_client.get_agent_state()\n self.assertEqual(state, ResourceAgentState.UNINITIALIZED)\n\n #self._async_event_result.get(timeout=CFG.endpoint.receive.timeout)\n #self.assertGreaterEqual(len(self._events_received), 6)\n\n #self._async_sample_result.get(timeout=CFG.endpoint.receive.timeout)\n #self.assertGreaterEqual(len(self._samples_received), 6)", "def test_candidates_retrieve(self):\n pass", "def test_samples_to_prepare(sample_store):\n # GIVEN a store with sample in a mix of states\n assert len(sample_store._get_query(table=Sample).all()) > 1\n assert (\n len(\n [sample for sample in sample_store._get_query(table=Sample).all() if sample.prepared_at]\n )\n >= 1\n )\n\n # WHEN finding which samples are in queue to be prepared\n prepare_samples: List[Sample] = sample_store.get_samples_to_prepare()\n\n # THEN samples should be a list of samples\n assert isinstance(prepare_samples, list)\n assert isinstance(prepare_samples[0], Sample)\n\n # THEN it should list the received sample\n assert len(prepare_samples) == 1\n assert prepare_samples[0].name == \"received\"", "def test_collect_demands(self):\n pass", "def test_acquire_sample(self):\n self.assert_initialize_driver()\n self.assert_particle_generation(ProtocolEvent.ACQUIRE_SAMPLE, DataParticleType.METBK_PARSED, self.assert_data_particle_sample)", "async def test_fetch_filtered_dataset_call(self):\n pool = asynctest.CoroutineMock()\n db_response = {\"referenceBases\": '', \"alternateBases\": '', \"variantType\": \"\",\n \"referenceName\": 'Chr38',\n \"frequency\": 0, \"callCount\": 0, \"sampleCount\": 0, \"variantCount\": 0,\n \"start\": 0, \"end\": 0, \"accessType\": \"PUBLIC\", \"datasetId\": \"test\"}\n pool.acquire().__aenter__.return_value = Connection(accessData=[db_response])\n assembly_id = 'GRCh38'\n position = (10, 20, None, None, None, None)\n chromosome = 1\n reference = 'A'\n alternate = ('DUP', None)\n result = await fetch_filtered_dataset(pool, assembly_id, position, chromosome, reference, alternate, None, None, False)\n # for now it can return empty dataset\n # in order to get a response we will have to mock it\n # in Connection() class\n expected = {'referenceName': 'Chr38', 'callCount': 0, 'sampleCount': 0, 'variantCount': 0, 'datasetId': 'test',\n 'referenceBases': '', 'alternateBases': '', 'variantType': '', 'start': 0, 'end': 0, 'frequency': 0,\n 'info': {'accessType': 'PUBLIC'},\n 'datasetHandover': [{'handoverType': {'id': 'CUSTOM', 'label': 'Variants'},\n 'description': 'browse the variants matched by the query',\n 'url': 'https://examplebrowser.org/dataset/test/browser/variant/Chr38-1--'},\n {'handoverType': {'id': 'CUSTOM', 'label': 'Region'},\n 'description': 'browse data of the region matched by the query',\n 'url': 'https://examplebrowser.org/dataset/test/browser/region/Chr38-1-1'},\n {'handoverType': {'id': 'CUSTOM', 'label': 'Data'},\n 'description': 'retrieve information of the datasets',\n 'url': 'https://examplebrowser.org/dataset/test/browser'}]}\n\n self.assertEqual(result, [expected])", "def test_get(self):\n pass", "def 
test_get_run(self):\n pass", "def test_search_samples(self):\n self.login()\n\n page_size = 20\n query = 'batch8'\n\n # hit the API endpoint\n data = {'q': query,\n 'page': 1,\n 'page_size': page_size}\n response = self.client.get(reverse('searchsamples'), data, format='json')\n\n expected = Sample.objects.filter(batch__icontains=query).order_by(\"-received\")\n\n # format queryset into json for returning\n serializer = SampleSerializer(expected, many=True)\n\n context = {\n 'data': serializer.data,\n 'more': False\n }\n\n self.assertEqual(response.json(), context)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_plays_get(self):\n pass", "def test_GET_fetcher():\n params = {\n 'key1':'value1',\n 'arg2':'value2'\n }\n\n ## test that request goes ok\n resp = wf_utils.fetch_GET_request(\n GET_ECHO_ENDPOINT,\n params=params\n )\n\n ## test that response json can be parsed\n payload = resp.json()\n\n ## test that response contains expected echo\n assert payload['args'] == params\n assert payload['headers']['user-agent'] == wf_utils.USER_AGENT", "def test_got_data(self):\n # Create and initialize the instrument driver with a mock port agent\n driver = InstrumentDriver(self._got_data_event_callback)\n self.assert_initialize_driver(driver)\n\n self.assert_raw_particle_published(driver, True)\n\n # validating data particles are published\n self.assert_particle_published(driver, self.RASFL_STATUS_DATA, self.assert_data_particle_status, True)\n self.assert_particle_published(driver, self.RASFL_SAMPLE_DATA1, self.assert_data_particle_sample, True)\n \n # validate that a duplicate sample is not published\n self.assert_particle_not_published(driver, self.RASFL_SAMPLE_DATA1, self.assert_data_particle_sample, True)\n \n # validate that a new sample is published\n self.assert_particle_published(driver, self.RASFL_SAMPLE_DATA2, self.assert_data_particle_sample, False)" ]
[ "0.6931996", "0.6794256", "0.6794256", "0.67737925", "0.66999286", "0.6694386", "0.65940934", "0.6582905", "0.65426874", "0.6501551", "0.6394083", "0.63844264", "0.6248509", "0.62174064", "0.61818117", "0.61708593", "0.6162405", "0.6161514", "0.61414313", "0.6137429", "0.61285293", "0.6115328", "0.61124265", "0.6110309", "0.6109338", "0.60823", "0.606998", "0.6041793", "0.60268706", "0.5993543" ]
0.7451402
0
Test fetching samples to invoice.
def test_get_samples_to_invoice_query(sample_store): # GIVEN a store with a sample assert len(sample_store._get_query(table=Sample).all()) > 1 # WHEN finding samples to invoice sample = sample_store.get_samples_to_invoice_query().first() # THEN samples should be a list of samples assert isinstance(sample, Sample) # THEN it should return all samples that are not invoiced assert sample assert sample.name == "delivered"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_all_samples(self):\n self.login()\n\n page_size = 20\n\n # hit the API endpoint for both pages\n for page in range(1, 3):\n\n data = {'page': page,\n 'page_size': page_size}\n response = self.client.get(reverse('searchsamples'), data, format='json')\n\n expected = Sample.objects.all().order_by(\"-received\")\n\n paginator = Paginator(expected, page_size)\n res = paginator.page(page)\n\n # format queryset into json for returning\n serializer = SampleSerializer(res, many=True)\n\n context = {\n 'data': serializer.data,\n 'more': (page == 1)\n }\n\n self.assertEqual(response.json(), context)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_get_sample_ids(self):\n obs = self.tester._get_sample_ids(self.conn_handler)\n self.assertEqual(obs, self.exp_sample_ids)", "def test_get_sample_ids(self):\n obs = self.tester._get_sample_ids(self.conn_handler)\n self.assertEqual(obs, self.exp_sample_ids)", "def test_get_samples_not_invoiced(sample_store):\n # GIVEN a store with a sample\n assert len(sample_store._get_query(table=Sample).all()) > 1\n\n # WHEN finding samples to invoice\n samples = sample_store.get_samples_not_invoiced()\n\n # THEN samples should be a list of samples\n assert isinstance(samples, list)\n assert isinstance(samples[0], Sample)\n\n # THEN it should return all samples that are not invoiced\n assert len(samples) == len(sample_store._get_query(table=Sample).all())", "def test_get(self):\n obs = self.tester.get('1.SKM7.640188')\n exp = PrepSample('1.SKM7.640188', self.tester)\n self.assertEqual(obs, exp)", "def test_search_samples(self):\n self.login()\n\n page_size = 20\n query = 'batch8'\n\n # hit the API endpoint\n data = {'q': query,\n 'page': 1,\n 'page_size': page_size}\n response = self.client.get(reverse('searchsamples'), data, format='json')\n\n expected = Sample.objects.filter(batch__icontains=query).order_by(\"-received\")\n\n # format queryset into json for returning\n serializer = SampleSerializer(expected, many=True)\n\n context = {\n 'data': serializer.data,\n 'more': False\n }\n\n self.assertEqual(response.json(), context)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_invoice_list(self):\n self.url = reverse(\"invoice-list\")\n response = self.client.get(self.url, **self.auth_headers)\n self.assertEqual(200, response.status_code)", "def test_api_samples(self):\n # load api base\n r = requests.get('{server}/api/0.1/'.format(\n server=self.get_server_url())).json()\n # load samples from url specified in api base\n r = requests.get(r['samples']).json()\n self.assertIn('count', r)\n self.assertIn('next', r)\n self.assertIn('prev', r)\n self.assertIn('samples', r)", "def test_get_samples_to_invoice_for_customer(\n store_with_samples_for_multiple_customers: Store,\n helpers: StoreHelpers,\n three_customer_ids: List[str],\n):\n # GIVEN a database with samples for a customer\n\n # THEN the one customer can be retrieved\n customer: Customer = store_with_samples_for_multiple_customers.get_customer_by_internal_id(\n customer_internal_id=three_customer_ids[1]\n )\n assert customer\n\n # WHEN getting the samples to invoice for a customer\n samples: List[\n Sample\n ] = store_with_samples_for_multiple_customers.get_samples_to_invoice_for_customer(\n customer=customer,\n )\n\n # THEN the samples should be returned\n assert samples\n assert len(samples) == 1\n\n assert samples[0].customer.internal_id == three_customer_ids[1]", "def test_get_records(self):\n pass", "def test_get_invoice(self):\n invoice = 
Invoice(self.client, 123456)\n self.assertEqual(invoice._populated, False)\n\n self.assertEqual(invoice.label, \"Invoice #123456\")\n self.assertEqual(invoice._populated, True)\n\n self.assertEqual(invoice.date, datetime(2015, 1, 1, 5, 1, 2))\n self.assertEqual(invoice.total, 9.51)", "def test_get(self):\n obs = self.tester.get('1.SKM7.640188')\n exp = Sample('1.SKM7.640188', self.tester)\n self.assertEqual(obs, exp)", "def test_get_invoice(self):\n invoice = Invoice(self.client, 123, {})\n\n self.assertEqual(invoice.date, datetime(2018, 1, 1, 0, 1, 1))\n self.assertEqual(invoice.id, 123)\n self.assertEqual(invoice.label, \"Invoice\")\n self.assertEqual(invoice.subtotal, 120.25)\n self.assertEqual(invoice.tax, 12.25)\n self.assertEqual(invoice.total, 132.5)\n self.assertIsNotNone(invoice.tax_summary)", "def test_api_sample(self):\n # load api base\n r = requests.get('{server}/api/0.1/'.format(\n server=self.get_server_url())).json()\n # load samples from url specified in api base\n r = requests.get(r['samples']).json()\n # load a sample\n r = requests.get(r['samples'][0]['url']).json()\n self.assertIn('datetime', r)\n self.assertIn('value', r)\n self.assertIn('id', r)\n self.assertIn('url', r)\n self.assertIn('sensor', r)", "def test_invoice_item_list(self):\n self.url = reverse(\"invoiceitem-list\")\n response = self.client.get(self.url, **self.auth_headers)\n self.assertEqual(200, response.status_code)", "def test_get(self):\n log.info(\"START INTEG TEST GET\")\n\n # Start sampling.\n self.clear_sample_data()\n self.driver.start_sampling()\n self.clear_async_data()\n\n # From sample file A0000010.DEC:\n # Flag record, first and last velocity record, time record.\n log.info(\"FIRST FILE A0000002 INTEG TEST GET\")\n self.create_sample_data('valid_A0000002.DEC', \"A0000002.DEC\")\n self.assert_data(None, 'valid_A0000002.yml', \n count=3, timeout=10)\n\n # From sample file A0000010.DEC:\n # Flag record, first and last velocity records twice, time record.\n log.info(\"SECOND FILE A0000004 INTEG TEST GET\")\n self.clear_async_data()\n self.create_sample_data('valid_A0000004.DEC', \"A0000004.DEC\")\n self.assert_data(None, 'valid_A0000004.yml', \n count=5, timeout=10)\n\n # Made-up data with all flags set to True.\n # Field values may not be realistic.\n log.info(\"THIRD FILE A0000003 INTEG TEST GET\")\n self.clear_async_data()\n self.create_sample_data('all_A0000003.DEC', \"A0000003.DEC\")\n self.assert_data(None, 'all_A0000003.yml', \n count=4, timeout=10)\n log.info(\"END INTEG TEST GET\")", "def test_get_invoice_items(self):\n invoice = Invoice(self.client, 123456)\n items = invoice.items\n\n self.assertEqual(len(items), 1)\n item = items[0]\n\n self.assertEqual(item.label, \"Linode 2048 - Example\")\n self.assertEqual(item.type, \"hourly\")\n self.assertEqual(item.amount, 9.51)\n self.assertEqual(item.quantity, 317)\n self.assertEqual(item.unit_price, \"0.03\")\n self.assertEqual(\n item.from_date,\n datetime(year=2014, month=12, day=19, hour=0, minute=27, second=2),\n )\n self.assertEqual(\n item.to_date,\n datetime(year=2015, month=1, day=1, hour=4, minute=59, second=59),\n )", "def test_get_invoice_items(self):\n invoice = Invoice(self.client, 123456)\n items = invoice.items\n\n self.assertEqual(len(items), 1)\n item = items[0]\n\n self.assertEqual(item.label, \"Linode 2048 - Example\")\n self.assertEqual(item.type, \"hourly\")\n self.assertEqual(item.amount, 9.51)\n self.assertEqual(item.quantity, 317)\n self.assertEqual(item.unit_price, \"0.03\")\n self.assertEqual(item.from_date, 
datetime(year=2014, month=12, day=19, hour=0, minute=27, second=2))\n self.assertEqual(item.to_date, datetime(year=2015, month=1, day=1, hour=4, minute=59, second=59))", "def test_client_verification_document_retrieve(self):\n pass", "def test_get(client, example_records, h, prefix):\n id_ = example_records[0].id\n\n res = client.get(f'{prefix}{id_}', headers=h)\n assert res.status_code == 200\n assert res.json['id'] == id_\n # Test links\n assert res.json['links'] == {\n 'self': 'https://127.0.0.1:5000/api/vocabularies/licenses/cc-by'\n }", "def test_get(self):\n #Validate the response\n resp = self.client.get('/api/v1/purchase-order/1/')\n self.assertEqual(resp.status_code, 200)\n \n #Validate the returned data\n obj = resp.data\n self.assertEqual(obj['id'], 1)\n self.assertEqual(obj['terms'], '0/net')\n self.assertEqual(obj['revision'], 0)\n \n #Test items\n self.assertIn('items', obj)\n self.assertEqual(len(obj['items']), 1)\n item1 = obj['items'][0]\n #self.assertIn('purchasing_units', item1)\n #self.assertEqual(item1['purchasing_units'], 'm')", "def test_get_invoice_list(mocker, expected_response, expected_data, client) -> None:\n\n mocker.patch(\"requests.Session.request\", return_value=expected_response)\n actual_data = client.get_invoice_list()\n\n assert actual_data == expected_data", "def test_get_pay_in_details(self):\n pass", "def test_get_samples_not_down_sampled(sample_store: Store, helpers: StoreHelpers, sample_id: int):\n # GIVEN a store with a sample\n assert len(sample_store._get_query(table=Sample).all()) > 1\n\n # WHEN finding samples to invoice\n samples = sample_store.get_samples_not_down_sampled()\n\n # THEN samples should be a list of samples\n assert isinstance(samples, list)\n assert isinstance(samples[0], Sample)\n\n # THEN it should return all samples in the store\n assert len(samples) == len(sample_store._get_query(table=Sample).all())", "def test_getitem(self):\n obs = self.tester['1.SKM7.640188']\n exp = PrepSample('1.SKM7.640188', self.tester)\n self.assertEqual(obs, exp)", "def test_get_insumo(self):", "def test_load_book_details(self, mock_get):\n\n c = Client()\n data = {\n 'search_type': self.filter_subject,\n 'search_value': self.subject,\n }\n response = c.get('/taric_books/%s/' % self.ISBN)\n\n self.assertEqual(response.status_code, 200)", "async def test_all_samples(self):\n response = await self.collect(get_request_json_return_value=self.JMETER_JSON)\n self.assert_measurement(response, value=\"248\", entities=[])", "def test_get1(self):\n pass", "def _fetch_data(self, samples):\n pass" ]
[ "0.71165115", "0.6904219", "0.6904219", "0.67734456", "0.6765537", "0.6668611", "0.6624272", "0.6623599", "0.6583024", "0.65742016", "0.6558656", "0.6528112", "0.64597166", "0.6408933", "0.63449955", "0.63117397", "0.6308055", "0.6302898", "0.62760097", "0.6271004", "0.6195979", "0.61897504", "0.61852115", "0.6184132", "0.6166181", "0.6151453", "0.61422276", "0.6141443", "0.61249727", "0.61130464" ]
0.75329405
0
Test getting samples not invoiced.
def test_get_samples_not_invoiced(sample_store): # GIVEN a store with a sample assert len(sample_store._get_query(table=Sample).all()) > 1 # WHEN finding samples to invoice samples = sample_store.get_samples_not_invoiced() # THEN samples should be a list of samples assert isinstance(samples, list) assert isinstance(samples[0], Sample) # THEN it should return all samples that are not invoiced assert len(samples) == len(sample_store._get_query(table=Sample).all())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_samples_not_down_sampled(sample_store: Store, helpers: StoreHelpers, sample_id: int):\n # GIVEN a store with a sample\n assert len(sample_store._get_query(table=Sample).all()) > 1\n\n # WHEN finding samples to invoice\n samples = sample_store.get_samples_not_down_sampled()\n\n # THEN samples should be a list of samples\n assert isinstance(samples, list)\n assert isinstance(samples[0], Sample)\n\n # THEN it should return all samples in the store\n assert len(samples) == len(sample_store._get_query(table=Sample).all())", "def test_get_sample_ids(self):\n obs = self.tester._get_sample_ids(self.conn_handler)\n self.assertEqual(obs, self.exp_sample_ids)", "def test_get_sample_ids(self):\n obs = self.tester._get_sample_ids(self.conn_handler)\n self.assertEqual(obs, self.exp_sample_ids)", "def test_filterSamples(self):\r\n exp = ['PC.356', 'PC.593']\r\n self.overview_map.filterSamples(['PC.593', 'PC.356'])\r\n obs = self.overview_map.SampleIds\r\n self.assertEqual(obs, exp)\r\n\r\n self.overview_map.filterSamples([])\r\n self.assertEqual(self.overview_map.SampleIds, [])", "def test_filterSamples(self):\n exp = ['PC.356', 'PC.593']\n self.overview_map.filterSamples(['PC.593', 'PC.356'])\n obs = self.overview_map.SampleIds\n self.assertEqual(obs, exp)\n\n self.overview_map.filterSamples([])\n self.assertEqual(self.overview_map.SampleIds, [])", "def testGenerateSamplesMeasureNotCalled(self):\n timer = timing_util.IntervalTimer()\n self.assertEqual(timer.intervals, [])\n samples = timer.GenerateSamples()\n self.assertEqual(timer.intervals, [])\n self.assertEqual(samples, [])", "def test_no_counterfactuals_found(self):\n threshold = 4.0\n self._config['Regression threshold'] = str(threshold)\n self._example = {'x_1': 1.0, 'x_2': 1.0}\n output = self._gen.generate(\n example=self._example,\n model=self._model,\n dataset=self._dataset,\n config=self._config)\n self.assertEmpty(output)", "def test_sinus_does_not_trigger_alert(sim_sampler, events, data):\n for _ in range(SIMULATION_SAMPLES):\n sim_sampler.sampling_iteration()\n\n assert len(events.alerts_queue) == 0,\\\n f\"Unexpected alerts: {events.alerts_queue.active_alerts}\"", "def test_filterSamples_no_strict(self):\r\n self.overview_map.filterSamples(['PC.356', 'abc123'], strict=False)\r\n self.assertEqual(self.overview_map.SampleIds, ['PC.356'])\r\n\r\n self.empty_map.filterSamples(['foo'], strict=False)\r\n self.assertEqual(self.empty_map.SampleIds, [])", "def test_filterSamples_no_strict(self):\n self.overview_map.filterSamples(['PC.356', 'abc123'], strict=False)\n self.assertEqual(self.overview_map.SampleIds, ['PC.356'])\n\n self.empty_map.filterSamples(['foo'], strict=False)\n self.assertEqual(self.empty_map.SampleIds, [])", "def test_api_samples(self):\n # load api base\n r = requests.get('{server}/api/0.1/'.format(\n server=self.get_server_url())).json()\n # load samples from url specified in api base\n r = requests.get(r['samples']).json()\n self.assertIn('count', r)\n self.assertIn('next', r)\n self.assertIn('prev', r)\n self.assertIn('samples', r)", "def test_no_telemetry(self):\n response = self.client.get(telemetry_url)\n self.assertEqual(200, response.status_code)\n\n self.assertEqual([], json.loads(response.content))", "def test_contains_false(self):\n self.assertFalse('Not_a_Sample' in self.tester)", "def test_contains_false(self):\n self.assertFalse('Not_a_Sample' in self.tester)", "def samples(self):\n pass", "def _check_samples(self, samples):\n check = [k not in self._samples for k in samples]\n num_not_in_vcf = 
np.sum(check)\n if num_not_in_vcf > 0:\n with open('/tmp/excluded.subjects', 'w') as f:\n for item in samples[np.array(check)]:\n f.write(\"%s\\n\" % item)\n print(num_not_in_vcf, 'were not in vcf file and were removed; see /tmp/excluded.subjects')\n return samples[~np.array(check)]", "def test_api_sample(self):\n # load api base\n r = requests.get('{server}/api/0.1/'.format(\n server=self.get_server_url())).json()\n # load samples from url specified in api base\n r = requests.get(r['samples']).json()\n # load a sample\n r = requests.get(r['samples'][0]['url']).json()\n self.assertIn('datetime', r)\n self.assertIn('value', r)\n self.assertIn('id', r)\n self.assertIn('url', r)\n self.assertIn('sensor', r)", "async def test_no_transactions(self):\n response = await self.collect(get_request_text=\"\")\n self.assert_measurement(response, value=\"0\")", "def test_get(self):\n obs = self.tester.get('1.SKM7.640188')\n exp = PrepSample('1.SKM7.640188', self.tester)\n self.assertEqual(obs, exp)", "async def test_no_transactions(self):\n response = await self.collect(get_request_json_return_value={})\n self.assert_measurement(response, value=\"0\")", "async def test_no_transactions(self):\n response = await self.collect(get_request_json_return_value={})\n self.assert_measurement(response, value=\"0\")", "def test_SampleIds(self):\n exp = [\"PC.354\", \"PC.355\", \"PC.356\", \"PC.481\", \"PC.593\", \"PC.607\",\n \"PC.634\", \"PC.635\", \"PC.636\"]\n obs = self.overview_map.SampleIds\n self.assertEqual(obs, exp)\n\n obs = self.no_metadata.SampleIds\n self.assertEqual(obs, exp)\n\n obs = self.empty_map.SampleIds\n self.assertEqual(obs, [])", "def test_get_scans(self):\n pass", "def test_SampleIds(self):\r\n exp = [\"PC.354\", \"PC.355\", \"PC.356\", \"PC.481\", \"PC.593\", \"PC.607\",\r\n \"PC.634\", \"PC.635\", \"PC.636\"]\r\n obs = self.overview_map.SampleIds\r\n self.assertEqual(obs, exp)\r\n\r\n obs = self.no_metadata.SampleIds\r\n self.assertEqual(obs, exp)\r\n\r\n obs = self.empty_map.SampleIds\r\n self.assertEqual(obs, [])", "async def test_all_samples(self):\n response = await self.collect(get_request_json_return_value=self.JMETER_JSON)\n self.assert_measurement(response, value=\"248\", entities=[])", "def test_no_source_measurements(self):\n measurement = self.measurement(self.metric())\n self.assertEqual(None, measurement[\"count\"][\"value\"])", "def test_getSampleCount(self):\r\n self.assertEqual(self.estimator1.getSampleCount(), 1)", "def test_filterSamples_strict(self):\r\n with self.assertRaises(ValueError):\r\n self.overview_map.filterSamples(['PC.356', 'abc123'])\r\n\r\n with self.assertRaises(ValueError):\r\n self.empty_map.filterSamples(['foo'])", "def test_two_unsampled_arms(self):\n self._test_two_unsampled_arms()", "def missing_tests(session):\n print('The following samples do not have tests:')\n for sample in set(ALL_SAMPLE_DIRECTORIES) - set(ALL_TESTED_SAMPLES):\n print('* {}'.format(sample))" ]
[ "0.72148484", "0.6882741", "0.6882741", "0.6842651", "0.6788283", "0.6630164", "0.6539232", "0.6498837", "0.6441554", "0.6415748", "0.63511664", "0.624929", "0.623399", "0.623399", "0.6222391", "0.62210715", "0.6196237", "0.6174369", "0.6173602", "0.6164898", "0.6164898", "0.61557376", "0.6152442", "0.61490387", "0.61364484", "0.6134899", "0.6133023", "0.6074498", "0.6064817", "0.6053097" ]
0.801496
0
Test that samples to invoice can be returned for a customer.
def test_get_samples_to_invoice_for_customer(
    store_with_samples_for_multiple_customers: Store,
    helpers: StoreHelpers,
    three_customer_ids: List[str],
):
    # GIVEN a database with samples for a customer

    # THEN the one customer can be retrieved
    customer: Customer = store_with_samples_for_multiple_customers.get_customer_by_internal_id(
        customer_internal_id=three_customer_ids[1]
    )
    assert customer

    # WHEN getting the samples to invoice for a customer
    samples: List[
        Sample
    ] = store_with_samples_for_multiple_customers.get_samples_to_invoice_for_customer(
        customer=customer,
    )

    # THEN the samples should be returned
    assert samples
    assert len(samples) == 1

    assert samples[0].customer.internal_id == three_customer_ids[1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_single_customer(self):\n\n create_invoice = single_customer(\"Susan Wong\", \"invoice.csv\")\n create_invoice(\"test_items.csv\")\n\n # Generate list of rentals\n with open('invoice.csv', 'r') as csvfile:\n rentals = []\n for row in csvfile:\n rentals.append(row)\n\n print(rentals)\n\n # Assert statements\n self.assertEqual(rentals[3], ('Susan Wong,AT92,Office Chair,13\\n'))\n self.assertEqual(rentals[4], ('Susan Wong,KE25,Espresso Machine,30\\n'))", "def test_get_samples_to_invoice_query(sample_store):\n # GIVEN a store with a sample\n assert len(sample_store._get_query(table=Sample).all()) > 1\n\n # WHEN finding samples to invoice\n sample = sample_store.get_samples_to_invoice_query().first()\n\n # THEN samples should be a list of samples\n assert isinstance(sample, Sample)\n\n # THEN it should return all samples that are not invoiced\n assert sample\n assert sample.name == \"delivered\"", "def test_single_customer(_full_invoice):\n\n test_invoice = \"../data/test-invoice.csv\"\n items_to_insert = \"../data/items.csv\"\n csv_contents = []\n\n function = l.single_customer(\"Kyouma Hououin\", test_invoice)\n function(items_to_insert)\n\n with open(test_invoice, \"r\") as csv_file:\n contents = reader(csv_file, delimiter=',')\n for line in contents:\n if line != []:\n csv_contents += [line]\n\n csv_contents += contents\n\n assert _full_invoice == csv_contents", "def test_available_customer():\n rep = RentRepository()\n rep.store( '23','12', '1', '1')\n try:\n\n idBook = '13'\n idCustomer = '23'\n flag = '1'\n id = '1'\n Validator.available_customer(rep.get_all(), idCustomer)\n assert False\n\n except RepositoryExceptionRent as msg:\n assert True", "def test_invoice_detail(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we can create the invoice\n data = self.invoice_data\n data[\"customer_id\"] = id\n id_inv = self._create_model(\"invoice\", data, [])\n if id_inv:\n # then performing detail\n self._detail_model(\"invoice\", self.invoice_data, id, [])\n self.assertIsNotNone(id_inv)\n self.assertIsNotNone(id)", "def test_get_invoice(self):\n invoice = Invoice(self.client, 123, {})\n\n self.assertEqual(invoice.date, datetime(2018, 1, 1, 0, 1, 1))\n self.assertEqual(invoice.id, 123)\n self.assertEqual(invoice.label, \"Invoice\")\n self.assertEqual(invoice.subtotal, 120.25)\n self.assertEqual(invoice.tax, 12.25)\n self.assertEqual(invoice.total, 132.5)\n self.assertIsNotNone(invoice.tax_summary)", "def test_get_invoice(self):\n invoice = Invoice(self.client, 123456)\n self.assertEqual(invoice._populated, False)\n\n self.assertEqual(invoice.label, \"Invoice #123456\")\n self.assertEqual(invoice._populated, True)\n\n self.assertEqual(invoice.date, datetime(2015, 1, 1, 5, 1, 2))\n self.assertEqual(invoice.total, 9.51)", "def test_data_when_import_customer_with_data(self):\n\n customer = self.customers[0]\n self.assertEqual(\"Jimena\", customer.get_first_name())\n self.assertEqual(\"Sanabria\", customer.get_last_name())\n self.assertEqual(\"21-08-1980\", customer.get_date_of_birth())\n self.assertEqual([\"Nueva Granada #1837\"], customer.get_addresses())\n self.assertEqual([4244270,70759942], customer.get_phones())\n self.assertEqual(\"[email protected]\", customer.get_email())\n self.assertEqual(\"Gold\", customer.get_membership())\n self.assertEqual(\"Active\", customer.get_status())", "def test_get_samples_not_invoiced(sample_store):\n # GIVEN a store with a sample\n assert 
len(sample_store._get_query(table=Sample).all()) > 1\n\n # WHEN finding samples to invoice\n samples = sample_store.get_samples_not_invoiced()\n\n # THEN samples should be a list of samples\n assert isinstance(samples, list)\n assert isinstance(samples[0], Sample)\n\n # THEN it should return all samples that are not invoiced\n assert len(samples) == len(sample_store._get_query(table=Sample).all())", "def test_case_customer_part_orders(self):\n pass", "def test_invoice_create(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we can create the invoice\n data = self.invoice_data\n data[\"customer_id\"] = id\n self._create_model(\"invoice\", data, [])\n self.assertIsNotNone(id)", "def test_get_customer(self):\n get_customer_url = reverse(\"customer_detail\", kwargs={\"pk\": 2})\n response = self.client.get(get_customer_url)\n\n customer_expected_json = {\n \"first_name\": \"Veronica\",\n \"last_name\": \"Ajiambo\",\n \"is_active\": True,\n }\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data, customer_expected_json)", "def test_get_customers(self):\n get_customers_url = reverse(\"customer_list\")\n response = self.client.get(get_customers_url)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # get data from db\n customers = Customer.objects.all()\n serializer = CustomerSerializer(customers, many=True)\n self.assertEqual(response.data, serializer.data)\n\n self.assertEqual(len(response.data), 4)", "def test_client_verification_document_retrieve(self):\n pass", "def test_get_virtual_account_beneficiaries(self):\n pass", "def test_get_invoice_list(mocker, expected_response, expected_data, client) -> None:\n\n mocker.patch(\"requests.Session.request\", return_value=expected_response)\n actual_data = client.get_invoice_list()\n\n assert actual_data == expected_data", "def test_get_customer_rental(self):\n get_customer_rentals_url = reverse(\n \"customer_rental_list\", kwargs={\"pk\": self.user1.pk}\n )\n response = self.client.get(get_customer_rentals_url)\n self.assertEqual(len(response.data), 1)", "def test_get_invoice_info(mocker, expected_response, expected_data, client) -> None:\n\n mocker.patch(\"requests.Session.request\", return_value=expected_response)\n actual_data = client.get_invoice_info(invoice_id=1)\n\n assert actual_data == expected_data", "def test_get_customer_list(self):\n customer = self._create_customers(\"Alex\")\n customer.create()\n customer = self._create_customers(\"Sally\")\n customer.create()\n customer = self._create_customers(\"John\")\n customer.create()\n resp = self.app.get(\"/customers\")\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = resp.get_json()\n self.assertEqual(len(data), 3)", "def test_invoice_list(self):\n self.url = reverse(\"invoice-list\")\n response = self.client.get(self.url, **self.auth_headers)\n self.assertEqual(200, response.status_code)", "def test_get_customer(self):\n # get the id of a customer\n test_customer = self._create_customers(\"Alex\")\n logging.debug(test_customer)\n test_customer.create() \n resp = self.app.get(\n \"/customers/{}\".format(test_customer.id), content_type=\"application/json\"\n )\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = resp.get_json()\n self.assertEqual(data[\"name\"], test_customer.name)", "def test_invoice_delete(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, 
[\"name\", \"email\", \"phone\"])\n if id:\n # then we can create the invoice\n data = self.invoice_data\n data[\"customer_id\"] = id\n id_inv = self._create_model(\"invoice\", data, [])\n if id_inv:\n # then performe delete\n self._delete_model(\"invoice\", id_inv)\n self.assertIsNotNone(id_inv)\n self.assertIsNotNone(id)", "def test_client_verification_retrieve(self):\n pass", "def test_total_invoices_in_cero(self):\n sale = SaleFactory(total_value=100)\n self.assertEqual(sale.total_invoices, 0)", "def test_invoice_update(self):\n # first we create a customer\n id = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id:\n # then we can create the invoice\n data = self.invoice_data\n data[\"customer_id\"] = id\n id_inv = self._create_model(\"invoice\", data, [])\n if id_inv:\n # create another customer\n id_other = self._create_model(\"customer\", self.customer_data, [\"name\", \"email\", \"phone\"])\n if id_other:\n # then performe update\n data = self.invoice_data\n data[\"customer_id\"] = id_other\n self._update_model(\"invoice\", id, data, [])\n self.assertIsNotNone(id_other)\n self.assertIsNotNone(id_inv)\n self.assertIsNotNone(id)", "def test_get_virtual_account_beneficiary(self):\n pass", "def test_get_pay_in_details(self):\n pass", "def test_get_samples_by_customer_id_and_pattern_with_collaboration(\n store_with_samples_for_multiple_customers: Store,\n helpers: StoreHelpers,\n three_customer_ids: List[str],\n):\n # GIVEN a database with samples for a customer\n\n # THEN the one customer can be retrieved\n customer: set[Customer] = store_with_samples_for_multiple_customers.get_customer_by_internal_id(\n customer_internal_id=three_customer_ids[1]\n ).collaborators\n assert customer\n\n # WHEN getting the samples for a customer\n samples: List[\n Sample\n ] = store_with_samples_for_multiple_customers.get_samples_by_customer_id_and_pattern(\n customers=customer,\n pattern=\"sample\",\n )\n\n # THEN the samples should be returned\n assert samples\n assert len(samples) == 3\n\n for sample in samples:\n assert \"sample\" in sample.name", "def setUp(self):\n # Setup dummy custmers\n Customer.objects.create(name=\"Mike Zinyoni\", phone=\"+263784528370\", email=\"[email protected]\", address=\"Stand #11 Lorraine Drive, Bluffhill Harare Zimbabwe\")\n Customer.objects.create(name=\"Josh Nyamulomo\", phone=\"+26356839021\", email=\"[email protected]\", address=\"Stand #5 Lorraine Drive, Bluffhill Harare Zimbabwe\")\n Customer.objects.create(name=\"Brian Mpofu\", phone=\"+26390839021\", email=\"[email protected]\", address=\"Stand #25 Lorraine Drive, Bluffhill Harare Zimbabwe\")\n # Setup dummy items\n Item.objects.create(name=\"Chicken thighs\", description=\"Chunky big chicken thighs from Irvines chickens\", price=4.99, unit=\"Kg\")\n Item.objects.create(name=\"Beef steak\", description=\"Premium quality beef steak from Caswell meats\", price=6.99, unit=\"Kg\")\n Item.objects.create(name=\"Kefalos Youghgut\", description=\"Healthy and tasty youghgut available in strawberry, banana and butter milk flavour\", price=5.21, unit=\"litre\")\n Item.objects.create(name=\"Eversharp pen\", description=\"Pens available in: blue , red, green and black ink\", price=0.99, unit=\"dozen\")\n Item.objects.create(name=\"Proton Bread\", description=\"Fresh 700g bread\", price=0.9, unit=\"loaf\")\n # Setup dummy Invoice along side the invoice line\n invoice_1 = Invoice(customer=Customer.objects.get(id=1),total=0)\n invoice_1.save()\n 
InvoiceLine.objects.create(invoice=invoice_1,item=Item.objects.get(id=1), quantity=2, amount=(Item.objects.get(id=1).price*2))\n InvoiceLine.objects.create(invoice=invoice_1,item=Item.objects.get(id=4), quantity=1, amount=(Item.objects.get(id=4).price*1))\n InvoiceLine.objects.create(invoice=invoice_1,item=Item.objects.get(id=3), quantity=6, amount=(Item.objects.get(id=3).price*6))\n invoice_1.total = sum(invoiceLine.amount for invoiceLine in invoice_1.invoiceLines.all())\n invoice_1.save()\n \n invoice_2 = Invoice(customer=Customer.objects.get(id=3),total=0)\n invoice_2.save()\n InvoiceLine.objects.create(invoice=invoice_2,item=Item.objects.get(id=5), quantity=12, amount=(Item.objects.get(id=5).price*12))\n InvoiceLine.objects.create(invoice=invoice_2,item=Item.objects.get(id=4), quantity=2, amount=(Item.objects.get(id=4).price*2))\n invoice_2.total = sum(invoiceLine.amount for invoiceLine in invoice_2.invoiceLines.all())\n invoice_2.save()\n \n invoice_3 = Invoice(customer=Customer.objects.get(id=2),total=0)\n invoice_3.save()\n InvoiceLine.objects.create(invoice=invoice_3,item=Item.objects.get(id=5), quantity=12, amount=(Item.objects.get(id=5).price*12))\n InvoiceLine.objects.create(invoice=invoice_3,item=Item.objects.get(id=4), quantity=2, amount=(Item.objects.get(id=4).price*2))\n InvoiceLine.objects.create(invoice=invoice_3,item=Item.objects.get(id=1), quantity=2, amount=(Item.objects.get(id=1).price*2))\n InvoiceLine.objects.create(invoice=invoice_3,item=Item.objects.get(id=4), quantity=1, amount=(Item.objects.get(id=4).price*1))\n InvoiceLine.objects.create(invoice=invoice_3,item=Item.objects.get(id=3), quantity=6, amount=(Item.objects.get(id=3).price*6))\n invoice_3.total = sum(invoiceLine.amount for invoiceLine in invoice_3.invoiceLines.all())\n invoice_3.save()\n\n invoice_4 = Invoice(customer=Customer.objects.get(id=1),total=0)\n invoice_4.save()\n InvoiceLine.objects.create(invoice=invoice_4,item=Item.objects.get(id=1), quantity=6, amount=(Item.objects.get(id=1).price*6))\n invoice_4.total = sum(invoiceLine.amount for invoiceLine in invoice_4.invoiceLines.all())\n invoice_4.save()", "def test_client_tax_information_retrieve(self):\n pass" ]
[ "0.6989047", "0.678077", "0.6628947", "0.6516833", "0.6497587", "0.64812696", "0.6476004", "0.6389455", "0.6330797", "0.63280046", "0.63094914", "0.6273231", "0.6252472", "0.6201348", "0.62011427", "0.6200815", "0.61968875", "0.6181959", "0.6151599", "0.61373514", "0.6132861", "0.61267453", "0.6123776", "0.60810757", "0.60752225", "0.60704345", "0.6055031", "0.60511875", "0.60273284", "0.6010581" ]
0.7491114
0
Test that samples can be returned for a customer.
def test_get_samples_by_customer_id_and_pattern_with_collaboration(
    store_with_samples_for_multiple_customers: Store,
    helpers: StoreHelpers,
    three_customer_ids: List[str],
):
    # GIVEN a database with samples for a customer

    # THEN the one customer can be retrieved
    customer: set[Customer] = store_with_samples_for_multiple_customers.get_customer_by_internal_id(
        customer_internal_id=three_customer_ids[1]
    ).collaborators
    assert customer

    # WHEN getting the samples for a customer
    samples: List[
        Sample
    ] = store_with_samples_for_multiple_customers.get_samples_by_customer_id_and_pattern(
        customers=customer,
        pattern="sample",
    )

    # THEN the samples should be returned
    assert samples
    assert len(samples) == 3

    for sample in samples:
        assert "sample" in sample.name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_samples_to_invoice_for_customer(\n store_with_samples_for_multiple_customers: Store,\n helpers: StoreHelpers,\n three_customer_ids: List[str],\n):\n # GIVEN a database with samples for a customer\n\n # THEN the one customer can be retrieved\n customer: Customer = store_with_samples_for_multiple_customers.get_customer_by_internal_id(\n customer_internal_id=three_customer_ids[1]\n )\n assert customer\n\n # WHEN getting the samples to invoice for a customer\n samples: List[\n Sample\n ] = store_with_samples_for_multiple_customers.get_samples_to_invoice_for_customer(\n customer=customer,\n )\n\n # THEN the samples should be returned\n assert samples\n assert len(samples) == 1\n\n assert samples[0].customer.internal_id == three_customer_ids[1]", "def test_available_customer():\n rep = RentRepository()\n rep.store( '23','12', '1', '1')\n try:\n\n idBook = '13'\n idCustomer = '23'\n flag = '1'\n id = '1'\n Validator.available_customer(rep.get_all(), idCustomer)\n assert False\n\n except RepositoryExceptionRent as msg:\n assert True", "def test_get_customer_list(self):\n customer = self._create_customers(\"Alex\")\n customer.create()\n customer = self._create_customers(\"Sally\")\n customer.create()\n customer = self._create_customers(\"John\")\n customer.create()\n resp = self.app.get(\"/customers\")\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = resp.get_json()\n self.assertEqual(len(data), 3)", "def test_get_customers(self):\n get_customers_url = reverse(\"customer_list\")\n response = self.client.get(get_customers_url)\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n # get data from db\n customers = Customer.objects.all()\n serializer = CustomerSerializer(customers, many=True)\n self.assertEqual(response.data, serializer.data)\n\n self.assertEqual(len(response.data), 4)", "def test_get_samples_not_invoiced(sample_store):\n # GIVEN a store with a sample\n assert len(sample_store._get_query(table=Sample).all()) > 1\n\n # WHEN finding samples to invoice\n samples = sample_store.get_samples_not_invoiced()\n\n # THEN samples should be a list of samples\n assert isinstance(samples, list)\n assert isinstance(samples[0], Sample)\n\n # THEN it should return all samples that are not invoiced\n assert len(samples) == len(sample_store._get_query(table=Sample).all())", "def test_get_list_customers_with_filters(client, db_session, make_customer_list):\n # Arange\n customers = make_customer_list(10)\n\n # Act\n response = client.get(\"api/customers/?name=customer\")\n response_data = response.get_json()\n\n # Assert\n assert response.status_code == status.HTTP_200_OK\n assert len(response_data[\"items\"]) == len(customers)", "def test_medicians_get(self):\n pass", "def test_get_customer(self):\n # get the id of a customer\n test_customer = self._create_customers(\"Alex\")\n logging.debug(test_customer)\n test_customer.create() \n resp = self.app.get(\n \"/customers/{}\".format(test_customer.id), content_type=\"application/json\"\n )\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = resp.get_json()\n self.assertEqual(data[\"name\"], test_customer.name)", "def test_sample(self):\n ## You don't *have* to declare a doc string, but it's good\n ## practice.\n\n ## Here we're making a \"sample_call()\", passing self.token as\n ## the authentication token. For available calls and the\n ## order of arguments, check out ksapi.py. 
The return value\n ## will be an httplib.HTTPResponse object with additional\n ## 'body' (str) and 'obj' (dict) attributes. If a status code\n ## greater than or equal to 400 is returned from the other\n ## end, an exception will be raised; the response will be\n ## attached to the 'response' attribute of the exception, and\n ## the status will be on the 'status' attribute of the\n ## exception. Note that redirects are followed.\n resp = self.ks.sample_call(self.token, 'argument 1', 'argument 2')\n\n # Verify that resp is correct\n util.assert_equal(resp.status, 200)\n util.assert_in('sample', resp.obj)\n ## Place your various assertions about the response here.\n\n ## Rinse, lather, repeat. You should perform only a single\n ## test per test method, but if you're doing creation tests,\n ## it makes sense to include the deletion test in the same\n ## test method. Remember, the only control you have over test\n ## ordering is by setting up dependencies (@dtest.depends()).\n ## No return value is necessary.", "def test_data_when_import_customer_with_data(self):\n\n customer = self.customers[0]\n self.assertEqual(\"Jimena\", customer.get_first_name())\n self.assertEqual(\"Sanabria\", customer.get_last_name())\n self.assertEqual(\"21-08-1980\", customer.get_date_of_birth())\n self.assertEqual([\"Nueva Granada #1837\"], customer.get_addresses())\n self.assertEqual([4244270,70759942], customer.get_phones())\n self.assertEqual(\"[email protected]\", customer.get_email())\n self.assertEqual(\"Gold\", customer.get_membership())\n self.assertEqual(\"Active\", customer.get_status())", "def test_get_customer_rental(self):\n get_customer_rentals_url = reverse(\n \"customer_rental_list\", kwargs={\"pk\": self.user1.pk}\n )\n response = self.client.get(get_customer_rentals_url)\n self.assertEqual(len(response.data), 1)", "def test_customer_list(self):\n self.url = reverse(\"customer-list\")\n response = self.client.get(self.url, **self.auth_headers)\n self.assertEqual(200, response.status_code)", "def test_get_sample_ids(self):\n obs = self.tester._get_sample_ids(self.conn_handler)\n self.assertEqual(obs, self.exp_sample_ids)", "def test_get_sample_ids(self):\n obs = self.tester._get_sample_ids(self.conn_handler)\n self.assertEqual(obs, self.exp_sample_ids)", "def test_get_customer(self):\n get_customer_url = reverse(\"customer_detail\", kwargs={\"pk\": 2})\n response = self.client.get(get_customer_url)\n\n customer_expected_json = {\n \"first_name\": \"Veronica\",\n \"last_name\": \"Ajiambo\",\n \"is_active\": True,\n }\n\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data, customer_expected_json)", "def test_3(self):\n c1 = Store.Customer(\"harold\", \"qcf\", True)\n self.assertTrue(c1.is_premium_member(), \"not premium member\")", "def test_4(self):\n c1 = Store.Customer(\"harold\", \"qcf\", False)\n self.assertFalse(c1.is_premium_member(), \"IS premium member\")", "def test_get_records(self):\n pass", "def is_customer_out_sample(self, customerID):\n listCustomer = list(self._df_invoice_line_out_sample.CustomerID.unique())\n is_flag = customerID in listCustomer\n return is_flag", "def test_valid_customer(self):\n request = MockRequest()\n\n key_list = list_customer_keys(self._connection, _test_username)\n self.assertEqual(len(key_list), 1)\n key_id, key_value = key_list[0]\n\n authentication_string = compute_authentication_string(\n key_id,\n key_value,\n _test_username,\n _test_method,\n current_timestamp(),\n _test_uri\n )\n 
request.__dict__[\"authorization\"] = authentication_string.split()\n request.__dict__[\"method\"] = _test_method\n request.__dict__[\"headers\"] = {\n 'x-nimbus-io-timestamp' : str(current_timestamp())\n } \n request.__dict__[\"path_qs\"] = _test_uri\n\n authenticator = SqlAuthenticator()\n authenticated = authenticator.authenticate(\n self._connection, _test_username, request\n )\n self.assertTrue(authenticated)", "def test_get_customer_by_name(self):\n test_customer = self._create_customers(\"Alex\")\n test_customer.create()\n test_customer = self._create_customers(\"Sally\")\n test_customer.create()\n test_customer = self._create_customers(\"John\")\n test_customer.create()\n resp = self.app.get(\"/customers?name={}\".format(\"John\"))\n self.assertEqual(resp.status_code, status.HTTP_200_OK)\n data = resp.get_json()\n self.assertEqual(len(data), 1)\n self.assertEqual(data[0][\"name\"], test_customer.name)", "def test_get_all_samples(self):\n self.login()\n\n page_size = 20\n\n # hit the API endpoint for both pages\n for page in range(1, 3):\n\n data = {'page': page,\n 'page_size': page_size}\n response = self.client.get(reverse('searchsamples'), data, format='json')\n\n expected = Sample.objects.all().order_by(\"-received\")\n\n paginator = Paginator(expected, page_size)\n res = paginator.page(page)\n\n # format queryset into json for returning\n serializer = SampleSerializer(res, many=True)\n\n context = {\n 'data': serializer.data,\n 'more': (page == 1)\n }\n\n self.assertEqual(response.json(), context)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_conditional():\n # verify that conditioning increases the likelihood of getting a sample with the specified\n # categorical value", "def test_customer_access(self):\n # not logged-in\n for url in self.urls_get:\n response = self.client.get(url, follow=True)\n self.assertTrue(is_login_page(response))\n\n for url in self.urls_post:\n response = self.client.post(url['url'], url['data'], follow=True)\n self.assertTrue(is_login_page(response))\n\n # logged-in. 
Should throw a 403 or redirect to login\n self.client.login(self.user.email)\n for url in self.urls_get:\n response = self.client.get(url, follow=True)\n if response.status_code != 403:\n self.assertRedirects(response, reverse('wl_dashboard:tables_customer'), status_code=302,\n target_status_code=200)\n for url in self.urls_post:\n response = self.client.post(url['url'], url['data'], follow=True)\n if response.status_code != 403:\n self.assertRedirects(response, reverse('wl_dashboard:tables_customer'), status_code=302,\n target_status_code=200)", "def test_api_sample(self):\n # load api base\n r = requests.get('{server}/api/0.1/'.format(\n server=self.get_server_url())).json()\n # load samples from url specified in api base\n r = requests.get(r['samples']).json()\n # load a sample\n r = requests.get(r['samples'][0]['url']).json()\n self.assertIn('datetime', r)\n self.assertIn('value', r)\n self.assertIn('id', r)\n self.assertIn('url', r)\n self.assertIn('sensor', r)", "def test_verify_attrs_customer(self):\n self.assertEqual(self.customer.name, 'Customer Test')\n self.assertEqual(self.customer.document, '000.000.000-00')", "def test_client_verification_retrieve(self):\n pass", "async def test_successful_samples(self):\n self.set_source_parameter(\"test_result\", [\"success\"])\n response = await self.collect(get_request_json_return_value=self.JMETER_JSON)\n self.assert_measurement(response, value=\"242\", entities=[])", "def test_candidates_retrieve(self):\n pass", "def test_single_customer(self):\n\n create_invoice = single_customer(\"Susan Wong\", \"invoice.csv\")\n create_invoice(\"test_items.csv\")\n\n # Generate list of rentals\n with open('invoice.csv', 'r') as csvfile:\n rentals = []\n for row in csvfile:\n rentals.append(row)\n\n print(rentals)\n\n # Assert statements\n self.assertEqual(rentals[3], ('Susan Wong,AT92,Office Chair,13\\n'))\n self.assertEqual(rentals[4], ('Susan Wong,KE25,Espresso Machine,30\\n'))" ]
[ "0.6761152", "0.64114153", "0.6304436", "0.61961013", "0.6108211", "0.60567933", "0.59976864", "0.59706753", "0.5933909", "0.59289604", "0.59162027", "0.5916126", "0.5911726", "0.5911726", "0.58814156", "0.5880382", "0.5877066", "0.58492786", "0.5835973", "0.57918143", "0.57804745", "0.5769302", "0.5756398", "0.57443035", "0.57378846", "0.5716104", "0.5682065", "0.5676595", "0.5674474", "0.5671086" ]
0.6416844
1
Read in `file` and print out the frequency of words in that file.
def print_word_freq(file):
    text_file = open(file, 'r')
    contents = text_file.read()
    words = contents.split()

    def clean_text(text):
        text = text.lower()
        all_letters = "abcdefghijklmnopqrstuvwxyz"
        text_to_keep = ""
        for char in text:
            if char in all_letters:
                text_to_keep += char
        return text_to_keep

    clean_words = []
    for word in words:
        clean_words.append(clean_text(word))
    go_words = [word for word in clean_words if word not in STOP_WORDS]
    word_count = {}
    for go_word in go_words:
        word_count.update({go_word: go_words.count(go_word)})
    sorted_word_count = sorted(word_count.items(), key=lambda x: x[1], reverse=True)
    longest_word_len = len(get_longest_word(words))
    for word, value in sorted_word_count[:10]:
        print(word.rjust(longest_word_len), "|", str(value).ljust(3), "*" * value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_word_freq(file):\n with open(file) as one_today:\n text = one_today.readlines()\n d = dict()\n for line in text:\n line = line.strip()\n line = line.lower()\n line = line.translate(line.maketrans(\"\", \"\", string.punctuation))\n words = line.split(\" \")\n for word in words:\n if word in d:\n d[word] = d[word] + 1\n elif word in STOP_WORDS:\n pass\n else:\n d[word] = 1\n for word in sorted(d, key=d.get, reverse=True):\n print(word, \"|\", d[word])", "def print_word_freq(file):\n with open(file) as text:\n text_string = str(text.readlines())\n text_string = text_string.replace(\",\", \"\")\n text_string = text_string.replace(\".\", \"\")\n text_string = text_string.replace(\"-\", \"\")\n text_string = text_string.replace(\"?\", \"\")\n text_string = text_string.replace(\":\", \"\")\n text_string = text_string.replace(\"'\", \"\")\n text_string = text_string.replace(\"\\\\n\", \"\")\n text_string = text_string.replace(\"[\", \"\")\n text_string = text_string.replace(\"]\", \"\")\n word_list = text_string.split()\n no_stop_words = []\n for word in word_list:\n if word in STOP_WORDS:\n pass\n else: no_stop_words.append(word)\n clean_list = {}\n for word in no_stop_words:\n clean_list[word] = no_stop_words.count(word) \n print(clean_list)", "def print_word_freq(file):\n# Opening file to be read\n with open(file, \"r\") as f:\n file_contents = f.read()\n\n\n# # Taking away punctuation and lowercase all words\n word_list = file_contents.lower().replace(',',' ').replace('.',' ').replace('!',' ').split()\n # print(word_list)\n\n nice_list = []\n for word in word_list:\n if word not in STOP_WORDS:\n nice_list.append(word)\n # print(nice_list)\n\n d = {}\n for word in nice_list:\n if word not in d.keys():\n d[word] = 1\n else:\n d[word] += 1 \n # print(sorted(d, key=d.get, reverse=True)\n # sorted(d, key=d.get, reverse=true)\n # print(d)\n\n # for word in sorted(d):\n # print((word, d[word]), end = \" \")\n\n d_filtered = sorted(d, key=d.get, reverse=True)\n for x in d_filtered:\n print(x, d[x])", "def word_frequency_from_file(cls, filename, monocase = False, pretty_print=False):\n\n # variable declarations\n all_words = [] # holds all the words\n word_pattern = r'[a-zA-Z]+'\n\n # file reading and regex logic\n with open(file=filename) as f:\n for line in f: # reading one line at a time\n match = re.findall(word_pattern, line)\n if match: # bingo\n if monocase:\n all_words += [x.lower() for x in match] # converting all to lowercase and appending to list\n else:\n all_words += match\n\n # converting all_words list to frequencies\n result = defaultdict(int)\n for word in all_words:\n result[word] += 1\n\n # del all_words\n del all_words\n\n if pretty_print:\n WordAnalysis.__pretty_print_word_frequency(result)\n return result", "def word_count_2(filename):\n\n with open(filename) as file_:\n # read file and lowercase all words\n words = file_.read().lower()\n # use translate to remove punc\n words = words.translate(None, string.punctuation)\n # call counter to count on split owrds\n word_counts = Counter(words.split())\n\n # print out items using iteritems (display, doesn't creat list) \n for word, count in word_counts.iteritems():\n print \"{} {}\".format(word, count)\n\n return word_counts", "def word_frequency_in_file(filename):\n words = {}\n fin = open(filename)\n punctuation = string.punctuation\n for line in fin:\n line = line.translate( # Replace punctuation with spaces\n str.maketrans(punctuation, ' ' * len(punctuation)))\n line = line.lower()\n line_words = line.split()\n for 
word in line_words: # Process each word in the line.\n if word in words:\n words[word] += 1\n else:\n words[word] = 1\n return words", "def print_wordcount(file_to_count):\n wordcount_dict = {}\n file_string = open(file_to_count).read()\n words = file_string.rstrip().split()\n \n for word in words:\n if word in wordcount_dict:\n value = wordcount_dict.get(word)\n value += 1\n wordcount_dict[word] = value\n else:\n wordcount_dict[word] = 1\n \n for key, value in wordcount_dict.items():\n print(key, value)\n \n return wordcount_dict", "def collect_frequencies(nameoffile):\n with open(nameoffile) as text:\n list_of_words = []\n for line in text:\n words = line.split()\n list_of_words = list_of_words + words\n list_of_words = [word.lower() for word in list_of_words]\n\n dict = Counter(list_of_words)\n print(dict)\n return dict", "def get_word_count(file_name):\n\n my_file = open(file_name)\n word_count = {}\n\n for line in my_file:\n stripped_line = line.rstrip()\n line_list = stripped_line.split(' ')\n line_list = [word.lower() for word in line_list]\n\n for word in line_list:\n word_count[word] = word_count.get(word, 0) + 1\n\n for word_in_count, count in word_count.iteritems():\n print \"{} {}\".format(word_in_count, count)\n\n my_file.close()\n # return word_count", "def print_word_freq(file):\n with open(file) as text:\n text = text.read().lower()\n text = text.replace(\"\\n\", \" \")\n text = text.replace(\"’\", \"\")\n # text = \" \".join(text.split())\n # print(text)\n for character in string.punctuation:\n text = text.replace(character, \"\")\n word_list = text.split()\n clean_list = []\n for word in word_list:\n if word not in STOP_WORDS:\n clean_list.append(word)\n \n\n # for stop_word in STOP_WORDS:\n # if stop_word in word_list:\n # word_list.remove(stop_word)\n\n\n new_dict = {}\n for word in clean_list:\n new_dict[word] = clean_list.count(word)\n sorted_dict = sorted(new_dict.items())\n print(sorted_dict)\n\n # print(f\"{key} | {value} {'*' * value}\")\n\n \n # for stop_word in STOP_WORDS:\n # text = text.replace(stop_word, \"\")\n\n # for word in word_list:\n # if word in string.punctuation:\n # #do something\n # if word in STOP_WORDS:\n\n \n # for stop_word in STOP_WORDS:\n # text = text.replace(stop_word, \"\")\n # print(text)", "def word_counts(file):\n words = defaultdict(int)\n regex = re.compile('[' + string.punctuation + ']')\n for line in open(file):\n for word in [regex.sub('', w) for w in line.lower().split()]:\n words[word] += 1\n\n return words", "def word_count(filename):\n \n word_counts = {}\n\n with open(filename) as file_:\n for line in file_:\n # strip white space\n words = line.split()\n # iterate over words and strip excess punctutation then add to dict\n for word in words:\n word = word.strip(\",.\\\";:?_!\").lower()\n word_counts[word] = word_counts.get(word, 0) + 1\n\n # print list of words and count\n for word, count in word_counts.iteritems():\n print \"{} {}\".format(word, count)", "def parse_file(input_file):\n # Automatically close the file after being used\n with open(input_file) as text:\n # Read file and split each word into an element in a list\n data = text.read().split()\n\n # Sort the list\n # Python sort automatically does lexical sorting\n data.sort()\n\n # For each word, use as Dictionary key and count the occurrences of the word and use as value\n frequency_table = {word: data.count(word) for word in data}\n\n # Return the frequency table\n return frequency_table", "def get_word_freq(filein):\n freq = {}\n\n # Open file handles with 
context manager\n with open(filein) as f:\n\n # Read a single line at a time so as not to crush memory\n for line in f:\n\n # Tokenize and iterate\n for word in line.split():\n\n # Use try/except instead of if/then for performance\n # Likely after the first 1M tweets that the key will be contained\n try:\n freq[word] += 1\n except KeyError:\n freq[word] = 1\n\n return freq", "def print_word_freq(file):\n # with open(file, 'r') as text the r as the second arguement means that my intentions are to read the file\n with open(file, 'r') as text:\n # this reads the entire file and puts this into text string\n text_string = text.read()\n # returns the string respresentation of text string without removing special characters so you can see what you need to remove\n # print(repr(text_string))\n # this removes the specified characters from the text string\n text_string = text_string.replace(\",\", \"\")\n text_string = text_string.replace(\".\", \"\")\n text_string = text_string.replace(\"—\", \" \")\n text_string = text_string.replace(\"-\", \" \")\n text_string = text_string.replace(\"?\", \"\")\n text_string = text_string.replace(\":\", \"\")\n text_string = text_string.replace(\"'\", \"\")\n text_string = text_string.replace(\"\\\\n\", \"\")\n text_string = text_string.replace(\"’\", \"\")\n text_string = text_string.replace(\"]\", \"\")\n text_string = text_string.replace(\"[\", \"\")\n text_string = text_string.replace(\"\\\"\", \"\")\n # takes the text string and makes all the characters lower case\n text_string = text_string.lower()\n # takes the text string and splits all the words into a list this splits from space to space\n words_list = text_string.split()\n # a dictionary is a key and a value\n no_stop_words = {}\n # for loop that will cycle through the words list\n for word in words_list:\n # checking to see if the word is stop words\n if word not in STOP_WORDS:\n # if the word is already in the dictionary no stop words increment the value by 1\n if word in no_stop_words:\n no_stop_words[word] += 1\n # if the word is not in the dictionary no stop words add this to the dictionary and give it a value of 1\n else:\n no_stop_words[word] = 1\n \n sorted_dict = {}\n sorted_keys = sorted(no_stop_words, key=no_stop_words.get, reverse=True)\n \n for w in sorted_keys:\n sorted_dict[w] = no_stop_words[w]\n \n for key in sorted_dict:\n print(f\"{key:>15} | {sorted_dict[key]:2} {'*' * sorted_dict[key]}\")\n \n # good practice to ensure that we are properly closing the file in use at the end of the function\n text.close()", "def print_word_freq(file):\n opened_file = open(file)\n text = opened_file.read()\n #print(text)\n #replace hyphens\n no_hyphen = text.replace(\"-\",\" \")\n #remove punctuation\n no_punctuation = \"\"\n for char in no_hyphen:\n if char not in punctuation:\n no_punctuation = no_punctuation + char\n #make everything lowercase\n lower_case_text = no_punctuation.lower()\n #print(lower_case_text)\n #split into words\n all_words = lower_case_text.split()\n #print(all_words)\n #remove stop words\n no_stop_words = []\n for each_word in all_words:\n if each_word not in STOP_WORDS:\n no_stop_words.append(each_word)\n #print(no_stop_words)\n #find the longest word to use for indention purposes\n word_length = 0\n for word in no_stop_words:\n if len(word) > word_length:\n #print (word, len(word))\n word_length = len(word)\n #print (word_length)\n #count remaining word usage\n word_counts = {}\n for word in no_stop_words:\n if word in word_counts:\n word_counts[word] +=1\n else: 
word_counts[word] = 1\n #print (word_counts)\n #sort words by frequency\n ordered_by_freq = (sorted(word_counts.items(), key=lambda seq: seq[1], reverse=True))\n #print (ordered_by_freq)\n #print words, freq, graph, indent, and add a space past the pipe for values less than 10\n for key, value in ordered_by_freq:\n indent = (word_length + 1 - len(key))\n space = \" \"\n star = \"*\"\n if value >= 10:\n print (indent * space, key, \" | \", value, value * star)\n else:\n print (indent * space, key, \" | \", value, value * star)\n \n\n \n \n\n \n\n \n\n\n\n #remove the stop words\n #count the frequency of the remaing words (see ex 6 for sort function)\n #output as a cord list, count and graph of *** (ex 7 for justify)", "def count_words(filename):", "def countWords(words, filename):\n\ttry:\n\t\tfile = codecs.open(filename, \"r\", \"utf8\")\n\t\ttokens = [ string.strip(string.lower(i)) for i in file.read().split() ]\n\t\tfor i in tokens:\n\t\t\twords[i] = words.get(i, 0) + 1\n\t\tfile.close()\n\texcept IOError:\n\t\tprint \"Cannot read from file:\", filename\n\treturn words", "def countWords(filename):\n with open(filename) as f:\n filetext = f.read()\n #words = re.findall(r'\\w+', filetext)\n words = re.findall(r'[a-zA-Z]+', filetext)\n return Counter(words)", "def count_words(filename):\n\ttry:\n\t\twith open(filename, encoding='utf-8') as f_obj:\n\t\t\tcontents = f_obj.read()\n\texcept FileNotFoundError:\n\t\tmsg = \"Sorry, the file \" + filename + \" does not exist.\"\n\t\tprint(msg)\n\telse:\n\t\t# Count the approximate number of words in the file.\n\t\twords = contents.split()\n\t\tnum_words = len(words)\n\t\tprint(\"The file \" + filename + \" has about \" + str(num_words) + \" words.\")", "def get_word_frequency():\n counter = Counter()\n with open('resource/word-count.txt', encoding=\"utf8\") as f:\n for line in f.readlines():\n try:\n word, count = line.split(':')\n if (word == \"RT\"):\n continue\n count = int(count)\n counter[word] += count\n except Exception as e:\n continue\n return counter", "def calculate_frequencies(file_contents):\n\n # list of uninteresting words\n uninteresting_words = [\"the\", \"a\", \"to\", \"if\", \"is\", \"it\", \"of\", \"and\", \"or\", \"an\", \"as\", \"i\", \"me\", \"my\", \\\n \"we\", \"our\", \"ours\", \"you\", \"your\", \"yours\", \"he\", \"she\", \"him\", \"his\", \"her\", \"hers\", \"its\", \"they\", \"them\", \\\n \"their\", \"what\", \"which\", \"who\", \"whom\", \"this\", \"that\", \"am\", \"are\", \"was\", \"were\", \"be\", \"been\", \"being\", \\\n \"have\", \"has\", \"had\", \"do\", \"does\", \"did\", \"but\", \"at\", \"by\", \"with\", \"from\", \"here\", \"when\", \"where\", \"how\", \\\n \"all\", \"any\", \"both\", \"each\", \"few\", \"more\", \"some\", \"such\", \"no\", \"nor\", \"too\", \"very\", \"can\", \"will\", \"just\"]\n \n # Dictionary to store frequencies\n frequencies = {}\n\n # Split the contents into words\n file_contents = file_contents.split()\n\n # loop through every word\n for line in file_contents:\n # Contatenate every word\n words = ''.join(w for w in line if w.isalpha())\n # Check if word is not a uninetersting_words\n if words.lower() not in uninteresting_words:\n # Get the number of occurences\n frequencies[words.lower()] = frequencies.get(words, 0) + 1\n\n # returns the frequencies from words\n return frequencies", "def get_word_freq_output(fname):\n try:\n words = read_words_from_file(fname) # read a list of words from the file\n\n except FileNotFoundError as e:\n return (1, str(e))\n\n else:\n downcased = 
make_all_lower(words) # transform all of the words to lower case\n punct_filtered = filter_punctuation(downcased) # remove punctuation\n word_filtered = remove_stop_words(punct_filtered) # remove the stop words\n word_counts = count_words(word_filtered) # get a count of all the words in the file\n pretty_word_frequency = get_word_frequency_string(word_counts) # print formatted\n return (0, pretty_word_frequency) # all done!", "def count_words(filename):\n # create a dictionary\n my_dict = {}\n with open(filename, 'r') as f:\n text = f.read()\n ''' fine, I have to use his method\n _words = text.split()\n # remove not alpha num bs\n # not gonna use anything fancy \n words = []\n for s in _words:\n w = \"\"\n for char in s:\n if char.isalpha():\n w += char\n words.append(w.lower())\n '''\n words = extract_words(text)\n \n for word in words:\n if word in my_dict:\n my_dict[word] += 1\n else:\n my_dict[word] = 1\n\n # open the file and read the text\n # extract each word in the file\n # count the number of times each word occurs.\n # return the dictionary with the word count.\n return my_dict", "def count_words(filename):\n try:\n with open(filename, encoding='utf-8') as f:\n contents = f.read()\n except FileNotFoundError:\n pass\n else:\n words = contents.split()\n num_words = len(words)\n print(f\"The file {filename} has about {num_words} words.\")\n\n #10-10", "def frequencies(filename):\n\n hashtable = QuadraticProbeTable()\n file = open(filename,'r')\n words = []\n\n for item in file:\n item = item.strip('\\n')\n if item not in hashtable:\n hashtable[item] = 1\n words.append(item)\n else: hashtable[item] = hashtable[item] + 1\n\n file = open('FREQUENCY.txt', 'w')\n words = heap_sort(words)\n for item in words: file.write(item + ' ' + str(hashtable[item]) + '\\n')", "def count_words(filename):\n\ttry:\n\t\twith open(filename, encoding = \"utf-8\") as f_odj:\n\t\t\tcontents = f_odj.read()\n\texcept FileNotFoundError:\n\t\tpass #this command tells python to print nothing when a command fails.\n\telse:\n\t\twords = contents.split()\n\t\tnum_words = len(words)\n\t\tprint(\"The file \" + filename + \" has about \" + str(num_words) + \n\t\t\t\" words.\")", "def read_words(f, words):\n with open(f) as file:\n for line in file:\n w = tokenizer.tokenize(line.strip())\n for word in w:\n try:\n words[word] += 1\n except:\n words[word] = 1", "def word_frequency():\n\n song = open(\"data/yellow_submarine.txt\")\n d = dict()\n for line in song:\n line = line.strip()\n line = line.lower()\n punctuations = \"\"\"!()-[]{};:'\"\\,<>./?@#$%^&*_~\"\"\" # remove punctuation https://www.programiz.com/python-programming/examples/remove-punctuation\n no_punct = \"\" # remove punctuation\n for char in line: # remove punctuation\n if char not in punctuations: # remove punctuation\n no_punct = no_punct + char # remove punctuation\n words = line.split(\" \")\n for word in words:\n d[word] = d.get(word, 0) + 1\n return d", "def word_count(input_file, word_freq=None):\n if word_freq is None:\n word_freq = collections.defaultdict(int)\n\n for l in input_file:\n for w in l.strip().split():\n word_freq[w] += 1\n\n return word_freq" ]
[ "0.80829746", "0.7587596", "0.7485543", "0.7430957", "0.73927045", "0.7385063", "0.7344275", "0.7333363", "0.73050195", "0.7280821", "0.72229356", "0.720026", "0.7187416", "0.714256", "0.714226", "0.7084864", "0.7044941", "0.6986009", "0.69144094", "0.687015", "0.68287617", "0.6812448", "0.676385", "0.6760171", "0.6735583", "0.67292374", "0.6693042", "0.66557336", "0.6646555", "0.66341865" ]
0.7941403
1
Test the `save_one_genome` method. Pass a taxon ID in the parameters.
def test_genbank_to_genome_taxonomy(self):
    result = self.gfu.genbank_to_genome(self.ctx, {
        'workspace_name': self.ws_name,
        'generate_ids_if_needed': 'true',  # why is this a string
        'taxon_id': '3702',
        'file': {
            'path': f"{_DATA_PATH}/wigglesworthia/genome.gb"
        },
        'genome_name': str(uuid4()),
    })
    ('result', result)
    ref = result[0]['genome_ref']
    self.assertTrue(ref, 'Genome ref exists')
    info = result[0]['genome_info']
    typ = info[2]
    self.assertTrue(typ.startswith('KBaseGenomes.Genome'))
    info_details = info[-1]
    self.assertEqual(info_details['Taxonomy'], (
        "cellular organisms;Eukaryota;Viridiplantae;"
        "Streptophyta;Streptophytina;Embryophyta;Tracheophyta;"
        "Euphyllophyta;Spermatophyta;Magnoliopsida;Mesangiospermae;"
        "eudicotyledons;Gunneridae;Pentapetalae;rosids;malvids;"
        "Brassicales;Brassicaceae;Camelineae;Arabidopsis"
    ))
    self.assertEqual(info_details['Size'], '697724')
    self.assertEqual(info_details['Source'], 'Genbank')
    self.assertEqual(info_details['Name'], 'Wigglesworthia glossinidia endosymbiont of Glossina brevipalpis')
    self.assertEqual(info_details['GC content'], '0.22479')
    self.assertEqual(info_details['Genetic code'], '11')
    self.assertEqual(info_details['Number of Genome Level Warnings'], '1')
    self.assertEqual(info_details['Source ID'], 'BA000021')
    self.assertEqual(info_details['Number of Protein Encoding Genes'], '20')
    self.assertEqual(info_details['Domain'], 'Eukaryota')
    self.assertTrue(info_details['Assembly Object'])
    self.assertEqual(info_details['Number contigs'], '1')
    self.assertEqual(info_details['Number of CDS'], '20')
    self.assertTrue(info_details['MD5'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_genbank_to_genome_invalid_taxon_id(self):\n result = self.gfu.genbank_to_genome(self.ctx, {\n 'workspace_name': self.ws_name,\n 'generate_ids_if_needed': 'true', # why is this a string\n 'taxon_id': '9999999999',\n 'file': {\n 'path': f\"{_DATA_PATH}/wigglesworthia/genome.gb\"\n },\n 'genome_name': str(uuid4()),\n })\n print('test_genbank_to_genome_invalid_taxon_id result', result)", "def test_get_genome_1(self):\n self.tkt1.data_add = set([\"\"])\n gnm = tickets.get_genome(self.tkt1, gnm_type=\"add\")\n with self.subTest():\n self.assertEqual(gnm.id, \"Trixie\")\n with self.subTest():\n self.assertEqual(gnm.name, \"Trixie_Draft\")\n with self.subTest():\n self.assertEqual(gnm.type, \"add\")\n with self.subTest():\n self.assertEqual(gnm.host_genus, \"\")\n with self.subTest():\n self.assertEqual(gnm.cluster, \"\")\n with self.subTest():\n self.assertEqual(gnm.subcluster, \"\")\n with self.subTest():\n self.assertEqual(gnm.annotation_status, \"\")\n with self.subTest():\n self.assertEqual(gnm.annotation_author, -1)\n with self.subTest():\n self.assertEqual(gnm.retrieve_record, -1)\n with self.subTest():\n self.assertEqual(gnm.accession, \"\")", "def genome(self, genome_id=\"hg19\"):\n self.command(\"genome %(genome_id)s\" % locals())", "def test_call_on_the_fly_blast_db(self):\r\n p = BlastTaxonAssigner({\r\n 'reference_seqs_filepath': self.reference_seqs_fp,\r\n 'id_to_taxonomy_filepath': self.id_to_taxonomy_fp})\r\n actual = p(self.input_seqs_fp)\r\n\r\n self.assertEqual(actual, self.expected1)", "def test_call_existing_blast_db(self):\r\n # build the blast database and keep track of the files to clean up\r\n blast_db, files_to_remove = \\\r\n build_blast_db_from_fasta_path(self.reference_seqs_fp)\r\n self._paths_to_clean_up += files_to_remove\r\n\r\n p = BlastTaxonAssigner({'blast_db': blast_db,\r\n 'id_to_taxonomy_filepath': self.id_to_taxonomy_fp})\r\n actual = p(self.input_seqs_fp)\r\n\r\n self.assertEqual(actual, self.expected1)", "def test_ncbi_sequence_info(self):\n # one accession2taxid\n params = self.default_params.copy()\n params[\"db_prefix\"] = self.results_dir + \"test_ncbi_sequence_info\"\n params[\"input_target\"] = \"sequence\"\n params[\"taxonomy\"] = \"ncbi\"\n params[\"taxonomy_files\"] = data_dir + \"build-custom/taxdump.tar.gz\"\n params[\"ncbi_sequence_info\"] = data_dir + \"build-custom/nucl_gb.accession2taxid.gz\"\n params[\"genome_size_files\"] = data_dir + \"build-custom/species_genome_size.txt.gz\"\n cfg = Config(\"build-custom\", **params)\n self.assertTrue(run_ganon(cfg, params[\"db_prefix\"]), \"ganon build-custom run failed\")\n res = build_sanity_check_and_parse(vars(cfg))\n self.assertIsNotNone(res, \"ganon build-custom sanity check failed\")\n\n # two accession2taxid, finds all on the first, skips second\n params = self.default_params.copy()\n params[\"db_prefix\"] = self.results_dir + \"test_ncbi_sequence_info_multi\"\n params[\"input_target\"] = \"sequence\"\n params[\"taxonomy\"] = \"ncbi\"\n params[\"taxonomy_files\"] = data_dir + \"build-custom/taxdump.tar.gz\"\n params[\"ncbi_sequence_info\"] = [data_dir + \"build-custom/nucl_gb.accession2taxid.gz\",\n data_dir + \"build-custom/nucl_gb.accession2taxid.gz\"]\n params[\"genome_size_files\"] = data_dir + \"build-custom/species_genome_size.txt.gz\"\n cfg = Config(\"build-custom\", **params)\n self.assertTrue(run_ganon(cfg, params[\"db_prefix\"]), \"ganon build-custom run failed\")\n res = build_sanity_check_and_parse(vars(cfg))\n self.assertIsNotNone(res, \"ganon build-custom sanity check 
failed\")\n\n # wrong accession2taxid\n params = self.default_params.copy()\n params[\"db_prefix\"] = self.results_dir + \"test_ncbi_sequence_info_wrong\"\n params[\"input_target\"] = \"sequence\"\n params[\"taxonomy\"] = \"ncbi\"\n params[\"taxonomy_files\"] = data_dir + \"build-custom/taxdump.tar.gz\"\n params[\"ncbi_sequence_info\"] = [data_dir + \"build-custom/assembly_summary.txt\",\n data_dir + \"build-custom/nucl_gb.accession2taxid.gz\"]\n params[\"genome_size_files\"] = data_dir + \"build-custom/species_genome_size.txt.gz\"\n cfg = Config(\"build-custom\", **params)\n self.assertTrue(run_ganon(cfg, params[\"db_prefix\"]), \"ganon build-custom run failed\")\n res = build_sanity_check_and_parse(vars(cfg))\n self.assertIsNotNone(res, \"ganon build-custom sanity check failed\")\n\n # fail accession2taxid\n params = self.default_params.copy()\n params[\"db_prefix\"] = self.results_dir + \"test_ncbi_sequence_info_fail\"\n params[\"input_target\"] = \"sequence\"\n params[\"taxonomy\"] = \"ncbi\"\n params[\"taxonomy_files\"] = data_dir + \"build-custom/taxdump.tar.gz\"\n params[\"ncbi_sequence_info\"] = data_dir + \"build-custom/assembly_summary.txt\" # wrong, should fail\n params[\"genome_size_files\"] = data_dir + \"build-custom/species_genome_size.txt.gz\"\n cfg = Config(\"build-custom\", **params)\n self.assertFalse(run_ganon(cfg, params[\"db_prefix\"]), \"ganon build-custom run failed\")", "def test_get_genome_8(self):\n self.tkt1.data_add = set([\"accession\"])\n gnm = tickets.get_genome(self.tkt1, gnm_type=\"add\")\n with self.subTest():\n self.assertEqual(gnm.host_genus, \"\")\n with self.subTest():\n self.assertEqual(gnm.accession, \"ABC123\")", "def is_single_genome(self, ctx, params):\n # ctx is the context object\n # return variables are: returnVal\n #BEGIN is_single_genome\n\n if 'workspace_name' not in params:\n raise ValueError('Parameter workspace_name is not set in input arguments')\n workspace_name = params['workspace_name']\n if 'id' not in params:\n raise ValueError('Parameter id is not set in input arguments')\n objid = params['id']\n\n token = ctx['token']\n wsClient = workspaceService(self.workspaceURL, token=token)\n try:\n\n objref = workspace_name + '/' + str(objid)\n\n # Note that results from the workspace are returned in a list\n returnVal = wsClient.get_objects([{'ref': objref}])[0]\n\n if returnVal is not None:\n if returnVal['data']['single_genome'] is not None:\n returnVal = returnVal['data']['single_genome']\n\n #print \"is_single_genome issingle \"+str(returnVal)\n except:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n lines = traceback.format_exception(exc_type, exc_value, exc_traceback)\n orig_error = ''.join(' ' + line for line in lines)\n raise ValueError('Error from workspace:\\n' + orig_error)\n\n #END is_single_genome\n\n # At some point might do deeper type checking...\n if not isinstance(returnVal, int):\n raise ValueError('Method is_single_genome return value ' +\n 'returnVal is not type int as required.')\n # return the results\n return [returnVal]", "def test_get_genome_7(self):\n self.tkt1.data_add = set([\"retrieve_record\"])\n gnm = tickets.get_genome(self.tkt1, gnm_type=\"add\")\n with self.subTest():\n self.assertEqual(gnm.host_genus, \"\")\n with self.subTest():\n self.assertEqual(gnm.retrieve_record, 1)", "def test_save(self):", "def test_save(self):", "def save(self, *args, **kwargs):\n self.chromosome_no = CHROMOSOME_STR_TO_CHROMOSOME_INT.get(self.chromosome, 0)\n super().save(*args, **kwargs)", "def save(self, *args, 
**kwargs):\n self.chromosome_no = CHROMOSOME_STR_TO_CHROMOSOME_INT.get(self.chromosome, 0)\n super().save(*args, **kwargs)", "def test_get_genome_2(self):\n self.tkt1.data_add = set([\"host_genus\"])\n gnm = tickets.get_genome(self.tkt1, gnm_type=\"add\")\n with self.subTest():\n self.assertEqual(gnm.host_genus, \"Mycobacterium\")\n with self.subTest():\n self.assertEqual(gnm.cluster, \"\")", "def do_save(self, arg):\n \treturn False", "def testGridSearchSingleTargetAndSave(self):\n grid = GridSearchCV(linear_model.Ridge(), _TEST_REGRESSION_PARAM_GRID)\n grid.fit(self._X, self._Y_single)\n y_pred = grid.best_estimator_.predict(self._X)\n with open(_TEST_SERIALIZED_DATA, 'w') as f:\n serializer.save(grid.best_estimator_, f)\n ridge = serializer.load(_TEST_SERIALIZED_DATA)\n self.assertTrue((y_pred == ridge.predict(self._X)).all())", "def test_unique_genome(self):\n p1 = self.player()\n p2 = self.player()\n self.assertTrue(p1.genome is p2.genome)", "def test_saving(self):\n if self.form.is_valid():\n self.compound = self.form.save()\n self.assertIsNotNone(self.compound.id)", "def try_insert_genome(self, genome):\n raise Exception(\"called abstract insert_genome method\")", "def testEnsemblToGeneFile(self):\n\n e2g = EnsemblToGeneFile(self.enstogenefile)\n\n self.assertTrue(e2g)\n\n self.assertTrue(len(e2g.geneids) == 38803)\n self.assertTrue(len(e2g.tranids) == 94647)", "def test_client_tax_information_create(self):\n pass", "def test_get_genome_5(self):\n self.tkt1.data_add = set([\"annotation_status\"])\n gnm = tickets.get_genome(self.tkt1, gnm_type=\"add\")\n with self.subTest():\n self.assertEqual(gnm.host_genus, \"\")\n with self.subTest():\n self.assertEqual(gnm.annotation_status, \"final\")", "def test_update_sim():\n # Generate a random set of parameters, a random start date, and a Chromosome\n r_start_date, r_end_date = sga.generate_random_dates()\n r_param_ids = wp.flexible_generate()\n individual = sga.Chromosome(r_param_ids, fitness=100, start_date=r_start_date, end_date=r_end_date)\n # Put individual in the database\n db_conn = conn_to_db('optwrf_repeat.db')\n owp.insert_sim(individual, db_conn)\n print_database(db_conn)\n # Generate a new random start date and a Chromosome\n r_start_date, r_end_date = sga.generate_random_dates()\n individual = sga.Chromosome(r_param_ids, fitness=50, start_date=r_start_date, end_date=r_end_date)\n # Update the individual in the database database\n owp.update_sim(individual, db_conn)\n print_database(db_conn)", "def store_individual(self, hash, individual):\n if self.storage: \n self.storage.write_individual(hash, self.generation, individual )", "def save():", "def testSave(self):\n\n # Generate temp file path\n index = os.path.join(tempfile.gettempdir(), \"bm25\")\n os.makedirs(index, exist_ok=True)\n\n model = self.method(\"bm25\")\n model.save(index)\n model.load(index)", "def save(self, *args, **kwargs):\n empty_std_name = False\n if not self.standard_name or self.standard_name.isspace():\n empty_std_name = True\n\n empty_sys_name = False\n if not self.systematic_name or self.systematic_name.isspace():\n empty_sys_name = True\n\n if empty_std_name and empty_sys_name:\n raise ValueError(\n \"Both standard_name and systematic_name are empty\")\n\n super(Gene, self).save(*args, **kwargs) # Call the \"real\" save().", "def test_save_method(self):\n\n models.storage.save()\n self.assertTrue(os.path.exists('file.json'))", "def tax_id(self, tax_id: str):\n\n self._tax_id = tax_id", "def save(self):\n payload = {\n \"test_id\": self.test_id,\n 
\"test_case_name\": self.test_case_name,\n \"epoch_timestamp\": self.epoch_timestamp,\n \"human_timestamp\": self.human_timestamp,\n \"verification_name\": self.verification_name,\n \"status\": self.status,\n \"value\": self.value,\n \"boundary\": self.boundary\n }\n return self.insert_boundaries_test_evidence(self.test_case_name, payload)" ]
[ "0.624403", "0.5778086", "0.56341624", "0.56112194", "0.54687953", "0.5259953", "0.52500635", "0.52462626", "0.51927733", "0.51524097", "0.51524097", "0.512032", "0.512032", "0.5073122", "0.50706995", "0.5041771", "0.49938104", "0.49879703", "0.49636328", "0.49629033", "0.49584398", "0.4946367", "0.49146137", "0.4852444", "0.48520464", "0.48362443", "0.48190966", "0.48118356", "0.48017284", "0.47924262" ]
0.6215281
1
Test the `save_one_genome` method. Pass a taxon ID in the parameters.
def test_genbank_to_genome_invalid_taxon_id(self): result = self.gfu.genbank_to_genome(self.ctx, { 'workspace_name': self.ws_name, 'generate_ids_if_needed': 'true', # why is this a string 'taxon_id': '9999999999', 'file': { 'path': f"{_DATA_PATH}/wigglesworthia/genome.gb" }, 'genome_name': str(uuid4()), }) print('test_genbank_to_genome_invalid_taxon_id result', result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_genbank_to_genome_taxonomy(self):\n result = self.gfu.genbank_to_genome(self.ctx, {\n 'workspace_name': self.ws_name,\n 'generate_ids_if_needed': 'true', # why is this a string\n 'taxon_id': '3702',\n 'file': {\n 'path': f\"{_DATA_PATH}/wigglesworthia/genome.gb\"\n },\n 'genome_name': str(uuid4()),\n })\n ('result', result)\n ref = result[0]['genome_ref']\n self.assertTrue(ref, 'Genome ref exists')\n info = result[0]['genome_info']\n typ = info[2]\n self.assertTrue(typ.startswith('KBaseGenomes.Genome'))\n info_details = info[-1]\n self.assertEqual(info_details['Taxonomy'], (\n \"cellular organisms;Eukaryota;Viridiplantae;\"\n \"Streptophyta;Streptophytina;Embryophyta;Tracheophyta;\"\n \"Euphyllophyta;Spermatophyta;Magnoliopsida;Mesangiospermae;\"\n \"eudicotyledons;Gunneridae;Pentapetalae;rosids;malvids;\"\n \"Brassicales;Brassicaceae;Camelineae;Arabidopsis\"\n ))\n self.assertEqual(info_details['Size'], '697724')\n self.assertEqual(info_details['Source'], 'Genbank')\n self.assertEqual(info_details['Name'], 'Wigglesworthia glossinidia endosymbiont of Glossina brevipalpis')\n self.assertEqual(info_details['GC content'], '0.22479')\n self.assertEqual(info_details['Genetic code'], '11')\n self.assertEqual(info_details['Number of Genome Level Warnings'], '1')\n self.assertEqual(info_details['Source ID'], 'BA000021')\n self.assertEqual(info_details['Number of Protein Encoding Genes'], '20')\n self.assertEqual(info_details['Domain'], 'Eukaryota')\n self.assertTrue(info_details['Assembly Object'])\n self.assertEqual(info_details['Number contigs'], '1')\n self.assertEqual(info_details['Number of CDS'], '20')\n self.assertTrue(info_details['MD5'])", "def test_get_genome_1(self):\n self.tkt1.data_add = set([\"\"])\n gnm = tickets.get_genome(self.tkt1, gnm_type=\"add\")\n with self.subTest():\n self.assertEqual(gnm.id, \"Trixie\")\n with self.subTest():\n self.assertEqual(gnm.name, \"Trixie_Draft\")\n with self.subTest():\n self.assertEqual(gnm.type, \"add\")\n with self.subTest():\n self.assertEqual(gnm.host_genus, \"\")\n with self.subTest():\n self.assertEqual(gnm.cluster, \"\")\n with self.subTest():\n self.assertEqual(gnm.subcluster, \"\")\n with self.subTest():\n self.assertEqual(gnm.annotation_status, \"\")\n with self.subTest():\n self.assertEqual(gnm.annotation_author, -1)\n with self.subTest():\n self.assertEqual(gnm.retrieve_record, -1)\n with self.subTest():\n self.assertEqual(gnm.accession, \"\")", "def genome(self, genome_id=\"hg19\"):\n self.command(\"genome %(genome_id)s\" % locals())", "def test_call_on_the_fly_blast_db(self):\r\n p = BlastTaxonAssigner({\r\n 'reference_seqs_filepath': self.reference_seqs_fp,\r\n 'id_to_taxonomy_filepath': self.id_to_taxonomy_fp})\r\n actual = p(self.input_seqs_fp)\r\n\r\n self.assertEqual(actual, self.expected1)", "def test_call_existing_blast_db(self):\r\n # build the blast database and keep track of the files to clean up\r\n blast_db, files_to_remove = \\\r\n build_blast_db_from_fasta_path(self.reference_seqs_fp)\r\n self._paths_to_clean_up += files_to_remove\r\n\r\n p = BlastTaxonAssigner({'blast_db': blast_db,\r\n 'id_to_taxonomy_filepath': self.id_to_taxonomy_fp})\r\n actual = p(self.input_seqs_fp)\r\n\r\n self.assertEqual(actual, self.expected1)", "def test_ncbi_sequence_info(self):\n # one accession2taxid\n params = self.default_params.copy()\n params[\"db_prefix\"] = self.results_dir + \"test_ncbi_sequence_info\"\n params[\"input_target\"] = \"sequence\"\n params[\"taxonomy\"] = \"ncbi\"\n params[\"taxonomy_files\"] = data_dir + 
\"build-custom/taxdump.tar.gz\"\n params[\"ncbi_sequence_info\"] = data_dir + \"build-custom/nucl_gb.accession2taxid.gz\"\n params[\"genome_size_files\"] = data_dir + \"build-custom/species_genome_size.txt.gz\"\n cfg = Config(\"build-custom\", **params)\n self.assertTrue(run_ganon(cfg, params[\"db_prefix\"]), \"ganon build-custom run failed\")\n res = build_sanity_check_and_parse(vars(cfg))\n self.assertIsNotNone(res, \"ganon build-custom sanity check failed\")\n\n # two accession2taxid, finds all on the first, skips second\n params = self.default_params.copy()\n params[\"db_prefix\"] = self.results_dir + \"test_ncbi_sequence_info_multi\"\n params[\"input_target\"] = \"sequence\"\n params[\"taxonomy\"] = \"ncbi\"\n params[\"taxonomy_files\"] = data_dir + \"build-custom/taxdump.tar.gz\"\n params[\"ncbi_sequence_info\"] = [data_dir + \"build-custom/nucl_gb.accession2taxid.gz\",\n data_dir + \"build-custom/nucl_gb.accession2taxid.gz\"]\n params[\"genome_size_files\"] = data_dir + \"build-custom/species_genome_size.txt.gz\"\n cfg = Config(\"build-custom\", **params)\n self.assertTrue(run_ganon(cfg, params[\"db_prefix\"]), \"ganon build-custom run failed\")\n res = build_sanity_check_and_parse(vars(cfg))\n self.assertIsNotNone(res, \"ganon build-custom sanity check failed\")\n\n # wrong accession2taxid\n params = self.default_params.copy()\n params[\"db_prefix\"] = self.results_dir + \"test_ncbi_sequence_info_wrong\"\n params[\"input_target\"] = \"sequence\"\n params[\"taxonomy\"] = \"ncbi\"\n params[\"taxonomy_files\"] = data_dir + \"build-custom/taxdump.tar.gz\"\n params[\"ncbi_sequence_info\"] = [data_dir + \"build-custom/assembly_summary.txt\",\n data_dir + \"build-custom/nucl_gb.accession2taxid.gz\"]\n params[\"genome_size_files\"] = data_dir + \"build-custom/species_genome_size.txt.gz\"\n cfg = Config(\"build-custom\", **params)\n self.assertTrue(run_ganon(cfg, params[\"db_prefix\"]), \"ganon build-custom run failed\")\n res = build_sanity_check_and_parse(vars(cfg))\n self.assertIsNotNone(res, \"ganon build-custom sanity check failed\")\n\n # fail accession2taxid\n params = self.default_params.copy()\n params[\"db_prefix\"] = self.results_dir + \"test_ncbi_sequence_info_fail\"\n params[\"input_target\"] = \"sequence\"\n params[\"taxonomy\"] = \"ncbi\"\n params[\"taxonomy_files\"] = data_dir + \"build-custom/taxdump.tar.gz\"\n params[\"ncbi_sequence_info\"] = data_dir + \"build-custom/assembly_summary.txt\" # wrong, should fail\n params[\"genome_size_files\"] = data_dir + \"build-custom/species_genome_size.txt.gz\"\n cfg = Config(\"build-custom\", **params)\n self.assertFalse(run_ganon(cfg, params[\"db_prefix\"]), \"ganon build-custom run failed\")", "def test_get_genome_8(self):\n self.tkt1.data_add = set([\"accession\"])\n gnm = tickets.get_genome(self.tkt1, gnm_type=\"add\")\n with self.subTest():\n self.assertEqual(gnm.host_genus, \"\")\n with self.subTest():\n self.assertEqual(gnm.accession, \"ABC123\")", "def is_single_genome(self, ctx, params):\n # ctx is the context object\n # return variables are: returnVal\n #BEGIN is_single_genome\n\n if 'workspace_name' not in params:\n raise ValueError('Parameter workspace_name is not set in input arguments')\n workspace_name = params['workspace_name']\n if 'id' not in params:\n raise ValueError('Parameter id is not set in input arguments')\n objid = params['id']\n\n token = ctx['token']\n wsClient = workspaceService(self.workspaceURL, token=token)\n try:\n\n objref = workspace_name + '/' + str(objid)\n\n # Note that results from the workspace 
are returned in a list\n returnVal = wsClient.get_objects([{'ref': objref}])[0]\n\n if returnVal is not None:\n if returnVal['data']['single_genome'] is not None:\n returnVal = returnVal['data']['single_genome']\n\n #print \"is_single_genome issingle \"+str(returnVal)\n except:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n lines = traceback.format_exception(exc_type, exc_value, exc_traceback)\n orig_error = ''.join(' ' + line for line in lines)\n raise ValueError('Error from workspace:\\n' + orig_error)\n\n #END is_single_genome\n\n # At some point might do deeper type checking...\n if not isinstance(returnVal, int):\n raise ValueError('Method is_single_genome return value ' +\n 'returnVal is not type int as required.')\n # return the results\n return [returnVal]", "def test_get_genome_7(self):\n self.tkt1.data_add = set([\"retrieve_record\"])\n gnm = tickets.get_genome(self.tkt1, gnm_type=\"add\")\n with self.subTest():\n self.assertEqual(gnm.host_genus, \"\")\n with self.subTest():\n self.assertEqual(gnm.retrieve_record, 1)", "def test_save(self):", "def test_save(self):", "def save(self, *args, **kwargs):\n self.chromosome_no = CHROMOSOME_STR_TO_CHROMOSOME_INT.get(self.chromosome, 0)\n super().save(*args, **kwargs)", "def save(self, *args, **kwargs):\n self.chromosome_no = CHROMOSOME_STR_TO_CHROMOSOME_INT.get(self.chromosome, 0)\n super().save(*args, **kwargs)", "def test_get_genome_2(self):\n self.tkt1.data_add = set([\"host_genus\"])\n gnm = tickets.get_genome(self.tkt1, gnm_type=\"add\")\n with self.subTest():\n self.assertEqual(gnm.host_genus, \"Mycobacterium\")\n with self.subTest():\n self.assertEqual(gnm.cluster, \"\")", "def do_save(self, arg):\n \treturn False", "def testGridSearchSingleTargetAndSave(self):\n grid = GridSearchCV(linear_model.Ridge(), _TEST_REGRESSION_PARAM_GRID)\n grid.fit(self._X, self._Y_single)\n y_pred = grid.best_estimator_.predict(self._X)\n with open(_TEST_SERIALIZED_DATA, 'w') as f:\n serializer.save(grid.best_estimator_, f)\n ridge = serializer.load(_TEST_SERIALIZED_DATA)\n self.assertTrue((y_pred == ridge.predict(self._X)).all())", "def test_unique_genome(self):\n p1 = self.player()\n p2 = self.player()\n self.assertTrue(p1.genome is p2.genome)", "def test_saving(self):\n if self.form.is_valid():\n self.compound = self.form.save()\n self.assertIsNotNone(self.compound.id)", "def try_insert_genome(self, genome):\n raise Exception(\"called abstract insert_genome method\")", "def testEnsemblToGeneFile(self):\n\n e2g = EnsemblToGeneFile(self.enstogenefile)\n\n self.assertTrue(e2g)\n\n self.assertTrue(len(e2g.geneids) == 38803)\n self.assertTrue(len(e2g.tranids) == 94647)", "def test_client_tax_information_create(self):\n pass", "def test_get_genome_5(self):\n self.tkt1.data_add = set([\"annotation_status\"])\n gnm = tickets.get_genome(self.tkt1, gnm_type=\"add\")\n with self.subTest():\n self.assertEqual(gnm.host_genus, \"\")\n with self.subTest():\n self.assertEqual(gnm.annotation_status, \"final\")", "def test_update_sim():\n # Generate a random set of parameters, a random start date, and a Chromosome\n r_start_date, r_end_date = sga.generate_random_dates()\n r_param_ids = wp.flexible_generate()\n individual = sga.Chromosome(r_param_ids, fitness=100, start_date=r_start_date, end_date=r_end_date)\n # Put individual in the database\n db_conn = conn_to_db('optwrf_repeat.db')\n owp.insert_sim(individual, db_conn)\n print_database(db_conn)\n # Generate a new random start date and a Chromosome\n r_start_date, r_end_date = 
sga.generate_random_dates()\n individual = sga.Chromosome(r_param_ids, fitness=50, start_date=r_start_date, end_date=r_end_date)\n # Update the individual in the database database\n owp.update_sim(individual, db_conn)\n print_database(db_conn)", "def store_individual(self, hash, individual):\n if self.storage: \n self.storage.write_individual(hash, self.generation, individual )", "def save():", "def testSave(self):\n\n # Generate temp file path\n index = os.path.join(tempfile.gettempdir(), \"bm25\")\n os.makedirs(index, exist_ok=True)\n\n model = self.method(\"bm25\")\n model.save(index)\n model.load(index)", "def save(self, *args, **kwargs):\n empty_std_name = False\n if not self.standard_name or self.standard_name.isspace():\n empty_std_name = True\n\n empty_sys_name = False\n if not self.systematic_name or self.systematic_name.isspace():\n empty_sys_name = True\n\n if empty_std_name and empty_sys_name:\n raise ValueError(\n \"Both standard_name and systematic_name are empty\")\n\n super(Gene, self).save(*args, **kwargs) # Call the \"real\" save().", "def test_save_method(self):\n\n models.storage.save()\n self.assertTrue(os.path.exists('file.json'))", "def tax_id(self, tax_id: str):\n\n self._tax_id = tax_id", "def save(self):\n payload = {\n \"test_id\": self.test_id,\n \"test_case_name\": self.test_case_name,\n \"epoch_timestamp\": self.epoch_timestamp,\n \"human_timestamp\": self.human_timestamp,\n \"verification_name\": self.verification_name,\n \"status\": self.status,\n \"value\": self.value,\n \"boundary\": self.boundary\n }\n return self.insert_boundaries_test_evidence(self.test_case_name, payload)" ]
[ "0.62149376", "0.57781327", "0.5632507", "0.56106865", "0.5468393", "0.5260755", "0.5250465", "0.5245567", "0.519301", "0.51530224", "0.51530224", "0.5118209", "0.5118209", "0.50733745", "0.50697297", "0.50424945", "0.49940592", "0.49871263", "0.4963119", "0.49630195", "0.4958367", "0.49465567", "0.4915494", "0.48516217", "0.48512", "0.48368302", "0.48179072", "0.48123592", "0.4799017", "0.47912377" ]
0.6242467
0
At any point in time, the function returns whether the string consumed so far matches the given regular expression. It does so by comparing the current state with the end state `q3`. It also checks the `stopped` flag, which indicates that the FSM iteration had to be stopped due to bad input.
def does_match(self): if self.stopped: return False return self.current_state == self.q3
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def match_regex_3(s, r):\n s_len = len(s)\n r_len = len(r)\n stack = [(0, 0)]\n while stack:\n s_idx, r_idx = stack.pop()\n # Case: string is empty.\n if s_idx == s_len:\n if r_idx == r_len:\n return True\n if r[r_idx] == '*':\n stack.append((s_idx, r_idx + 1))\n continue\n # Case: string is not empty.\n if r_idx == r_len:\n continue\n regex_instruction = r[r_idx]\n if regex_instruction in ('.', s[s_idx]):\n stack.append((s_idx + 1, r_idx + 1))\n if regex_instruction == '*':\n stack.append((s_idx + 1, r_idx + 1))\n stack.append((s_idx + 1, r_idx))\n return False", "def isValid(text):\n return bool(re.search(r'\\b((kill|stop) the (alarm|clock|music))\\b', text, re.IGNORECASE))", "def isValid(text):\n return bool(re.search(r'\\b(start|stop) (look|watch|guard)ing\\b', text, re.IGNORECASE))", "def match_regex_4(s, r):\n s_len = len(s)\n r_len = len(r)\n stack = [(0, 0)]\n explored = set() # States we've already explored.\n def explore(s_idx, r_idx):\n if (s_idx, r_idx) not in explored:\n explored.add((s_idx, r_idx))\n stack.append((s_idx, r_idx))\n while stack:\n s_idx, r_idx = stack.pop()\n # Case: string is empty.\n if s_idx == s_len:\n if r_idx == r_len:\n return True\n if r[r_idx] == '*':\n explore(s_idx, r_idx + 1)\n continue\n # Case: string is not empty.\n if r_idx == r_len:\n continue\n regex_instruction = r[r_idx]\n if regex_instruction in ('.', s[s_idx]):\n explore(s_idx + 1, r_idx + 1)\n if regex_instruction == '*':\n explore(s_idx + 1, r_idx + 1)\n explore(s_idx + 1, r_idx)\n return False", "def test_regex(regular_expression, language):\r\n for case in cases:\r\n # Should it have matched?\r\n if did_match(regular_expression, case) != language(case):\r\n # Output problem case\r\n print(f\"Failure at case: {''.join(case)}\")\r\n print(f\"Regex output: {did_match(regular_expression, case)}\")\r\n print(f\"Should have been: {language(case)}\")\r\n print(\"Terminating...\")\r\n return False\r\n print(\"Regex recognizes the language for all test cases!\")\r\n return True", "def _check_logic_syntax(string):\n return logExp.matches(string)", "def read_until_regex(self, regex):\n with self.reading:\n while True:\n data = self.read_buffer.slice()\n match = regex.search(data)\n if match:\n break\n self.read_buffer.enqueue((yield self.base.read(self.bufsize)))\n do_return((self.read_buffer.dequeue(match.end()), match))", "def match(self, text):\n if self.sense:\n return (self.regex.match(text) != None)\n else:\n return (self.regex.match(text) == None)", "def did_match(regular_expression, case):\r\n # re.match returns 'None' if not matched so we cannot return it directly\r\n if re.match(regular_expression, ''.join(case)):\r\n return True\r\n return False", "def __wait_for_regex(self, regex: str, cancellable: bool = False) -> Union[str, CancelSignal]:\n log.debug(\"Waiting for a regex...\")\n while True:\n # Get the next update\n update = self.__receive_next_update()\n # If a CancelSignal is received...\n if isinstance(update, CancelSignal):\n # And the wait is cancellable...\n if cancellable:\n # Return the CancelSignal\n return update\n else:\n # Ignore the signal\n continue\n # Ensure the update contains a message\n if update.message is None:\n continue\n # Ensure the message contains text\n if update.message.text is None:\n continue\n # Try to match the regex with the received message\n match = re.search(regex, update.message.text)\n # Ensure there is a match\n if match is None:\n continue\n # Return the first capture group\n return match.group(1)", "def get_play_state():\r\n\toption = 
input('Choose P/p to Play, or Q/q to Quit: ').lower()\r\n\tif option == 'q':\r\n\t\treturn False\r\n\telif option == 'p':\r\n\t\treturn True\r\n\r\n\tprint('Invalid entry. Try again.')\r\n\r\n\treturn get_play_state() # Run function again until valid user input\r", "def match_regex_1(s, r):\n # Case: string is empty.\n if not s:\n if not r:\n return True\n if r[0] == '*':\n return match_regex_1(s, r[1:])\n return False\n # Case: string is not empty.\n if not r:\n return False\n regex_instruction = r[0]\n if regex_instruction in ('.', s[0]):\n return match_regex_1(s[1:], r[1:])\n if regex_instruction == '*':\n return match_regex_1(s[1:], r[1:]) or match_regex_1(s[1:], r)\n return False", "def brackets_match(string):\n expected = Stack()\n for character in string:\n if character == \"(\":\n expected.push(\")\")\n elif character == \"[\":\n expected.push(\"]\")\n elif character in \")]\":\n if expected and character == expected.top():\n expected.pop()\n else:\n return False\n return expected.is_empty()", "def isValid(text):\n return bool(re.search(r'\\blight|lights\\b', text, re.IGNORECASE))", "def isValid(text):\n return bool(re.search(r'\\bstocks\\b', text, re.IGNORECASE))", "def isMatch(self, s: str, p: str) -> bool:\n def is_match(self, text, pattern):\n if not pattern:\n return not text\n\n first_match = bool(text) and pattern[0] in {text[0], '.'}\n\n if len(pattern) >= 2 and pattern[1] == '*':\n return (self.isMatch(text, pattern[2:]) or\n first_match and self.isMatch(text[1:], pattern))\n else:\n return first_match and self.isMatch(text[1:], pattern[1:])\n\n def isMatch(self, text, pattern):\n memo = {}\n\n def dp(i, j):\n if (i, j) not in memo:\n if j == len(pattern):\n ans = i == len(text)\n else:\n first_match = i < len(text) and pattern[j] in {text[i], '.'}\n if j + 1 < len(pattern) and pattern[j + 1] == '*':\n ans = dp(i, j + 2) or first_match and dp(i + 1, j)\n else:\n ans = first_match and dp(i + 1, j + 1)\n\n memo[i, j] = ans\n return memo[i, j]\n\n return dp(0, 0)", "def end_input(self):\n inp = input()\n if inp.upper() == \"Q\":\n return False\n if inp == \"\" \\\n \"\":\n return True\n return self.end_input", "def search(self, regex):\n if isinstance(regex, str):\n regex = re.compile(regex, re.IGNORECASE)\n return regex.search(self.sequence)", "def check_input_by_regex(message, regex):\n while True:\n try:\n input_str = str(input(message)).capitalize()\n except ValueError:\n # input incorrect retry\n continue\n if not re.fullmatch(regex, input_str):\n # Value input incorrect\n continue\n else:\n return input_str", "def queue_regex(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"queue_regex\")", "def queue_regex(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"queue_regex\")", "def checkValidString(self, string: str) -> bool:\n @lru_cache(None)\n def dp(index, diff):\n \"\"\"\n index is the index of string\n diff the counts of '(' - counts of ')'\n \"\"\"\n\n if index == len_s:\n return diff == 0\n if abs(diff) > len_s - index:\n return False\n c = string[index]\n index += 1\n if c == '(':\n return dp(index, diff + 1)\n elif c == ')':\n if diff - 1 < 0:\n return False\n return dp(index, diff - 1)\n else:\n return dp(index, diff + 1) or dp(index, diff - 1) or dp(index, diff)\n\n len_s = len(string)\n return dp(0, 0)", "def match_term(self,state,re_term):\r\n return None", "def expect(self, pattern, timeout):\n import re\n prog = re.compile(pattern)\n start = time.time()\n s = ''\n while time.time() < start + timeout:\n b = self.port.read(1)\n 
if len(b) > 0:\n sys.stdout.write(b.decode('latin-1'))\n s += b.decode('latin-1')\n if prog.search(s) is not None:\n return True\n else:\n time.sleep(0.01)\n return False", "def test_match_regexp_including_start():\r\n runmatch(lcode)", "def validate_input(user_input: str) -> bool:\n\n if not user_input.islower():\n return False\n\n if user_input.endswith(\"yeet\"):\n return False\n \n if \"q\" or \"Q\" in user_input: # Check if q is a letter\n return False\n \n return True # If none of the conditions above are met", "def search(self, text):\n if self.sense:\n return (self.regex.search(text) != None)\n else:\n return (self.regex.search(text) == None)", "def match(self, string):\n matched = False\n cmd = None\n\n if string in self.commands.keys():\n matched = True\n cmd = string\n\n else:\n for command in self.commands.keys():\n if \"regex\" in self.commands[command].keys() \\\n and re.match(self.commands[command][\"regex\"], string):\n matched = True\n cmd = command\n break\n \n if cmd and len(cmd) > 0:\n self._last_matched_command = cmd\n else:\n self._last_matched_command = None\n\n return matched", "def isMatched(expr):\n S = Stack()\n n = len(expr)\n\n for i in range (0,n):\n \tsymb = expr[i] #next symbol\n \t# print(symb)\n\n \tif symb in ['{','(','[']:\n \t\tS.Push(symb)\n\n \telif symb in ['}',')',']']:\n\n \t\tif S.isEmpty():\n \t\t\treturn False\n \t\tif S.Top() == '{' and symb == '}':\n \t\t\tS.Pop()\n \t\telif S.Top() == '(' and symb == ')':\n \t\t\tS.Pop()\n \t\telif S.Top() == '[' and symb == ']':\n \t\t\tS.Pop()\n\n \telse:\n \t\tcontinue\n\n if S.isEmpty():\n \treturn True\n else:\n \treturn False\n\n # \telif symb in range(48,58):\n # \t\tcontinue\n\n # \telif symb in ['+','-','*','/','%']:\n # \t\tcontinue\n\n # \telse:\n # \t\tprint(\"Error\") \n # \t\treturn 0", "def match(self, token):\n try:\n if token == 'S' and is_symbol(self.the_input[self.index]) \\\n or self.the_input[self.index] == token:\n self.index += 1\n return True\n except IndexError:\n print 'Error on checking \\'' + token + \\\n '\\': the next token is empty'\n exit(1)\n print 'No' # there is improper grammar\n exit(1)" ]
[ "0.57386255", "0.5681869", "0.5660716", "0.5508027", "0.5500045", "0.53777856", "0.53166807", "0.52826416", "0.52527785", "0.5219115", "0.514601", "0.5101096", "0.5100061", "0.5095776", "0.5094811", "0.50932777", "0.5086449", "0.5085676", "0.5085387", "0.5077286", "0.5077286", "0.5044427", "0.5032902", "0.50232244", "0.5013049", "0.5007686", "0.50019044", "0.49948412", "0.49877232", "0.4987621" ]
0.65060884
0
BSURFS (7110, 71, 588, 1, 24, 190, 198, 189, 44, 188, 197, 190, 64, 106, 189, 196, 84, 195, 188, 106, 1)
BSURFS 1 24 190 198 189+
$ EID2 G1 G2 G3 EID3 G1 G2 G3
+ 44 188 197 190 64 106 189 196+
$ EID4 G1 G2 G3
+ 84 195 188 106
def _read_bsurfs(self, data: bytes, n: int) -> int: bsurfs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def avgfst_between(fstfile):\n fstlist = fstopen(fstfile)\n return str(round(sum(fstlist)/len(fstlist), 8))", "def valid_svid(gdf):\n #define all valid satellites\n svid = ('G01', 'G02', 'G03', 'G04', 'G05', 'G06', 'G07', 'G08', 'G09', 'G10', 'G11', 'G12', 'G13', 'G14', 'G15', 'G16', 'G17', 'G18', 'G19', 'G20', 'G21', 'G22', 'G23', 'G24', 'G25', 'G26', 'G27', 'G28', 'G29', 'G30', 'G31', 'G32',\n'R01', 'R02', 'R03', 'R04', 'R05', 'R06', 'R07', 'R08', 'R09', 'R10', 'R11', 'R12', 'R13', 'R14', 'R15', 'R16', 'R17', 'R18', 'R19', 'R20', 'R21', 'R22', 'R23', 'R24',\n'C01', 'C02', 'C03', 'C04', 'C05', 'C06', 'C07', 'C08', 'C09', 'C10', 'C11', 'C12', 'C13', 'C14', 'C15', 'C16', 'C17', 'C18', 'C19', 'C20', 'C21', 'C22', 'C23', 'C24', 'C25', 'C26', 'C27', 'C28', 'C29', 'C30', 'C31', 'C32', 'C33', 'C34', 'C35', 'C36', 'C37',\n'E01', 'E02', 'E03', 'E04', 'E05', 'E06', 'E07', 'E08', 'E09', 'E10', 'E11', 'E12', 'E13', 'E14', 'E15', 'E16', 'E17', 'E18', 'E19', 'E20', 'E21', 'E22', 'E23', 'E24', 'E25', 'E26', 'E27', 'E28', 'E29', 'E30', 'E31', 'E32', 'E33', 'E34', 'E35', 'E36')\n \n gdf = gdf.loc[gdf['svid'].isin(svid),['svid','time','Cn0DbHz','geometry']]\n \n return gdf", "def get_ref_values(sbf):\n\n filename = path.join(REFERENCE_DIR, \"{}.npy\".format(sbf))\n values = np.split(np.load(filename), ANGULAR_POINTS + 1)\n return values[0], values[1:]", "def calcSfb(Idc,I3, R, Tj,g1,g2,g3):\n t = np.linspace(0, 1, 1000, endpoint=False)\n It = Idc[:, None]+I3*np.cos(2*np.pi*t[None, :])\n St = qnoise_fit.Sii(It, R, Tj)\n Dt = qnoise_fit.dSiidV(It, R, Tj)\n S0 = qnoise_fit.Fcoef(St, t, 0)\n D3 = qnoise_fit.Fcoef(Dt, t, 1)[0]\n S3 = qnoise_fit.Fcoef(St, t, 1)[0]\n D0 = qnoise_fit.Fcoef(Dt, t, 0)\n D6 = qnoise_fit.Fcoef(Dt, t, 2)[0]\n return g1*S0*D3+g2*S3*D0+g3*D6*S3", "def get_basis_from_BSE(structure, basis, full=False, if_missing=None):\n\n base_url = \"http://basissetexchange.org\"\n basis = basis.lower()\n\n headers = {\n 'User-Agent': 'BSE Example Python Script',\n 'From': '[email protected]'\n }\n\n r = req.get(base_url + '/api/basis/{}/format/gaussian94'.format(basis),\n headers=headers\n )\n\n description = '\\n'.join([line for line in r.text.split('\\n') if '!' in line])\n\n r = req.get(base_url + '/api/references/{}/format/bib'.format(basis),\n headers=headers\n )\n\n citation = r.text\n\n symbols = structure.get_symbols()\n if not full:\n symbols = np.unique(symbols)\n\n atoms = []\n for symbol in symbols:\n\n params = {'elements': [symbol]}\n r = req.get(base_url + '/api/basis/{}/format/gaussian94'.format(basis),\n params=params,\n headers=headers\n )\n # https://www.basissetexchange.org/basis/coemd-2/format/gaussian94/?version=0&elements=6,7,8\n if r.status_code != 200:\n if if_missing is not None:\n r = req.get(base_url + '/api/basis/{}/format/gaussian94'.format(if_missing),\n params=params,\n headers=headers\n )\n if r.status_code != 200:\n raise Exception('Basis {}, {} not found for atom {} in BSE'.format(basis, if_missing, symbol))\n else:\n #raise RuntimeError(\"Could not obtain data from the BSE. 
Check the error information above\")\n raise Exception('Basis {} not found for atom {} in BSE'.format(basis, symbol))\n\n basis_data = []\n for line in r.text.split('\\n'):\n if len(line) !=0 and line[0] not in ['!']:\n basis_data.append(line.replace('D+', 'E+').replace('D-', 'E-'))\n\n\n atoms.append(_txt_to_basis_dict(basis_data))\n\n basis_set = {'name': basis,\n 'primitive_type': 'gaussian',\n 'atoms': atoms}\n\n return basis_set", "def inst_bp(instrument,array=\"2\"):\n\n if instrument == \"MUSTANG2\" or instrument == \"MUSTANG\":\n srms = (300*u.um).to(\"m\") # surface RMS (microns)\n ### Reference: https://science.nrao.edu/facilities/gbt/proposing/GBTpg.pdf\n EA90 = 0.36 # Aperture efficiency at 90 GHz\n ### The beam efficiencies should be taken as 1.37* Aperture Efficiency\n R90 = np.exp(-4.0*np.pi*(srms/(const.c/(9.0e10*u.s**-1))).value) #\n Gnot = EA90/R90 # Unphysical, but see documentation...\n if instrument == \"MUSTANG2\":\n flow = 75.0 # GHz\n fhig = 105.0 # GHz\n else:\n flow = 82.5 # GHz\n fhig = 97.5 # GHz\n \n farr = np.arange(flow,fhig,1.0) # frequency array.\n tran = farr*0.0 + 1.0 # Let the transmission be unity everywhere.\n Larr = const.c.value/(farr*1.0e9) # Keep calm and carry on.\n ### Old formula:\n #Ruze = Gnot * np.exp(-4.0*np.pi*(srms.value)/Larr)\n ### Correct formula: (10 April 2018)\n Ruze = Gnot * np.exp(-(4.0*np.pi*srms.value/Larr)**2)\n NRuz = Ruze / np.max(Ruze) # Normalize it\n band = tran * Ruze # Bandpass, with (unnormalized) Ruze efficiency\n \n if instrument == \"NIKA2\" or instrument == \"NIKA\":\n caldir='/home/romero/NIKA2/NIKA_SVN/Processing/Pipeline/Calibration/BP/'\n bpfile=caldir+'Transmission_2017_Jan_NIKA2_v1.fits'\n hdulist = fits.open(bpfile)\n\n if array == \"1H\": # 1mm (260 GHz) array, Horizontal Polarization\n tbdata = hdulist[1].data # 1H\n freq = tbdata.field(0)\n tran = tbdata.field(1)\n erro = tbdata.field(2)\n atmt = tbdata.field(3)\n cfreq1h = np.sum(freq*tran)/np.sum(tran)\n \n if array == \"1V\": # 1mm (260 GHz) array, Vertical Polarization\n tbdata = hdulist[2].data # 1V\n freq = tbdata.field(0)\n tran = tbdata.field(1)\n erro = tbdata.field(2)\n atmt = tbdata.field(3)\n cfreq1v = np.sum(freq*tran)/np.sum(tran)\n \n if array == \"2\": # 2mm (150 GHz) array\n tbdata = hdulist[3].data # 2\n freq = tbdata.field(0)\n tran = tbdata.field(1)\n erro = tbdata.field(2)\n atmt = tbdata.field(3)\n cfreq2 = np.sum(freq*tran)/np.sum(tran)\n\n ### Trim the zero-frequency listing, if any.\n gi=np.where(freq > 0)\n freq = freq[gi]\n tran = tran[gi]\n erro = erro[gi]\n atmt = atmt[gi]\n \n### Calculate Aperture efficiencies from information found at:\n### http://www.iram.es/IRAMES/mainwiki/Iram30mEfficiencies\n Beff = 0.630 # at 210 GHz\n Aeff = Beff/1.27 # See text on webpage\n srms = (66.0*u.um).to(\"m\") # surface RMS (microns)\n R210 = np.exp(-4.0*np.pi*(srms/(const.c/(2.1e11*u.s**-1))).value) #\n Gnot = Aeff/R210 # Unphysical, but see documentation...\n\n Larr = const.c.value/(freq*1.0e9) # Keep calm and carry on. 
\n Ruze = Gnot * np.exp(-4.0*np.pi*(srms.value)/Larr)\n NRuz = Ruze / np.max(Ruze) # Normalize it\n band = tran * Ruze # Bandpass, with (unnormalized) Ruze efficiency\n farr = freq\n \n#########################################################################\n\n if instrument == 'ACT90':\n srms = (27.0*u.um).to(\"m\") # surface RMS (microns)\n EA90 = 0.95 # I'm making this number up...\n R90 = np.exp(-4.0*np.pi*(srms/(const.c/(9.0e10*u.s**-1))).value) #\n Gnot = EA90/R90 # Unphysical, but see documentation...\n flow = 65.0 # GHz\n fhig = 125.0 # GHz\n farr = np.arange(flow,fhig,1.0) # frequency array.\n freq_ref = 90.0 # I took EA90 to be a fictitious aperature efficiency at 90 GHz\n band = ruze_eff(farr,freq_ref,EA90,srms)\n\n if instrument == 'ACT150':\n srms = (27.0*u.um).to(\"m\") # surface RMS (microns)\n EA90 = 0.95 # I'm making this number up...\n R90 = np.exp(-4.0*np.pi*(srms/(const.c/(9.0e10*u.s**-1))).value) #\n Gnot = EA90/R90 # Unphysical, but see documentation...\n flow = 120.0 # GHz\n fhig = 180.0 # GHz\n farr = np.arange(flow,fhig,1.0) # frequency array.\n freq_ref = 90.0 # I took EA90 to be a fictitious aperature efficiency at 90 GHz\n band = ruze_eff(farr,freq_ref,EA90,srms)\n\n\n return band, farr", "def DFTB_SCF(struc, skf_dir, kresol=0.10, folder='tmp', disp=None, filename=None):\n if not os.path.exists(folder):\n os.makedirs(folder)\n cwd = os.getcwd()\n os.chdir(folder)\n\n kpts = Kgrid(struc, kresol)\n atom_types = set(struc.get_chemical_symbols())\n kwargs = make_Hamiltonian(skf_dir, atom_types, disp, kpts, write_band=True)\n\n calc = Dftb(label='test',\n atoms=struc,\n kpts=kpts,\n **kwargs,\n )\n\n struc.set_calculator(calc)\n struc.get_potential_energy()\n eigvals = calc.read_eigenvalues()[0]\n ne = calc.read_electrons()\n nband = int(ne/2)\n vbm = eigvals[:, nband-1].max()\n cbm = eigvals[:, nband].min()\n gap = cbm - vbm\n #if filename is not None:\n # # plot band structure\n os.chdir(cwd)\n return gap", "def __bytes__(self):\n line1=self.name.encode(\"ascii\").ljust(24,b\" \")\n line2=b\"1 %05dU %02d%03d%-3b %02d%012.8f %c.%08d %c%05d%+01d %c%05d%+01d 0 %04d\" %\\\n (self.id,self.desig[\"year\"]%100,self.desig[\"launch\"],\\\n self.desig[\"object\"].encode(\"ascii\"),self.epoch[\"year\"]%100,\\\n self.epoch[\"day\"],b\"-\" if self.fdmm<0 else b\" \",abs(self.fdmm*1.e8),\\\n b\"-\" if self.sdmm<0 else b\" \",\\\n abs(self.sdmm*pow(10,5-(ceil(log(abs(self.sdmm),10)) if \\\n abs(self.sdmm)>0 else 0))),\\\n (ceil(log(abs(self.sdmm),10)) if abs(self.sdmm)>0 else 0),\\\n b\"-\" if self.bstar<0 else b\" \",\\\n abs(self.bstar*pow(10,5-(ceil(log(abs(self.bstar),10)) if \\\n abs(self.bstar)>0 else 0))),\\\n (ceil(log(abs(self.bstar),10)) if abs(self.bstar)>0 else 0),\\\n self.nr,)\n line3=b\"2 %05d %08.4f %08.4f %07d %08.4f %08.4f %011.8f%05d\" %\\\n (self.id,self.inc,self.raan,self.ecc*1.e7,self.aop,\\\n self.ma,self.mm,self.revol,)\n l2cs=0\n for c in line2:\n bc=bytes([c])\n if bc.isdigit():\n l2cs+=int(bc.decode(\"ascii\"))\n elif bc==b\"-\":\n l2cs+=1\n l2cs%=10\n\n l3cs=0\n for c in line3:\n bc=bytes([c])\n if bc.isdigit():\n l3cs+=int(bc.decode(\"ascii\"))\n elif bc==b\"-\":\n l3cs+=1\n l3cs%=10\n return line1+b\"\\r\\n\"+line2+str(l2cs).encode(\"ascii\")+b\"\\r\\n\"+line3+\\\n str(l3cs).encode(\"ascii\")+b\"\\r\\n\"", "def read_srim(fp):\n en_units = {'eV': 1e-6, 'keV': 1e-3, 'MeV': 1, 'GeV': 1e3}\n dist_units = {'um': 1e-4, 'mm': 1e-1, 'cm': 1, 'm': 1e2}\n\n res = []\n\n with open(fp) as f:\n for line in f:\n if 'Density' in line:\n litems = 
line.strip().split()\n gas_dens = float(litems[3])\n assert litems[4] == 'g/cm3', 'Units for density are not g/cm3: {}'.format(litems[4])\n if 'Straggling' in line:\n f.readline()\n break\n for line in f:\n if '-------' in line:\n break\n litems = line.strip().split()\n if len(litems) != 10:\n raise ValueError('Wrong number of entries in line!')\n\n en = float(litems[0]) * en_units[litems[1]]\n dedx_elec = float(litems[2]) * 1000 # convert MeV/(mg/cm^2) to MeV/(g/cm^2)\n dedx_nuc = float(litems[3]) * 1000\n proj_range = float(litems[4]) * dist_units[litems[5]] * gas_dens\n\n res.append({'energy': en, 'dedx': dedx_elec + dedx_nuc, 'range': proj_range})\n\n return res", "def Fbol_SDSSg(Teff_vec, gmag_vec):\n Teff_vec = np.float64(Teff_vec)\n \n hdulist = fits.open('../Scratchwork/SDSS_filter_curves.fits')\n data = hdulist[2].data\n wvln = data['wavelength'] #ang\n resp = data['respt']\n wvln = wvln/1e4 #microns\n minw, maxw = wvln[0], wvln[-1]\n thru = interp1d(wvln, resp, bounds_error=False, fill_value=0.0)\n \n hc_k = 14388.8 #microns*K\n hc4pi = 4.99 #10^18 J*micron\n rad = 7.57e-16 #10^18 J micron-3 K-4\n def g2bol_ratio(Teff):\n plnk = lambda w: hc4pi / w**5 / (np.exp(hc_k/Teff/w) - 1.0) #J micron-4\n total = rad*Teff**4 #J micron-3\n part = quad(lambda w: plnk(w)*thru(w), minw, maxw)[0]\n return part/total\n #I checked and integrating plnk from 0 to inf gives the same as total\n \n g2bol_vec = np.array(map(g2bol_ratio, Teff_vec))\n FgF0 = 10**(-2.5*gmag_vec) #ratio of flux to a reference flux of mag 0\n FbolF0 = FgF0/g2bol_vec\n return FbolF0", "def get_signal_gwgds1072au(a_signal_packed: bytes, a_scale : float ) -> list:\n the_return = None\n the_signal_packed=a_signal_packed\n the_scale=a_scale\n the_signal_sequence=[]\n the_signal=0.0 #TODO reminder check this before allowing it\n the_info=[]\n n=4\n bla=0\n blb=bla+n\n print(the_signal_packed)\n JX=unpack('>%sh' % 2 ,the_signal_packed[bla:blb])\n for ii in range(0,2003):\n the_info.append(unpack('>%sh' % 2 ,the_signal_packed[bla:blb])[0])\n bla=bla+n\n blb=blb+n\n #TODO get the potential scale\n #TODO get the offset\n #TODO get the time scale\n\n return the_info", "def parse_genbank(email = \"[email protected]\", ref_id = \"NC_045512.2\"):\n ## ============ Fetch genbank record ============ ##\n # Set email \n Entrez.email = email\n # Make handel object \n handle = Entrez.efetch(db=\"nuccore\", id=ref_id, rettype=\"gb\", retmode=\"text\")\n # Save the record -- only extract first record (there should be only one)\n record = next(SeqIO.parse(handle, \"gb\"))\n \n ## ============ Parse genbank record ============ ##\n # Dictionary to hold the open reading frames\n ORFS = dict()\n for feature in record.features:\n # Only extract the coding sequences\n if feature.type == \"CDS\": \n # Special considerations for overlapping ORF\n if feature.qualifiers.get(\"gene\")[0] == \"ORF1ab\":\n # Get the open reading frame that contains the ribosomal slippage\n if \"-1 ribosomal frameshift\" in str(feature.qualifiers.get(\"note\")): \n # Extract the non-overlapping and frameshifted indices\n name = \"ORF1ab\"\n ORFS[name] = feature\n # Get the open reading frame that just contains the 'a' portion\n else:\n # Extract the non-overlapping and frameshifted indices\n name = \"ORF1a\"\n ORFS[name] = feature\n # Iterate ove the remaining trivial CDS \n else:\n # Build the lookup dictionary with the normal sequences\n name = feature.qualifiers.get(\"gene\")[0]\n ORFS[name] = feature\n # Return Lookup dictionary\n return ORFS, record.seq", "def 
dbsnp_freq(record):\n try:\n kg = re.search(r\"(CAF=)([0-9,.e-]+)\", record[7]).group(2)\n kg_af = float(kg.split(\",\")[1])\n except:\n kg_af = -1\n\n try:\n topmed = re.search(r\"(TOPMED=)([0-9,.e-]+)\", record[7]).group(2)\n topmed_af = float(topmed.split(\",\")[1])\n except:\n topmed_af = -1\n\n return max(kg_af, topmed_af)", "def get_DIV2k_data_QF(pLow, pFull, bs: int, sz: int):\n src = ImageImageList.from_folder(pLow, presort=True).split_by_idxs(\n train_idx=list(range(0, 800)), valid_idx=list(range(800, 900)))\n\n data = (src.label_from_func(\n lambda x: pFull/(x.name)\n ).transform(\n get_transforms(\n max_zoom=2.\n ),\n tfm_y=True\n ).databunch(bs=bs, num_workers=8, no_check=True)\n .normalize(imagenet_stats, do_y=True))\n data.c = 3\n return data", "def getBins(fs):\n\tdN = {# fs:(binMin,binStep,binMax)\n\t\t 10:(0 , 1.25e-06, 0.25e-03), # for 1 Hz LPF (fsNew=10)\n\t\t 50:(0 , 2.5e-06, 0.5e-03), # for 6 Hz LPF (fsNew=50)\n\t\t 250:(0 , 25e-06 , 5e-03),\n\t\t 500:(0 , 50e-06 , 10e-03),\n\t\t1000:(0 ,100e-06 , 20e-03)\n\t}\n\tbinMin,binStep,binMax = dN.get(fs, (False,False,False))\n\tif not binStep: raise '\\nunaccounted for sample rate = %f\\n' % fs\n\treturn binMin,binStep,binMax", "def bfr2generic(native_img):\n n_records = native_img.lat.shape[0]\n generic_data = get_template_ASCATL2_SMX(n_records)\n\n fields = [('jd', 'jd', None),\n ('sat_id', 'Satellite Identifier', None),\n ('abs_line_nr', None, None),\n ('abs_orbit_nr', 'Orbit Number', None),\n ('node_num', 'Cross-Track Cell Number', None),\n ('line_num', 'line_num', None),\n ('as_des_pass', 'as_des_pass', None),\n ('swath', 'swath_indicator', None),\n ('azif', 'f_Antenna Beam Azimuth', 1.7e+38),\n ('azim', 'm_Antenna Beam Azimuth', 1.7e+38),\n ('azia', 'a_Antenna Beam Azimuth', 1.7e+38),\n ('incf', 'f_Radar Incidence Angle', 1.7e+38),\n ('incm', 'm_Radar Incidence Angle', 1.7e+38),\n ('inca', 'a_Radar Incidence Angle', 1.7e+38),\n ('sigf', 'f_Backscatter', 1.7e+38),\n ('sigm', 'm_Backscatter', 1.7e+38),\n ('siga', 'a_Backscatter', 1.7e+38),\n ('sm', 'Surface Soil Moisture (Ms)', 1.7e+38),\n (\n 'sm_noise', 'Estimated Error In Surface Soil Moisture', 1.7e+38),\n ('sm_sensitivity', 'Soil Moisture Sensitivity', 1.7e+38),\n ('sig40', 'Backscatter', 1.7e+38),\n ('sig40_noise',\n 'Estimated Error In Sigma0 At 40 Deg Incidence Angle', 1.7e+38),\n ('slope40', 'Slope At 40 Deg Incidence Angle', 1.7e+38),\n ('slope40_noise',\n 'Estimated Error In Slope At 40 Deg Incidence Angle', 1.7e+38),\n ('dry_backscatter', 'Dry Backscatter', 1.7e+38),\n ('wet_backscatter', 'Wet Backscatter', 1.7e+38),\n ('mean_surf_sm', 'Mean Surface Soil Moisture', 1.7e+40),\n ('correction_flag', 'Soil Moisture Correction Flag', 1.7e+38),\n ('processing_flag', 'Soil Moisture Processing Flag', 1.7e+38),\n ('aggregated_quality_flag', None),\n ('snow_cover_probability', 'Snow Cover', 1.7e+38),\n ('frozen_soil_probability', 'Frozen Land Surface Fraction',\n 1.7e+38),\n ('innudation_or_wetland', 'Inundation And Wetland Fraction',\n 1.7e+38),\n ('topographical_complexity', 'Topographic Complexity', 1.7e+38)]\n\n for field in fields:\n if field[1] is None:\n continue\n\n if field[2] is not None:\n valid_mask = (native_img.data[field[1]] != field[2])\n generic_data[field[0]][valid_mask] = native_img.data[field[1]][\n valid_mask]\n else:\n generic_data[field[0]] = native_img.data[field[1]]\n\n # convert sat_id (spacecraft id) to the intern definition\n sat_id_lut = np.array([0, 0, 0, 4, 3, 5])\n generic_data['sat_id'] = sat_id_lut[generic_data['sat_id']]\n\n img = 
Image(native_img.lon, native_img.lat, generic_data,\n native_img.metadata, native_img.timestamp,\n timekey='jd')\n\n return img", "def _readBTS(self,fname):\n with BinaryFile(fname) as f:\n #\n # read header info\n #\n if self.verbose: print('Reading header information from',fname)\n\n ID = f.read_int2()\n assert( ID==7 or ID==8 )\n if ID==7: filetype = 'non-periodic'\n elif ID==8: filetype = 'periodic'\n else: filetype = 'UNKNOWN'\n if self.verbose:\n print(' id= {:d} ({:s})'.format(ID,filetype))\n\n # - read resolution settings\n self.NZ = f.read_int4()\n self.NY = f.read_int4()\n self.Ntower = f.read_int4()\n if self.verbose:\n print(' NumGrid_Z,_Y=',self.NZ,self.NY)\n print(' ntower=',self.Ntower)\n self.N = f.read_int4()\n self.dz = f.read_float(dtype=self.realtype)\n self.dy = f.read_float(dtype=self.realtype)\n self.dt = f.read_float(dtype=self.realtype)\n self.period = self.realtype(self.N * self.dt)\n self.Nsize = 3*self.NY*self.NZ*self.N\n if self.verbose:\n print(' nt=',self.N)\n print(' (problem size: {:d} points)'.format(self.Nsize))\n print(' dz,dy=',self.dz,self.dy)\n print(' TimeStep=',self.dt)\n print(' Period=',self.period)\n\n # - read reference values\n self.uhub = f.read_float(dtype=self.realtype)\n self.zhub = f.read_float(dtype=self.realtype) # NOT USED\n self.zbot = f.read_float(dtype=self.realtype)\n if self.Umean is None:\n self.Umean = self.uhub\n if self.verbose:\n print(' Umean = uhub =',self.Umean,\n '(for calculating fluctuations)')\n else: # user-specified Umean\n if self.verbose:\n print(' Umean =',self.Umean,\n '(for calculating fluctuations)')\n print(' uhub=',self.uhub,' (NOT USED)')\n if self.verbose:\n print(' HubHt=',self.zhub,' (NOT USED)')\n print(' Zbottom=',self.zbot)\n\n # - read scaling factors\n self.Vslope = np.zeros(3,dtype=self.realtype)\n self.Vintercept = np.zeros(3,dtype=self.realtype)\n for i in range(3):\n self.Vslope[i] = f.read_float(dtype=self.realtype)\n self.Vintercept[i] = f.read_float(dtype=self.realtype)\n if self.verbose:\n # output is float64 precision by default...\n print(' Vslope=',self.Vslope)\n print(' Vintercept=',self.Vintercept)\n\n # - read turbsim info string\n nchar = f.read_int4()\n version = f.read(N=nchar)\n if self.verbose: print(version)\n\n #\n # read normalized data\n #\n # note: need to specify Fortran-order to properly read data using np.nditer\n t0 = time.process_time()\n if self.verbose: print('Reading normalized grid data')\n\n self.U = np.zeros((3,self.NY,self.NZ,self.N),order='F',dtype=self.realtype)\n self.T = np.zeros((self.N,self.NY,self.NZ))\n if self.verbose:\n print(' U size :',self.U.nbytes/1024.**2,'MB')\n\n for val in np.nditer(self.U, op_flags=['writeonly']):\n val[...] = f.read_int2()\n self.U = self.U.swapaxes(3,2).swapaxes(2,1) # new shape: (3,self.N,self.NY,self.NZ)\n\n if self.Ntower > 0:\n if self.verbose:\n print('Reading normalized tower data')\n self.Utow = np.zeros((3,self.Ntower,self.N),\n order='F',dtype=self.realtype)\n if self.verbose:\n print(' Utow size :',self.Utow.nbytes/1024.**2,'MB')\n for val in np.nditer(self.Utow, op_flags=['writeonly']):\n val[...] 
= f.read_int2()\n\n if self.verbose:\n print(' Read velocitiy fields in',time.process_time()-t0,'s')\n \n #\n # calculate dimensional velocity\n #\n if self.verbose:\n print('Calculating velocities from normalized data')\n for i in range(3):\n self.U[i,:,:,:] -= self.Vintercept[i]\n self.U[i,:,:,:] /= self.Vslope[i]\n if self.Ntower > 0:\n self.Utow[i,:,:] -= self.Vintercept[i]\n self.Utow[i,:,:] /= self.Vslope[i]\n self.U[0,:,:,:] -= self.Umean # uniform inflow w/ no shear assumed\n\n print(' u min/max [',np.min(self.U[0,:,:,:]),\n np.max(self.U[0,:,:,:]),']')\n print(' v min/max [',np.min(self.U[1,:,:,:]),\n np.max(self.U[1,:,:,:]),']')\n print(' w min/max [',np.min(self.U[2,:,:,:]),\n np.max(self.U[2,:,:,:]),']')\n\n self.scaling = np.ones((3,self.NZ))\n\n #\n # calculate coordinates\n #\n if self.verbose:\n print('Calculating coordinates')\n #self.y = -0.5*(self.NY-1)*self.dy + np.arange(self.NY,dtype=self.realtype)*self.dy\n self.y = np.arange(self.NY,dtype=self.realtype)*self.dy\n self.z = self.zbot + np.arange(self.NZ,dtype=self.realtype)*self.dz\n #self.ztow = self.zbot - np.arange(self.NZ,dtype=self.realtype)*self.dz #--NOT USED\n\n self.t = np.arange(self.N,dtype=self.realtype)*self.dt\n if self.verbose:\n print('Read times [',self.t[0],self.t[1],'...',self.t[-1],']')", "def SFSchmidt10(jd,mag,errmag,nbin=0.1,bmin=5,bmax=2000):\n\n dtarray, dmagarray, sigmaarray = SFarray(jd,mag,errmag)\n ndt=np.where((dtarray<=365))\n dtarray=dtarray[ndt]\n dmagarray=dmagarray[ndt]\n sigmaarray=sigmaarray[ndt]\n\n bins=bincalc(nbin,bmin,bmax)\n #print(len(bins))\n\n\n sf_list=[]\n tau_list=[]\n numobj_list=[]\n\n for i in range(0,len(bins)-1):\n n=np.where((dtarray>=bins[i]) & (dtarray<bins[i+1]))\n nobjbin=len(n[0])\n if nobjbin>=6:\n dmag1=np.abs(dmagarray[n])\n derr1=np.sqrt(sigmaarray[n])\n sf=(np.sqrt(np.pi/2.0)*dmag1-derr1)\n sff=np.mean(sf)\n sf_list.append(sff)\n numobj_list.append(nobjbin)\n #central tau for the bin\n tau_list.append((bins[i]+bins[i+1])*0.5)\n\n\n SF=np.array(sf_list)\n nob=np.array(numobj_list)\n tau=np.array(tau_list)\n nn=np.where(nob>6)\n tau=tau[nn]\n SF=SF[nn]\n\n\n return (tau/365.,SF)", "def load_BindingDB_kd():\n affinity = pd.read_csv('./dataset/regression/BindingDB/BindingDB_Kd.txt', header=None)\n target = pd.read_csv('./dataset/regression/BindingDB/BindingDB_Target_Sequence_new.txt', header=None)\n drug = pd.read_csv('./dataset/regression/BindingDB/BindingDB_SMILES_new.txt', header=None)\n \n SMILES=[]\n Target=[]\n y=[]\n drugcnt=[]\n \n for i in range(len(target)):\n Target.append(target[0][i])\n y.append(affinity[0][i])\n SMILES.append(drug[0][i])\n\n aff=[]\n total=[]\n for i in range(len(target)):\n aff.insert(i, y[i].split(\" \"))\n for i in aff:\n total += i\n for i in range(len(SMILES)):\n drugcnt.insert(i, len(SMILES[i].split()))\n\n smile = []\n for segments in SMILES:\n for x in segments.split():\n smile.extend(x)\n #smile = [x for segments in SMILES for x in segments.split()]\n smiles_res=[]\n y_tmp=[]\n target_res=[]\n tmp=[]\n\n for i in range(len(drugcnt)):\n tmp.extend(repeat(Target[i], drugcnt[i]))\n for i in range(len(total)):\n if total[i] != '-1':\n y_tmp.append(total[i])\n smiles_res.append(smile[i])\n target_res.append(tmp[i])\n\n y_res = [float(i) for i in y_tmp]\n y_res = convert_y_unit(np.array(y_res), 'nM', 'p')\n return np.array(smiles_res), np.array(target_res), np.array(y_res)", "def dsinfomaker(compath, backpath, mwb, tcfs, SR=\"SR\"):#yrs, ves,\r\n\tdsinfo = OrderedDict()\r\n\t# ==========\r\n\tdsinfo[\"GFED\"] = 
({\"alias\":\"GFED4.1\",\"long_name\":\"FRI\", \"units\":\"yrs\"})\r\n\tdsinfo[\"MODIS\"] = ({\"alias\":\"MCD64A1\", \"long_name\":\"FRI\",\"units\":\"yrs\", \"version\":\"v006\"})\r\n\tdsinfo[\"esacci\"] = ({\"alias\":\"FireCCI5.1\", \"long_name\":\"FRI\",\"units\":\"yrs\"})\r\n\tdsinfo[\"COPERN_BA\"] = ({\"alias\":\"CGLS\", \"long_name\":\"FRI\",\"units\":\"yrs\"})\r\n\tdsinfo[\"HANSEN_AFmask\"] = ({\"alias\":\"Hansen GFC & MCD14ML\", \"long_name\":f'FRI$_{{{SR}}}$',\"units\":\"yrs\"})\r\n\tdsinfo[\"HANSEN\"] = ({\"alias\":\"Hansen GFC\", \"long_name\":\"DRI\",\"units\":\"yrs\"})\r\n\tdsinfo[\"Risk\"] = ({\"alias\":\"Forest Loss Risk\"})\r\n\t# dsinfo[\"FutureRisk\"] = ({\"alias\":\"Forest Loss Risk\"})\r\n\tdsinfo[\"SRfrac\"] = ({\"alias\":\"Stand Replacing Fire Percentage\", \"long_name\":f'FRI$_{{{\"SR\"}}}$ %'})\r\n\r\n\tfor dsnm in dsinfo:\r\n\t\tif dsnm.startswith(\"H\"):\r\n\t\t\t# +++++ make a path +++++\r\n\t\t\tppath = compath + \"/BurntArea/HANSEN/FRI/\"\r\n\t\t\tfname = \"%s%s_annual_burns_MW_%ddegreeBox.nc\" % (dsnm, tcfs, mwb)\r\n\t\t\t# fname = \"%s%s_annual_burns_MW_%ddegreeBox.nc\" % (dsnm, mwb)\r\n\t\telif dsnm == \"Risk\":\r\n\t\t\tppath = compath + \"/BurntArea/Risk/FRI/\"\r\n\t\t\tfname = \"%s_annual_burns_MW_%ddegreeBox.nc\" % (dsnm, mwb)\r\n\t\t\tcf.pymkdir(ppath)\r\n\t\t# elif dsnm == \"FutureRisk\":\r\n\t\t# \tppath = compath + \"/BurntArea/Risk/FRI/\"\r\n\t\t# \tfname = f\"{dsnm}_annual_burns_MW_{mwb}degreeBox_{yrs}yrs_{ves}.nc\" \r\n\t\t# \tcf.pymkdir(ppath)\r\n\t\telse:\r\n\t\t\t# fname = \"Hansen_GFC-2018-v1.6_regrided_esacci_FRI_%ddegMW_SIBERIA\" % (mwb)\r\n\t\t\tppath = compath + \"/BurntArea/%s/FRI/\" % dsnm\r\n\t\t\tfname = \"%s_annual_burns_MW_%ddegreeBox.nc\" % (dsnm, mwb)\r\n\t\t# +++++ open the datasets +++++\r\n\t\tdsinfo[dsnm][\"fname\"] = ppath+fname\r\n\r\n\r\n\treturn dsinfo", "def BSGS(self, P):\n if P == self.infpoint:\n return 1\n\n bfsize = card(self.basefield)\n\n Q = self.mul(bfsize + 1, P)\n m = arith1.floorpowerroot(bfsize, 4) + 1\n Plist = [self.infpoint]\n R = P\n j = 1\n while j <= m:\n Plist.append(R)\n R = self.add(R, P)\n j = j+1\n R = self.mul(2*m, P)\n k = -m\n Plist_rev = list(map(self.mul, [-1]*(m+1), Plist)) # make reverse point mapping\n j = 0\n while k <= m:\n S = self.add(Q, self.mul(k, R))\n if S in Plist:\n j = Plist.index(S)\n break\n elif S in Plist_rev:\n j = -Plist_rev.index(S)\n break\n k = k+1\n M = self.ch+1+2*m*k-j\n Flist = factor_methods.factor(M)\n for p, e in Flist:\n for i in range(e):\n if self.mul(M//p, P) == self.infpoint:\n M = M//p\n return M", "def read_bc_table(fname=datapath+\"/bolometric_corrections/bc_p04_ugriz.data\"):\n with open(fname,'r') as fp:\n lines = fp.readlines()\n s = lines[1].split()\n NTeff, Nlogg, NMH, Nfilt = int(s[0]), int(s[2]), int(s[5]), int(s[7])\n allBCs = {}\n\n Teffs = list(map(float, \"\".join(lines[2:5]).replace(\"\\n\",\" \").split()))\n loggs = list(map(float, lines[5].split()))\n Nlist = list(map(int, lines[6].split()))\n iline = 7\n allBCs = {}\n for ifilt in range(Nfilt):\n BCtable = np.zeros((np.sum(Nlist)*NMH,4))\n itable = 0\n for iMH in range(NMH):\n s = lines[iline].split()\n FeH = float(s[2]); aFe = float(s[5]); filter = s[9]\n iline += 1\n for ilogg,logg in enumerate(loggs):\n BCrow = []\n while len(BCrow) < Nlist[ilogg]:\n line = lines[iline]\n iline += 1\n BCrow += list(map(float, line.split()))\n for iTeff,Teff in enumerate(Teffs[0:Nlist[ilogg]]):\n BCtable[itable,0] = Teff\n BCtable[itable,1] = logg\n BCtable[itable,2] = FeH\n BCtable[itable,3] = 
BCrow[iTeff]\n itable += 1\n allBCs[filter] = BCtable\n return allBCs", "def get_2NM (f,a,s):\r\n ss = []\r\n while True:\r\n ss.append(f.readline().strip())\r\n if ss[-1].upper().find(\"HISTOGRAMS\")== 0:\r\n break\r\n mlist = msigvals(ss)\r\n si = 0\r\n psp = []\r\n nummp = 0\r\n ainc = 1 if gv[\"newercode\"] else 2\r\n while si < len(ss):\r\n if ss[si].upper().find(\"POPULATION MIGRATION (2NM) TERMS\") == 0:\r\n si += 1\r\n aa = ss[si].split()\r\n for i in range(1,len(aa),ainc):\r\n psp.append([aa[i]])\r\n si += 3\r\n aa = ss[si].split()\r\n ii = 0\r\n for i in range(1,len(aa),ainc):\r\n psp[nummp + ii].append(float(aa[i]))\r\n ii += 1\r\n nummp = len(psp)\r\n si += 1\r\n\r\n for pi,p in enumerate(psp):\r\n mn = p[0][p[0].upper().find('M')+1:]\r\n i = 0\r\n while True:\r\n assert i < len(mlist)\r\n if mlist[i][0][1:] == mn:\r\n psp[pi].append(mlist[i][1])\r\n break\r\n i += 1\r\n return psp", "def rbd_series() -> NonRepairableRBD:\n edges = [(1, 2), (2, 3), (3, 4), (4, 5)]\n reliabilities = {\n 2: surv.Weibull.from_params([20, 2]),\n 3: surv.Weibull.from_params([100, 3]),\n 4: surv.Weibull.from_params([50, 20]),\n }\n return NonRepairableRBD(edges, reliabilities)", "def _read_sp(sp_file):\n content = sp_file.read()\n\n start_byte = 0\n n_bytes = 4\n signature = content[start_byte:start_byte + n_bytes]\n\n start_byte += n_bytes\n # the description is fixed to 40 bytes\n n_bytes = 40\n description = content[\n start_byte:start_byte + n_bytes].decode('utf8')\n\n meta = {'signature': signature,\n 'description': description}\n spectrum = []\n\n NBP = []\n start_byte += n_bytes\n n_bytes = 6\n block_id, block_size = _block_info(\n content[start_byte:start_byte + n_bytes])\n start_byte += n_bytes\n NBP.append(start_byte + block_size)\n while block_id != 122 and start_byte < len(content) - 2:\n next_block_id = content[start_byte:start_byte + 2]\n if indexbytes(next_block_id, 1) == 117:\n start_byte = NBP[-1]\n NBP = NBP[:-1]\n while start_byte >= NBP[-1]:\n NBP = NBP[-1]\n else:\n block_id, block_size = _block_info(\n content[start_byte:start_byte + n_bytes])\n start_byte += n_bytes\n NBP.append(start_byte + block_size)\n\n meta.update(_decode_5104(\n content[start_byte:start_byte + block_size]))\n\n start_byte = NBP[1]\n while start_byte < len(content):\n n_bytes = 6\n block_id, block_size = _block_info(\n content[start_byte:start_byte + n_bytes])\n start_byte += n_bytes\n if block_id in FUNC_DECODE.keys():\n decoded_data = FUNC_DECODE[block_id](\n content[start_byte:start_byte + block_size])\n if isinstance(decoded_data, dict):\n meta.update(decoded_data)\n else:\n spectrum = decoded_data\n start_byte += block_size\n\n wavelength = np.linspace(meta['min_wavelength'],\n meta['max_wavelength'],\n meta['n_points'])\n\n if isinstance(sp_file, string_types):\n meta['filename'] = basename(sp_file)\n else:\n meta['filename'] = basename(sp_file.name)\n\n return Spectrum(spectrum, wavelength, meta)", "def _s2bl(size):\n return size**2 // 8 + 1", "def get_instr_hsdpa_bler(self, carrier=1):\r\r\n loggerCmw = logging.getLogger('get_hsdpa_bler')\r\r\n bler_str = self.read('FETCh:WCDMa:SIGN:HACK:BLER:CARRier%s?' 
%carrier)\r\r\n bler_list = bler_str.split(',')\r\r\n\r\r\n bler = -1\r\r\n reliability = bler_list[0]\r\r\n if reliability == '0':\r\r\n bler = bler_list[1]\r\r\n bler = '%.3f' % float(bler)\r\r\n return bler", "def key_klifs_residues(numbering):\n if numbering == None:\n print(\"The structure was not found in the klifs database.\")\n key_res = None\n return key_res\n\n key_res = dict() #initialize key_res (which read from the 0-based numbering list)\n for i in range(5):\n key_res[f'group{i}'] = list()\n ## feature group 0: A-loop backbone dihedrals\n key_res['group0'].append(numbering[83]) # start of A-loop\n\n ## feature group 1: P-loop backbone dihedrals\n key_res['group1'].append(numbering[3]) # res0 in P-loop\n key_res['group1'].append(numbering[4]) # res1 in P-loop\n key_res['group1'].append(numbering[5]) # res2 in P-loop\n key_res['group1'].append(numbering[6]) # res3 in P-loop\n key_res['group1'].append(numbering[7]) # res4 in P-loop\n key_res['group1'].append(numbering[8]) # res5 in P-loop\n\n ## feature group 2: aC-related features\n #angle between aC and aE helices\n key_res['group2'].append(numbering[19]) # res0 in aC\n key_res['group2'].append(numbering[29]) # res10 in aC\n key_res['group2'].append(numbering[62]) # end of aE\n\n # key salt bridge\n key_res['group2'].append(numbering[16]) # K in beta III\n key_res['group2'].append(numbering[23]) # E in aC\n\n ## feature group 3: DFG-related features\n key_res['group3'].append(numbering[79]) # X-DFG\n key_res['group3'].append(numbering[80]) # DFG-Asp\n key_res['group3'].append(numbering[81]) # DFG-Phe\n key_res['group3'].append(numbering[27]) # ExxxX\n\n ## feature group 4: the FRET distance\n # not in the list of 85 (equivalent to Aura\"S284\"), use the 100% conserved beta III K as a reference\n key_res['group4'].append(numbering[16] + 120)\n\n # not in the list of 85 (equivalent to Aura\"L225\"), use the 100% conserved beta III K as a reference\n key_res['group4'].append(numbering[16] + 61)\n\n return key_res", "def erb2freq(n_erb):\n return 24.7 * 9.265 * (np.exp(n_erb / 9.265) - 1)", "def get_band_edges():\n # Vacuum level energy from LOCPOT.\n locpot = Locpot.from_file('LOCPOT')\n evac = max(locpot.get_average_along_axis(2))\n\n vasprun = Vasprun('vasprun.xml')\n bs = vasprun.get_band_structure()\n eigenvals = vasprun.eigenvalues\n efermi = vasprun.efermi - evac\n\n if bs.is_metal():\n edges = {'up_cbm': None, 'up_vbm': None, 'dn_cbm': None, 'dn_vbm': None,\n 'efermi': efermi}\n\n elif bs.is_spin_polarized:\n up_cbm = min(\n [min([e[0] for e in eigenvals[Spin.up][i] if not e[1]])\n for i in range(len(eigenvals[Spin.up]))]) - evac\n up_vbm = max(\n [max([e[0] for e in eigenvals[Spin.up][i] if e[1]])\n for i in range(len(eigenvals[Spin.up]))]) - evac\n dn_cbm = min(\n [min([e[0] for e in eigenvals[Spin.down][i] if not e[1]])\n for i in range(len(eigenvals[Spin.down]))]) - evac\n dn_vbm = max(\n [max([e[0] for e in eigenvals[Spin.down][i] if e[1]])\n for i in range(len(eigenvals[Spin.down]))]) - evac\n edges = {'up_cbm': up_cbm, 'up_vbm': up_vbm, 'dn_cbm': dn_cbm,\n 'dn_vbm': dn_vbm, 'efermi': efermi}\n\n else:\n cbm = bs.get_cbm()['energy'] - evac\n vbm = bs.get_vbm()['energy'] - evac\n edges = {'up_cbm': cbm, 'up_vbm': vbm, 'dn_cbm': cbm, 'dn_vbm': vbm,\n 'efermi': efermi}\n\n return edges" ]
[ "0.52736485", "0.5250206", "0.5216072", "0.52104783", "0.5193983", "0.51933676", "0.51401246", "0.5131391", "0.50727785", "0.50445265", "0.50316125", "0.50303996", "0.5022727", "0.499482", "0.4993226", "0.49653268", "0.4962925", "0.49464917", "0.4935478", "0.49284562", "0.49228424", "0.49201217", "0.49132565", "0.4905006", "0.49002367", "0.48906678", "0.48888293", "0.48704958", "0.48630327", "0.4862" ]
0.54406923
0
Given a TTT board `b`, determine who has won and return. If no one has won, return None
def winner(b): # Row of three for row in b: if row[0] == " ": # First row entry is blank; ignore! continue if row[0]==row[1] and row[1]==row[2]: return row[0] # Column of three for i in range(3): if b[0][i] == " ": # First column entry is blank; ignore! continue if b[0][i]==b[1][i] and b[1][i]==b[2][i]: return b[0][i] # Diagonals if b[1][1] != " ": # Middle entry not blank, so diagonal win # is a possibility. if b[0][0] == b[1][1] and b[1][1]==b[2][2]: return b[0][0] if b[0][2] == b[1][1] and b[1][1]==b[2][0]: return b[0][2] # implicit return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def winner(board):\n\t#For X\n\tiswinnerX = winnerForPlayer(board, X)\n\tiswinnerO = winnerForPlayer(board, O)\n\n\tif iswinnerX:\n\t\treturn X\n\tif iswinnerO:\n\t\treturn O\n\n\treturn None", "def winner(board):\n #To determine the winner, I need to know the board's final value. \n token_value = utility(board)\n #if it's 1, X won. If it's -1, O won. Else, it was a tie.\n if(token_value == 1):\n return 'X'\n elif(token_value == -1):\n return 'O'\n else:\n return None", "def who_won(self, board):\n winners = set()\n for x,y,z in self.wins:\n if board[x] == board[y] and board[y] == board[z]:\n winners.add(board[x])\n if 1 in winners and 2 in winners:\n return 3\n if 1 in winners:\n return 1\n if 2 in winners:\n return 2\n return 0", "def winner(board):\n # finite list of possible wins\n winnings = [\n (0, 0), (0, 1), (0, 2), \n (1, 0), (1, 1), (1, 2),\n (2, 0), (2, 1), (2, 2),\n (0, 0), (1, 0), (2, 0),\n (0, 1), (1, 1), (2, 1),\n (0, 2), (1, 2), (2, 2),\n (0, 0), (1, 1), (2, 2),\n (2, 0), (1, 1), (0, 2)\n ]\n # if the board has one of the lists in winnings \n # then the piece in one of those spots is the winner\n xcount = 0\n ocount = 0\n for i in range(len(winnings)):\n if(board[winnings[i][0]][winnings[i][1]] == X):\n xcount += 1\n if(board[winnings[i][0]][winnings[i][1]] == O):\n ocount += 1\n if((i + 1) % 3 == 0):\n if(ocount == 3 or xcount == 3):\n return board[winnings[i][0]][winnings[i][1]]\n else:\n ocount = 0\n xcount = 0\n return EMPTY", "def winner(board):\n for i in range(3):\n firstnumber=board[i][0]\n if firstnumber!=EMPTY:\n secondnumber=board[i][1]\n if secondnumber==firstnumber:\n if board[i][2]==secondnumber:\n return secondnumber\n else:\n continue\n else:\n continue\n else:\n continue\n for i in range(3):\n firstnumber=board[0][i]\n if firstnumber!=EMPTY:\n secondnumber=board[1][i]\n if secondnumber==firstnumber:\n if board[2][i]==secondnumber:\n return secondnumber\n else:\n continue\n else:\n continue\n else:\n continue\n firstnumber=board[0][0]\n if firstnumber!= EMPTY:\n if board[1][1]==firstnumber:\n if board[2][2]==firstnumber:\n return firstnumber\n firstnumber=board[2][0]\n if firstnumber!= EMPTY:\n if board[1][1]==firstnumber:\n if board[0][2]==firstnumber:\n return firstnumber\n return None\n raise NotImplementedError", "def winner(self, board):\n if self.any_legal_move(BLACK, board) or self.any_legal_move(WHITE,board):\n return None\n scoreBlack = self.score(BLACK, board)\n scoreWhite = self.score(WHITE, board)\n if scoreBlack > scoreWhite: return PLAYERS[BLACK]\n elif scoreBlack < scoreWhite: return PLAYERS[WHITE]\n else: return TIE", "def winner(board):\n\n # Check for horizontal wins\n for row in board:\n if row[0] == row[1] == row[2] and row[0] is not None:\n return row[0]\n\n # Check for vertical wins\n for i in range(3):\n if board[0][i] == board[1][i] == board[2][i] and board[0][i] is not None:\n return board[0][i]\n\n # Check for diagonal wins\n if board[0][0] == board[1][1] == board[2][2] and board[0][0] is not None:\n return board[0][0]\n if board[0][2] == board[1][1] == board[2][0] and board[0][2] is not None:\n return board[0][2]\n\n # If there is no winner, return None\n return None", "def winner(board):\n if board[0][0] != EMPTY and (board[0][0] == board[0][1] == board[0][2] \n or board[0][0] == board[1][1] == board[2][2] \n or board[0][0] == board[1][0] == board[2][0]):\n return board[0][0]\n\n elif board[1][1] != EMPTY and (board[1][0] == board[1][1] == board[1][2]\n or board[0][1] == board[1][1] == board[2][1]):\n return board[1][1]\n \n 
elif board[2][2] != EMPTY and (board[0][2] == board[1][2] == board[2][2]\n or board[2][0] == board[2][1] == board[2][2]):\n return board[2][2]\n \n elif board[2][0] != EMPTY and (board[2][0] == board[1][1] == board[0][2]):\n return board[2][0]\n \n else:\n None", "def winner(board):\n for i in range(3):\n if board[i][0] == board[i][1] == board[i][2] != None:\n return board[i][0]\n for i in range(3):\n if board[0][i] == board[1][i] == board[2][i] != None:\n return board[0][i]\n if board[0][0] == board[1][1] == board[2][2]:\n return board[0][0]\n if board[0][2] == board[1][1] == board[2][0]:\n return board[0][2]\n return None", "def look_for_win(self, board, player=None):\n\n win_spot = None\n if player is None:\n player = self\n\n for group in WINS:\n # creates a list of just the elements of the board which are\n # part of a specific win group and and not already owned by the player\n # and creates a list of tuples of the element and its value.\n not_mine = [(i, val) for i, val in enumerate(board.tttboard)\n if i in group\n and val != player.board_value]\n\n # If there's only one not owned by the ai player and not owned by\n # the other player then select it and we've won\n if len(not_mine) == 1 and not_mine[0][1] is None:\n # Maybe this should return the selection rather than\n # modifying the board in here. Decide later.\n win_spot = not_mine[0][0]\n break\n\n return win_spot", "def winner(self):\n for c in 'xo':\n for comb in [(0,3,6), (1,4,7), (2,5,8), (0,1,2), (3,4,5), (6,7,8), (0,4,8), (2,4,6)]:\n if all(self.spots[p] == c for p in comb):\n return c\n return None", "def has_won(board, player):\r\n return False", "def has_won(board, player):\n return False", "def who_win(a, b):\n if [a[0], b[0]] in [[1, 2], [2, 3], [3, 1]]:\n return b\n else:\n return a", "def winner(self, board):\n # Cek baris\n if all(i == board[0][0] for i in board[0]):\n return board[0][0]\n elif all(i == board[1][0] for i in board[1]):\n return board[1][0]\n elif all(i == board[2][0] for i in board[2]):\n return board[2][0]\n \n # Cek kolom\n elif board[0][0] == board[1][0] and board[1][0] == board[2][0]:\n return board[0][0]\n elif board[0][1] == board[1][1] and board[1][1] == board[2][1]:\n return board[0][1]\n elif board[0][2] == board[1][2] and board[1][2] == board[2][2]:\n return board[0][2]\n \n # Cek diagonal\n elif board[0][0] == board[1][1] and board[1][1] == board[2][2]:\n return board[0][0]\n elif board[0][2] == board[1][1] and board[1][1] == board[2][0]:\n return board[0][2]\n else:\n return None", "def get_winner(self):\n combos = [\n [0, 1, 2],\n [3, 4, 5],\n [6, 7, 8],\n [0, 3, 6],\n [1, 4, 7],\n [2, 5, 8],\n [0, 4, 8],\n [2, 4, 6],\n ]\n winner = None\n for combo in combos:\n a, b, c = combo\n if (\n self.board[a] is not None\n and self.board[a] == self.board[b]\n and self.board[a] == self.board[c]\n ):\n winner = self.board[a]\n break\n return winner", "def winner(board):\n # check columns\n for j in range(3):\n if board[1][j] == board[0][j] and board[0][j] == board[2][j] and board[1][j] != EMPTY:\n return board[1][j]\n # check rows\n for i in range(3):\n if board[i][0] == board[i][1] and board[i][1] == board[i][2] and board[i][0] != EMPTY:\n return board[i][0]\n # check diagnols\n if board[0][0] == board[1][1] and board[1][1] == board[2][2] and board[0][0] != EMPTY:\n return board[1][1]\n if board[0][2] == board[1][1] and board[1][1] == board[2][0] and board[0][2] != EMPTY:\n return board[1][1]\n return None", "def next_move(ttt):\r\n # get board in 2D array form\r\n b = ttt.get_board()\r\n \r\n 
# if there's a winning move, take it\r\n (cfw, win_move) = check_for_win_lose(b)\r\n if cfw is not None:\r\n if win_move:\r\n print 'COMPUTER WINS!'\r\n return cfw, win_move\r\n # otherwise, pres on with the next best move\r\n\r\n # get \"points\" on board. this tells us not only the move\r\n # but also who went first\r\n board_count = sum(sum(b,[]))\r\n \r\n # IF COMPUTER HAS FIRST TURN\r\n # if 1st move\r\n if board_count == 0:\r\n return (2,2), False # take the center\r\n # this is not best strategy for winning, but\r\n # it the human messes up, the computer can win.\r\n # taking a corner first makes it a little easier\r\n # for the computer to win becase the human only\r\n # has one correct move to make: to take the center\r\n \r\n # if 3rd move, and not a winning one\r\n if board_count == 3:\r\n if b[0][1]==2 or b[1][0]==2 or b[0][0]==2:\r\n return (3,3), False\r\n elif b[0][2]==2:\r\n return (3,1), False\r\n elif b[2][0]==2:\r\n return (1,3), False\r\n else:#elif b[1][2]==2 or b[2][1]==2 or b[2][2]==2:\r\n return (1,1), False\r\n\r\n # if 5th move, and not a winning or losing one\r\n if board_count == 6:\r\n b5 = numpy.array([[0,2,1],[0,1,0],[2,0,0]])\r\n if (b == b5).all():\r\n return (3,3), False\r\n elif (b == numpy.rot90(b5,1)).all():\r\n return (3,1), False\r\n elif (b == numpy.rot90(b5,2)).all():\r\n return (1,1), False\r\n elif (b == numpy.rot90(b5,3)).all():\r\n return (1,3), False\r\n\r\n b5 = numpy.array([[0,0,1],[0,1,2],[2,0,0]])\r\n if (b == b5).all():\r\n return (1,1), False\r\n elif (b == numpy.rot90(b5,1)).all():\r\n return (1,3), False\r\n elif (b == numpy.rot90(b5,2)).all():\r\n return (3,3), False\r\n elif (b == numpy.rot90(b5,3)).all():\r\n return (3,1), False\r\n\r\n # at this point, all possible boards should have been covered\r\n\r\n # if 7th move, and a winning or losing one\r\n if board_count == 9:\r\n # find the row or col with 2 open slots and mark it\r\n for ri in range(3):\r\n r = b[ri]\r\n if sum([1 if i==0 else 0 for i in r]) == 2:\r\n if r[0] == 0:\r\n return (ri+1,1), False\r\n else:\r\n return (ri+1,2), False\r\n for ci in range(3):\r\n c = get_col(b, ci)\r\n if sum([1 if i==0 else 0 for i in c]) == 2:\r\n if c[0] == 0:\r\n return (1,ci+1), False\r\n else:\r\n return (2,ci+1), False\r\n\r\n \r\n # IF HUMAN HAS FIRST TURN\r\n # if 2nd move\r\n if board_count == 2:\r\n if b[1][1] == 0:\r\n # if the center is open, computer has\r\n # to take it in order to not lose\r\n return (2,2), False\r\n else:\r\n # otherwise take a corner\r\n return (1,1), False\r\n\r\n # if 4th move\r\n if board_count == 5:\r\n # if we took a corner on move 2 and they\r\n # are using computer's offensive strategy\r\n # when it is first player\r\n b4 = [[1,0,0],[0,2,0],[0,0,2]]\r\n if b==b4:\r\n return (3,1), False\r\n # if we took center on move 2\r\n else:\r\n b4 = numpy.array([[2,0,0],[0,1,0],[0,0,2]])\r\n if (b == b4).all() or (b == numpy.rot90(b4,1)).all():\r\n return (1,2), False\r\n\r\n # overall ELSE -- just find a square\r\n for ri in range(3):\r\n for ci in range(3):\r\n if b[ri][ci] == 0:\r\n return (ri+1,ci+1), False", "def winner(board):\n for i in range(len(board)):\n\n # Check rows\n if board[i][0] == board[i][1] == board[i][2] and not board[i][1] == EMPTY:\n return board[i][1]\n\n # Check columns\n elif board[0][i] == board[1][i] == board[2][i] and not board[1][i] == EMPTY:\n return board[1][i]\n\n # Check diagonals\n if board[0][0] == board[1][1] == board[2][2] and not board[1][1] == EMPTY:\n return board[1][1]\n\n if board[2][0] == board[1][1] == board[0][2] and 
not board[1][1] == EMPTY:\n return board[1][1]\n\n # No winner if get to this point\n return None", "def check_bingo_board(board_num):\n winning_board = False\n for i in range(BINGO_SIZE):\n if bingoboards[board_num][i].count(\"X\") == 5:\n winning_board = board_num\n for j in range(BINGO_SIZE):\n col = list(map(lambda x: x[j], bingoboards[board_num]))\n if col.count(\"X\") == 5:\n winning_board = board_num\n return winning_board", "def get_winner(board_values: dict[str, str]) -> Optional[str]:\n for combo in c.WINNING_COMBOS:\n entries = {board_values[k] for k in combo}\n if len(entries) == 1:\n return entries.pop()\n\n # if all the squares are filled but no winners were detected, it's a tie\n if set(board_values.values()) == {c.X, c.O}:\n return c.TIE\n\n return None", "def check_tie(board):\n return 0 not in board[0]", "def winner(board):\n for turn in [X,O]:\n for i in range(3):\n if board[i] == [turn, turn, turn]:\n return turn\n if board[0][i] == turn and board[1][i] == turn and board[2][i] == turn:\n return turn\n if board[0][0] == turn and board[1][1] == turn and board[2][2] == turn:\n return turn\n if board[0][2] == turn and board[1][1] == turn and board[2][0] == turn:\n return turn\n return None", "def getUtility(board):\n winner = getWinner(board)\n if winner == X:\n return 1\n if winner == O:\n return -1\n return 0", "def utility(board):\n # if game is over (tie/winner) decide who won\n if(terminal(board)):\n if(winner(board) == X):\n return 1\n elif(winner(board) == O):\n return -1\n else:\n return 0", "def utility(board):\n if winner(board) is None:\n return 0\n elif winner(board) == X:\n return 1\n elif winner(board) == O:\n return -1", "def winner(board):\n # Checking for 3 in a row\n for row in board:\n if row[0] is not EMPTY and row[0] == row[1] == row[2]:\n return row[0]\n\n # Checking for 3 in a col\n for col in range(len(board)):\n if board[0][col] is not EMPTY and board[0][col] == board[1][col] == board[2][col]:\n return board[0][col]\n\n # Checking for Diagonals\n if board[0][0] is not EMPTY and board[0][0] == board[1][1] == board[2][2]:\n return board[0][0]\n \n if board[0][2] is not EMPTY and board[0][2] == board[2][0] == board[1][1]:\n return board[0][2]\n\n return None", "def winner(board):\n # Check Rows\n for row in board:\n if row[0] != EMPTY and row[0] == row[1] and row[0] == row[2]:\n return row[0]\n \n # Check Columns\n for j in range(3):\n if board[0][j] != EMPTY and board[0][j] == board[1][j]:\n if board[0][j] == board[2][j]:\n return board[0][j]\n \n # Check Diagonals\n if board[1][1] != EMPTY:\n if board[0][0] == board[1][1] and board[0][0] == board[2][2]:\n return board[0][0]\n if board[0][2] == board[1][1] and board[0][2] == board[2][0]:\n return board[0][2]\n\n return None", "def who_won(self):\n if self.scoreB >= WP: return Stone.BLACK\n if self.scoreW >= WP: return Stone.WHITE\n return self.last", "def utility(board):\n return utility_map[winner(board)]" ]
[ "0.6610197", "0.6459119", "0.641945", "0.64099497", "0.64009243", "0.63863385", "0.63806957", "0.63735557", "0.6358739", "0.6316436", "0.63094836", "0.6282113", "0.6265861", "0.62483245", "0.6238915", "0.62181", "0.61878645", "0.6153537", "0.6098851", "0.6092213", "0.6091631", "0.6034719", "0.6030418", "0.60290754", "0.6024122", "0.6001655", "0.600115", "0.5991995", "0.5991852", "0.59881234" ]
0.6824795
0
Modify board b (list of lists) to account for move in string `move`. If the move is illegal, raises an exception.
def apply_move(b,player,move): move = move.strip().lower() if len(move)!=2: raise Exception("Valid move is two characters (e.g. A2 or B3)") if move[0] not in COLS: move = move[::-1] if move[0] not in COLS: raise Exception("No column spec found") j = COLS.index(move[0]) i = int(move[1])-1 if b[i][j] != " ": raise Exception("Another move already filled that position") b[i][j] = player
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def partial_move(B, cur_monkey_pos, cur_num_balloons, move):\n\n assert B[-1][cur_monkey_pos] == \"x\"\n R = len(B)\n C = len(B[0])\n\n new_board = [r for r in B[:-1]]\n new_bottom_row = [0 for _ in range(C)]\n new_monkey_pos = cur_monkey_pos\n new_num_balloons = cur_num_balloons\n\n if move == \"left\":\n if new_monkey_pos == 0:\n return None\n new_monkey_pos -= 1\n elif move == \"right\":\n if new_monkey_pos == C - 1:\n return None\n new_monkey_pos += 1\n elif move == \"shoot\":\n # simulate the dart\n for row in range(R - 2, -1, -1):\n if B[row][new_monkey_pos] != 0:\n new_row = list(B[row])\n new_row[new_monkey_pos] -= 1\n if new_row[new_monkey_pos] == 0:\n new_num_balloons -= 1\n new_board[row] = tuple(new_row)\n break\n else:\n assert False, \"invalid move: \" + move\n\n new_bottom_row[new_monkey_pos] = \"x\"\n new_board.append(tuple(new_bottom_row))\n return (tuple(new_board), new_monkey_pos, new_num_balloons)", "def move(direction: str, board : list) -> list:\n board_length = len(board)\n x, y = find_empty_space(board)\n \n increment_x = 0 \n increment_y = 0\n\n if direction == Direction.Up:\n increment_x, increment_y = Coordinate.Up.value\n elif direction == Direction.Down:\n increment_x, increment_y = Coordinate.Down.value\n elif direction == Direction.Left:\n increment_x, increment_y = Coordinate.Left.value\n elif direction == Direction.Right:\n increment_x, increment_y = Coordinate.Right.value\n\n x_new = x + increment_x\n y_new = y + increment_y\n\n is_valid = is_valid_move(x_new, y_new, board_length)\n\n if is_valid: \n temp = board[x][y]\n board[x][y] = board[x_new][y_new]\n board[x_new][y_new] = temp\n return board\n return None", "def execute_move(board, move):\n\n player, spike_index, fields_to_move = Judge._validate_move(move)\n\n board.set_player_perspective(player)\n \n if spike_index == OUT_OF_BAR_SPECIAL_MOVE:\n dest_spike_index = fields_to_move - 1\n board.remove_checker_from_bar()\n else:\n dest_spike_index = spike_index + fields_to_move\n board.pop_player_checker(spike_index)\n\n if dest_spike_index >= len(INITIAL_SPIKES_STATE):\n return board\n\n board.push_player_checker(dest_spike_index)\n\n return board", "def update_board(board: Board, move: Move) -> Board:\n old_position = move[0]\n new_position = move[1]\n character = board[old_position[0]][old_position[1]]\n board = change_position(board, new_position, character)\n board = clear_position(board, old_position)\n \n return board", "def execution(move,legal,board,player):\r\n \r\n if player == 1:\r\n if move in legal:\r\n for i in range(0,len(board.white)):\r\n if board.white[i] == move[0]:\r\n board.white[i] = move[1]\r\n if len(move) == 3:\r\n board.black.remove(move[-1])\r\n\r\n else:\r\n print(\"Illegal move, please input a legal move\")\r\n human_move(board,player)\r\n else:\r\n if move in legal:\r\n if len(move) == 3:\r\n board.white.remove(move[-1])\r\n for i in range(0,len(board.black)):\r\n if board.black[i] == move[0]:\r\n board.black[i] = move[1]\r\n else:\r\n print(\"Illegal move, please input a legal move\")\r\n human_move(board,player)", "def play_move(board, move):\n\tboard_copy = list(board)\n\n\tboard_copy[move] = 'o'\n\treturn ''.join(board_copy)", "def set_board(self, move_string):\n next_side = \"X\"\n for col_string in move_string:\n col = int(col_string)\n if col >= 0 and col <= self.width:\n self.add_move(col, next_side)\n if next_side == \"X\":\n next_side = \"O\"\n else:\n next_side = \"X\"", "def move(self, move):\n possible_words = self._possible_words()\n if move not in 
possible_words:\n raise ValueError\n else:\n return self._extend(move)", "def make_move(self, move, player, board):\r\n #nBoard = board.copy()\r\n board[move] = player\r\n for d in core.DIRECTIONS:\r\n if self.find_bracket(move, player, board, d)!=None:\r\n self.make_flips(move, player, board, d)\r\n return board", "def move(self, board):\n raise NotImplementedError", "def make_move(self, state):\r\n # intially set drop phase to true\r\n drop_phase = True\r\n move = [] # list to make moves with to return\r\n succ = self.succ(state) # get the successor of this state\r\n # intial postion of board to set up most advantagous spot if its empty\r\n if sum(x.count(self.my_piece) for x in self.board) == 0 and self.board[2][2] == ' ':\r\n move.insert(0, (2, 2))\r\n return move\r\n \r\n # check the number of 'r' and 'b' on board if theres 4 of each drop phase is false\r\n if sum(x.count('r') for x in self.board) == 4 and sum(x.count('b') for x in self.board) == 4:\r\n drop_phase = False\r\n\r\n # if not during drop phase use minimax to make next move from one postion to next\r\n if not drop_phase:\r\n move = []\r\n d = self.Max_value(state, 0)\r\n val = d['val']\r\n m = d['move']\r\n p = d['pos']\r\n f = d['from']\r\n s = sorted(succ, key=lambda e: e['f'])\r\n moveto = s[-1]\r\n move.insert(1, (moveto['from'][0], moveto['from'][1]))\r\n move.insert(0, (moveto['pos'][0], moveto['pos'][1]))\r\n return move # return the from, to move\r\n\r\n else: #else use minimax and to make move during drop phase selecting spot to place AI piece\r\n d = self.Max_value(state, 0)\r\n val = d['val']\r\n m = d['move']\r\n p = d['pos']\r\n hold = []\r\n move = []\r\n n = None\r\n hold = []\r\n for s in succ:\r\n p = s['pos'][0]\r\n p1 = s['pos'][1]\r\n if s['f'] == val and state[p][p1] == ' ':\r\n hold.append(s)\r\n if len(hold) == 1:\r\n row = hold[0]['pos'][0]\r\n col = hold[0]['pos'][1]\r\n else:\r\n f = sorted(hold, key=lambda e: e['pos'])\r\n row = f[0]['pos'][0]\r\n col = f[0]['pos'][1]\r\n\r\n move.insert(0, (row, col)) # return the move \r\n return move", "def handle_move(self, move_string):\n def map_move(move):\n col = int(ascii_lowercase.find(move[0])) + 1 # dummy col\n row = int(move[1:])\n # if not 0 < col <= game[\"board_width\"]:\n # raise ValueError('bad coord; invalid col in ' + coord)\n # if not 0 < row <= game[\"board_height\"]:\n # raise ValueError('bad coord; invalid row in ' + coord)\n return row*(self.rules[\"row_len\"]) + col\n move = list(map(map_move,move_string.split(' ')))\n self.turn[\"board\"][move[0]].make_move(*move[1:])\n self.turn[\"half_move_clock\"] += 1\n if self.turn[\"active_player\"] == 1:\n self.turn[\"full_move_clock\"] += 1\n self.turn[\"active_player\"] = (self.turn[\"active_player\"] + 1) % 2\n # self.turn[\"board\"][move_start].make_move(move_end)", "def moved_board(board):\n return legal_move_on(board=board).map(\n lambda (start, end): board.move(start=start, end=end),\n )", "def play_move(self, move_data, all_english_words):\r\n \r\n #first, make copy of board and try to apply move there.\r\n board_cpy = copy.deepcopy(self.myboard)\r\n \r\n rack_cpy = copy.deepcopy(self.rack) #***need to remove appropriate words from rack after we've made a move. 
\r\n\r\n i = 0 \r\n for cur_char in move_data[0]:\r\n new_tile = Tile(cur_char, self.game_bag.letter_freq_and_val[cur_char][1]) #create a new tile.\r\n if move_data[3] == \"horizontal\":\r\n print(\"adding cur_char {} at {} {}\".format(cur_char, move_data[1], move_data[2]+i))\r\n board_cpy.place_tile(move_data[1], move_data[2]+ i, new_tile)\r\n elif move_data[3] == \"vertical\":\r\n board_cpy.place_tile(move_data[1] + i, move_data[2], new_tile)\r\n\r\n i = i + 1\r\n #print(\"New i value is {}\".format(i))\r\n board_cpy.print_board()\r\n \r\n\r\n #once we're done placing the tiles, check for validity of entire board.\r\n cur_board_words = board_cpy.find_words_on_board()\r\n move_valid = True #assume move is valid, until proven otherwise.\r\n\r\n for word_data in cur_board_words:\r\n word = word_data[0]\r\n print(word)\r\n if word not in all_english_words:\r\n return False #do nothing else\r\n \r\n #print(\"Getting here; all words valid\")\r\n \r\n #getting here means that the move is actually valid, with no conflicts.\r\n main_board = self.myboard\r\n #In this case, add to the real board. \r\n i = 0 \r\n for cur_char in move_data[0]:\r\n new_tile = Tile(cur_char, self.game_bag.letter_freq_and_val[cur_char][1]) #create a new tile.\r\n if move_data[3] == \"horizontal\":\r\n main_board.place_tile(move_data[1], move_data[2]+ i, new_tile)\r\n elif move_data[3] == \"vertical\":\r\n main_board.place_tile(move_data[1] + i, move_data[2], new_tile)\r\n\r\n i = i + 1\r\n return True", "def make_move(B, cur_monkey_pos, cur_num_balloons, cur_num_lives, move):\n\n def check_lose(B, cur_monkey_pos):\n \"\"\"\n Args:\n B (tuple): board configuration\n cur_monkey_pos (int): current column position of the monkey\n Output:\n bool: True if a balloon will hit the monkey when the balloons shift down; False otherwise\n \"\"\"\n assert B[-1][cur_monkey_pos] == \"x\"\n if B[-2][cur_monkey_pos] != 0:\n return True\n return False\n\n def shift_down(B, cur_monkey_pos, cur_num_lives):\n \"\"\"\n Just performs the shift of all the balloons downwards.\n Args:\n B (tuple): board configuration\n cur_monkey_pos (int): current column position of the monkey\n cur_num_lives (int): current number of lives in this configuration\n Output:\n (tuple, int): tuple consisting of the board configuration after balloons have all moved\n down by 1 and the new number of lives (or None if the monkey gets hit)\n \"\"\"\n\n if check_lose(B, cur_monkey_pos):\n return None\n\n new_board = []\n new_num_lives = cur_num_lives\n\n # construct the top row: if the balloon hits the ground, it respawns with +1 and we lose a life\n new_num_lives -= sum(1 for b in B[-2] if b > 0)\n top_row = tuple((b + 1 if 0 < b < 3 else b) for b in B[-2])\n new_board.append(top_row)\n\n # move all the middle rows down\n new_board.extend([r for r in B[:-2]])\n\n # add the ground row: nothing changes\n new_board.append(B[-1])\n\n return (tuple(new_board), new_num_lives)\n\n def partial_move(B, cur_monkey_pos, cur_num_balloons, move):\n \"\"\"\n Just performs the move, without the shift downwards\n Args:\n B (tuple): board configuration\n cur_monkey_pos (int): current column position of the monkey\n cur_num_balloons (int): current number of balloons on the board\n move (str): the proposed move (one of 'left', 'right', 'shoot')\n Output:\n (tuple, int, int): A tuple consisting of the board configuration after the move,\n the new monkey position, and the new number of balloons on the map\n (or None if invalid move)\n \"\"\"\n\n assert B[-1][cur_monkey_pos] == \"x\"\n R = 
len(B)\n C = len(B[0])\n\n new_board = [r for r in B[:-1]]\n new_bottom_row = [0 for _ in range(C)]\n new_monkey_pos = cur_monkey_pos\n new_num_balloons = cur_num_balloons\n\n if move == \"left\":\n if new_monkey_pos == 0:\n return None\n new_monkey_pos -= 1\n elif move == \"right\":\n if new_monkey_pos == C - 1:\n return None\n new_monkey_pos += 1\n elif move == \"shoot\":\n # simulate the dart\n for row in range(R - 2, -1, -1):\n if B[row][new_monkey_pos] != 0:\n new_row = list(B[row])\n new_row[new_monkey_pos] -= 1\n if new_row[new_monkey_pos] == 0:\n new_num_balloons -= 1\n new_board[row] = tuple(new_row)\n break\n else:\n assert False, \"invalid move: \" + move\n\n new_bottom_row[new_monkey_pos] = \"x\"\n new_board.append(tuple(new_bottom_row))\n return (tuple(new_board), new_monkey_pos, new_num_balloons)\n\n # make the move\n move_res = partial_move(B, cur_monkey_pos, cur_num_balloons, move)\n if move_res is None: # invalid move\n return None\n move_board, new_monkey_pos, new_num_balloons = move_res # unpack\n\n # shift all the balloons down\n shift_res = shift_down(move_board, new_monkey_pos, cur_num_lives)\n if shift_res is None: # check if a balloon hit the monkey\n return None\n new_board, new_num_lives = shift_res # unpack\n return (new_board, new_monkey_pos, new_num_balloons, new_num_lives)", "def make_move(self, move):\n if int(move) < 0 or int(move) > 48 or self.board[int(move) // 7][int(move) % 7] != \"\" or int(move) % 2 == 0:\n raise ValueError(\"{} is not a valid move for {}\".format(move, self.board))\n DotsAndBoxesState.score1 += self.check_score(move)\n self.board[int(move) // 7][int(move) % 7] = colors[self.turn] + self.turn + \"\\u001b[0m\"\n self.turn = get_opponent(self.turn) #change into another player's trun", "def make_move(self, state):\n emptySpaces = 0\n for row in state:\n emptySpaces = emptySpaces + row.count(' ')\n if emptySpaces > 17:\n drop_phase = True\n else:\n drop_phase = False\n\n move = []\n if not drop_phase:\n best_value = float('-inf')\n best_state = None\n for s in self.succ(state, False, self.my_piece):\n if self.game_value(s) == -1 or self.game_value(s) == 1:\n best_state = s\n break\n currValue = self.Min_Value(state, 0)\n if currValue>best_value:\n best_value = currValue\n best_state = s\n for i in range(len(state)):\n for j in range(len(state[i])):\n if state[i][j]!= ' ' and best_state[i][j]== ' ':\n move.append((i,j))\n if state[i][j]== ' ' and best_state[i][j]!= ' ':\n move.insert(0, (i,j))\n \n # TODO: choose a piece to move and remove it from the board\n # (You may move this condition anywhere, just be sure to handle it)\n #\n # Until this part is implemented and the move list is updated\n # accordingly, the AI will not follow the rules after the drop phase!\n \n\n # select an unoccupied space randomly\n # TODO: implement a minimax algorithm to play better\n \n else:\n best_value = float('-inf')\n best_state = None\n for s in self.succ(state, True, self.my_piece):\n if self.game_value(s) == -1 or self.game_value(s) == 1:\n best_state = s\n break\n currValue = self.Min_Value(state, 0)\n if currValue>best_value:\n best_value = currValue\n best_state = s\n for i in range(len(state)):\n for j in range(len(state[i])):\n if state[i][j]== ' ' and best_state[i][j]!= ' ':\n move.insert(0, (i,j))\n \n return move", "def respond_to_move(self, move):\n\n # this will get the piece at the queried position,\n # will notify user if there is no piece there\n current_algebraic, new_algebraic = move\n row, column = 
self.algebraic_mapped_to_position[current_algebraic]\n if self.board[row][column] == empty_square:\n print(\"There is no piece at %s\" % (current_algebraic,))\n return\n piece, location = self.board[row][column]\n\n # this will get all possible moves from this position\n # and will make the move if the new position is a\n # valid move\n piece_name = self.piece_names[piece]\n moves = self.moves[piece_name]((row, column))\n \n new_row, new_column = self.algebraic_mapped_to_position[new_algebraic]\n print(\"old position %s, %s\" % (row, column))\n print(\"new algebraic %s\" % new_algebraic)\n print(\"new position %s, %s\" % (new_row, new_column))\n print(\"moves %s\" % moves)\n if (new_row, new_column) in moves:\n # this will change the game board to reflect the move\n self.board[row][column] = empty_square\n self.board[new_row][new_column] = piece+location", "def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = 
self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction", "def setBoard( self, moveString ): \n nextCh = 'X' # start by playing 'X'\n for colString in moveString:\n col = int(colString)\n if 0 <= col <= self.__width:\n self.addMove(col, nextCh)\n if nextCh == 'X': nextCh = 'O'\n else: nextCh = 'X'", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction", "def test_check_move_with_invalid(self):\n board = [\n [\" \"] * 6,\n [\" \"] * 6,\n [\" \"] * 6,\n [\" \"] * 6,\n [\"\\u25cb\"] * 6,\n [\" \"] * 6,\n [\" \"] * 6\n ]\n valid = self.game.check_move(board, 4)\n self.assertFalse(valid)", "def setBoard( self, moveString ):\n nextCh = 'X' # start by playing 'X'\n for colString in moveString:\n col = int(colString)\n if 0 <= col <= self.width:\n self.addMove(col, nextCh)\n if nextCh == 'X': nextCh = 'O'\n else: nextCh = 'X'", "def setBoard( self, moveString ):\n nextCh = 'X' # start by playing 'X'\n for colString in moveString:\n col = 
int(colString)\n if 0 <= col <= self.__width:\n self.addMove(col, nextCh)\n if nextCh == 'X': \n nextCh = 'O'\n else: nextCh = 'X'", "def setBoard(self, moveString):\n nextCh = 'X' # start by playing 'X'\n for colString in moveString:\n col = int(colString)\n if 0 <= col <= self.width:\n self.addMove(col, nextCh)\n if nextCh == 'X':\n nextCh = 'O'\n else:\n nextCh = 'X'", "def add_move(self, move):\n \n self.current_board[move.position[0]][move.position[1]] = move.player.char", "def undo_move(self, move):\n if move in self.board:\n self.board[move] = self.BLANK_CELL_CHAR\n self.available_moves.add(move)\n else:\n raise ValueError('Move-undo [{}] not possible.'.format(move))" ]
[ "0.65394884", "0.6422928", "0.6320928", "0.62613", "0.61935437", "0.616798", "0.61599106", "0.6131119", "0.61113816", "0.60769236", "0.60658014", "0.60647887", "0.60047555", "0.597028", "0.59584993", "0.5950637", "0.59433466", "0.59427494", "0.5942658", "0.5942658", "0.5942658", "0.59414655", "0.59288514", "0.59288514", "0.5926264", "0.5917329", "0.591354", "0.5904633", "0.5899182", "0.58968556" ]
0.81654453
0
Annotate sequence with kmer repeats.
def repeat_annotation(sequence, kmer_size): max_observed_repeats = [1 for i in range(len(sequence))] for i in range(len(sequence) - (kmer_size - 1)): kmer_count = 0 start_index = i end_index = i + (kmer_size - 1) for j in range(i, len(sequence), kmer_size): if sequence[i:i+kmer_size] == sequence[j:j+kmer_size]: kmer_count += 1 else: break end_index = j + (kmer_size) for k in range(start_index, min(len(sequence), end_index)): max_observed_repeats[k] = max(max_observed_repeats[k], kmer_count) return max_observed_repeats
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def annotate(m, ss_seq): # -> None:\n ...", "def repeat(self, number_of_repeats):\n return \"G\" + str(number_of_repeats)", "def kmer_composition(k, text):\r\n # TODO: your code here\r\n d = {}\r\n for i in range(len(text)-k+1):\r\n print(text[i:k+i])\r\n ''' \r\n if(text[i:k+i] in d.keys()):\r\n d[text[i:k+i]] += 1\r\n else:\r\n d[text[i:k+i]] = 1\r\n print(d)\r\n '''", "def test_sequence_annotate(self):\n self.t(\"1,2 annotate note\")\n code, out, err = self.t(\"_get 1.annotations.1.description 2.annotations.1.description\")\n self.assertEqual(\"note note\\n\", out)", "def increase(self):\n self.counter[0] += 1\n\n for x in range(len(self.sequences) -1):\n if self.counter[x] == len(self.sequences[x]) + 1:\n self.counter[x] = 0\n self.counter[x+1] += 1", "def getKmers(seq, k):\n \n kmd = {}\n \n for i in range(len(seq)+1-k):\n kmer = seq[i:i+k]\n kmd[kmer] = kmd.get(kmer,0) + 1\n return kmd", "def add_label_sequence(self, label_seq):\n curr_ngram = self.all_grams\n for label in label_seq:\n curr_ngram.add_count()\n curr_ngram = curr_ngram.get_next_Ngram(label)\n # Add count for last label\n curr_ngram.add_count()", "def add_annotations(self):\n for i in range(8):\n self.text.append(self.canvas.create_text(-self.width / 2,\n (self.width / 2) + (i * self.width),\n font=(\"Purisa\", 12), anchor=\"nw\"))\n self.canvas.itemconfig(self.text[i], text=str((i - 8) * -1))\n for i in range(8):\n self.text.append(self.canvas.create_text((self.width / 2) + (i * self.width),\n self.width * 8 + 10, font=(\"Purisa\", 12), anchor=\"nw\"))\n self.canvas.itemconfig(self.text[i + 8], text=string.ascii_lowercase[i])", "def _annotate(self, mfccs):\n if self.slices is None or self.ipa_regions is None:\n raise ValueError(\"No IPA regions. Call setup_regions() prior\")\n\n # Define some short hands\n slices = self.slices\n ipa_regions = self.ipa_regions\n\n annotation_regions = []\n mfcc_len = mfccs.shape[1]\n sample_ann = [None] * mfcc_len\n\n # Convert slices into sample points.\n for s in slices:\n mfcc_rate = mfcc_len / self.length\n annotation_regions.append(round(s * mfcc_rate))\n annotation_regions.append(mfcc_len)\n\n # Loop through the annotation regions, and set them.\n for i in range(len(annotation_regions) - 1):\n low = annotation_regions[i]\n high = annotation_regions[i+1]\n for sample_ind in range(low, high):\n if sample_ind >= len(sample_ann):\n print(sample_ind)\n print(len(sample_ann))\n sample_ann[sample_ind] = ct.IPA_MAP[ipa_regions[i]]\n self.annotated_samples = sample_ann", "def kmer_count(self,size):\n if size == 1:\n return ['A','T','C','G']\n else:\n result = []\n for seq in Analyze_DNA_Sequence.kmer_count(self,size-1):\n for base in ['A','T','C','G']:\n result.append(seq+base)\n return result", "def repeat(self, repeats):\n return SeriesDefault.register(pandas.Series.repeat)(self, repeats=repeats)", "def visualize_attention(test_seq,\n model,\n id2wrd,\n n):\n\n get_layer_output = K.function([model.layers[0].input, K.learning_phase()], [model.layers[4].output])\n out = get_layer_output([test_seq, ])[0] # test mode\n\n att_w = model.layers[5].get_weights()\n\n eij = np.tanh(np.dot(out[0], att_w[0]))\n ai = np.exp(eij)\n weights = ai/np.sum(ai)\n weights = np.sum(weights,axis=1)\n\n topKeys = np.argpartition(weights,-n)[-n:]\n\n print(' '.join([id2wrd[wrd_id] for wrd_id in test_seq[0] if wrd_id != 0.])) \n \n for k in test_seq[0][topKeys]:\n if k != 0.:\n print(id2wrd[k])\n \n return", "def embed_seq(self,X_seq,Y_seq):\n X_embed = 
-tr.ones(len(X_seq),self.og_signal_dim+self.og_noise_dim)\n # find trials of corresponding types\n pm_trials_bool = X_seq >= self.ntokens_og\n pm_trials = np.where(pm_trials_bool)\n og_trials = np.where(np.logical_not(pm_trials_bool))\n # take signal_dim (time,edim_signal_dim)\n pm_embeds = self.emat_pm[X_seq[pm_trials] - self.ntokens_og] \n og_embeds = self.emat_og[X_seq[og_trials]] \n # make noise (time,edim_noise)\n pm_noise = tr_noise_pm([len(pm_embeds),self.pm_noise_dim])\n og_noise = tr_noise_og([len(og_embeds),self.og_noise_dim])\n # cat signal_dim and noise (time,edim)\n pm_embeds = tr.cat([pm_embeds,pm_noise],-1)\n og_embeds = tr.cat([og_noise,og_embeds],-1)\n # put into respective positions\n X_embed[pm_trials] = pm_embeds\n X_embed[og_trials] = og_embeds \n # include batch dim \n X_embed = tr.unsqueeze(X_embed,1)\n Y_embed = tr.unsqueeze(tr.LongTensor(Y_seq),1)\n return X_embed,Y_embed", "def set_sequence(self, counter):\n self.seq_counter = counter", "def MakeRepeat1(self,content):\n return self.register(Repeat1(content,reg=self))", "def group(seq):\n pass # replace with your solution", "def __generateSentences(self, ngrams, n, length, repetition, seed):\n randInt = random.randint(1, repetition)\n sent = ''\n for i in range(randInt):\n sent += self.__markovGen(self.ngrams, n, length, seed)\n sent += ' '\n return sent", "def set_label_text(index):\n nonlocal count_label\n labeled = len(annotations.loc[annotations['changed']])\n str_output = f'{labeled} of {len(annotations)} Examples annotated, Current Position: {index + 1} '\n if id_column in annotations.columns and index >= 0 and index < len(annotations):\n ix = annotations.iloc[index].name\n str_output += f\"(id: {annotations.at[ix, id_column]}) \"\n count_label.value = str_output", "def add_to_subtitle_offset(annotation, seen_annotations, value):\n if not annotation[\"time\"] in seen_annotations:\n seen_annotations[annotation[\"time\"]] = value\n else:\n seen_annotations[annotation[\"time\"]] += value", "def generate_title(model, tokenizer, photo, max_length):\n in_text = \"startseq\"\n vocab = len(tokenizer.word_index) + 1\n prev_word = \"\"\n\n for i in range(max_length):\n sequence = tokenizer.texts_to_sequences([in_text])[0]\n sequence = pad_sequences([sequence], maxlen=max_length)\n yhat = model.predict([photo, sequence], verbose=0)\n yhat = random.choice(list(range(vocab)), 1, p=yhat[0])\n # yhat = argmax(yhat)\n word = word_for_id(yhat, tokenizer)\n\n if word is None:\n break\n\n if word == prev_word:\n pass\n\n in_text += \" \" + word\n\n prev_word = word\n\n if word == \"endseq\":\n break\n\n return in_text", "def get_repetitions(element):\r\n return int(element.attrib.get('repeat', 1))", "def sample_decorations(self, scaffold_seqs, scaffold_seq_lengths):\n # TODO: fix the return type to be SampledSequencesDTO\n batch_size = scaffold_seqs.size(0)\n\n input_vector = torch.full(\n (batch_size, 1), self.vocabulary.decoration_vocabulary[\"^\"], dtype=torch.long) # (batch, 1)\n # print(f\"input_vector: {input_vector}\")\n seq_lengths = torch.ones(batch_size) # (batch)\n encoder_padded_seqs, hidden_states = self.network.forward_encoder(scaffold_seqs, scaffold_seq_lengths)\n nlls = torch.zeros(batch_size)\n not_finished = torch.ones(batch_size, 1, dtype=torch.long)\n sequences = []\n for _ in range(self.max_sequence_length - 1):\n logits, hidden_states, _ = self.network.forward_decoder(\n input_vector, seq_lengths, encoder_padded_seqs, hidden_states) # (batch, 1, voc)\n probs = logits.softmax(dim=2).squeeze() # (batch, 
voc)\n log_probs = logits.log_softmax(dim=2).squeeze() # (batch, voc)\n input_vector = torch.multinomial(probs, 1) * not_finished # (batch, 1)\n sequences.append(input_vector)\n nlls += self._nll_loss(log_probs, input_vector.squeeze())\n not_finished = (input_vector > 1).type(torch.long) # 0 is padding, 1 is end token\n if not_finished.sum() == 0:\n break\n\n decoration_smiles = [self.vocabulary.decode_decoration(seq)\n for seq in torch.cat(sequences, 1).data.cpu().numpy()]\n scaffold_smiles = [self.vocabulary.decode_scaffold(seq) for seq in scaffold_seqs.data.cpu().numpy()]\n return zip(scaffold_smiles, decoration_smiles, nlls.data.cpu().numpy().tolist())", "def repeat_count(self):\n if hasattr(self, '_m_repeat_count'):\n return self._m_repeat_count if hasattr(self, '_m_repeat_count') else None\n\n self._m_repeat_count = (self.repeat_count_m1 + 1)\n return self._m_repeat_count if hasattr(self, '_m_repeat_count') else None", "def random_kmers(k, t, dna):\n\n motifs = np.zeros((t, k), dtype='str')\n for i in range(t):\n random_kmer_index = random.randrange(len(dna[0, :]) - k)\n current_seq = dna[i, :]\n motifs[i, :] = list(current_seq[random_kmer_index: random_kmer_index + k])\n\n return motifs", "def add_sequencing_errors(barcode_counts, seq_error_rate=0.01):\n new_list = []\n for barcode in tqdm(barcode_counts.elements(), total=sum(barcode_counts.values()), desc=\"Sequencing BCs\", unit=\"bc\", leave=False):\n new_barcode = str()\n for base in barcode:\n if np.random.random() > seq_error_rate:\n new_barcode += base\n else:\n counter['sequencing errors'] += 1\n new_barcode += np.random.choice(errors_dict[base])\n\n new_list.append(new_barcode)\n return collections.Counter(new_list)", "def padded_sequences(input_sequences, total_words):\r\n max_len = max([len(x) for x in input_sequences])\r\n input_sequences = np.array(pad_sequences(input_sequences, maxlen=max_len, padding='pre'))\r\n print(input_sequences)\r\n\r\n predictors, label = input_sequences[:, :-1], input_sequences[:, -1] # creates two variables: sequence / next word of Ngram\r\n label = ku.to_categorical(label, num_classes=total_words)\r\n return predictors, label, max_len", "def create_kmers(seq,kmer_size):\n\n return [seq[i:(i+kmer_size)] for i in range(len(seq)-kmer_size+1)]", "def addNormalizing(self, name, seq):\n\n for i in xrange(len(seq) - self.kmer_size + 1):\n s = strandless(seq[i:i + self.kmer_size].upper())\n if \"N\" in s:\n continue\n self.normalizingKmers.add(s)", "def Show_Sequences( self ):\r\n self.system.Change_Seq( \"Sequence\" )", "def repeat(word, repetitions):\n return word * repetitions" ]
[ "0.57868123", "0.53828126", "0.52845156", "0.51891977", "0.5072232", "0.5051787", "0.49803054", "0.49799493", "0.49720377", "0.49627554", "0.49418154", "0.49380988", "0.4931937", "0.49287304", "0.49239197", "0.4888765", "0.48718172", "0.48550633", "0.48515433", "0.48390466", "0.48252672", "0.48061988", "0.4805044", "0.47944003", "0.47935095", "0.47915888", "0.47661206", "0.47528157", "0.47399342", "0.4720611" ]
0.6944195
0
Return a specific BokehJS deployment file
def bokehjs_file(filename): return flask.send_from_directory(bokeh_app.bokehjsdir, filename)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bokehjssrc_file(filename):\n return flask.send_from_directory(bokeh_app.bokehjssrcdir, filename)", "def get_vendor_js():\n return (\"://plotly-load_from_python.js\",)", "def get_bokeh_resources() -> TemplateResourcesData:\n template_resources = TemplateResourcesData()\n template_resources.js = CDN.js_files[0]\n template_resources.css = CDN.css_files[0]\n\n return template_resources", "def _get_deployment_config_file():\n config_path = cfg.CONF.find_file(\n cfg.CONF.paste_deploy['api_paste_config'])\n if config_path is None:\n return None\n\n return os.path.abspath(config_path)", "def get_js_file(self):\n return 'placeholder'", "def launcher_path() -> Optional[str]:\n return u.resource(LAUNCHER_SCRIPT)", "def app_bundle(self) -> str:\n if self.minimize:\n js_url = f\"https://cdn.jsdelivr.net/gh/salesforce/cloudsplaining@{__version__}/cloudsplaining/output/dist/js/index.js\"\n bundle = f'<script type=\"text/javascript\" src=\"{js_url}\"></script>'\n return bundle\n else:\n with open(app_bundle_path, \"r\", encoding=\"utf-8\") as f:\n bundle_content = f.read()\n # bundle_content = app_bundle_path.read_text(encoding=\"utf-8\")\n bundle = f'<script type=\"text/javascript\">\\n{bundle_content}\\n</script>'\n return bundle", "def get_wrapper_js_path(cls):\n return os.path.join(os.path.dirname(__file__), \"wrap_crowd_source.js\")", "def deploy_cfg():\n return '{buildout}.cfg'.format(buildout=env.host.split('.')[0])", "def _get_egg_path(self):\n try:\n _dist = get_distribution('janitoo_nut')\n return _dist.__file__\n except AttributeError:\n return 'src-nut/config'", "def package_filename(dist, *filename):\n static = static_filename(dist)\n if static is None:\n return\n if not os.path.exists(os.path.join(static, 'js', 'package.json')):\n return\n js_filename = os.path.abspath(os.path.join(static, 'js'))\n if filename is not None:\n js_filename = os.path.join(js_filename, *filename)\n if not os.path.exists(js_filename):\n return\n return js_filename", "def generate_js_dir():\n\n return pkg_resources.resource_filename('linkedin.mobster.har.visualization.js', None)", "def get_config_file():\n return deployr_config_repository.get_deployr_config_file()", "def get_config_file_name(self):\n argv = sys.argv\n config_type = \"dev\" # default configuration type\n if None != argv and len(argv) > 1 :\n config_type = argv[1]\n config_file = config_type + \".cfg\"\n logger.info(\"get_config_file_name() return : \" + config_file)\n return config_file", "def _get_scripts_resource(pe):\n return next(\n (\n entry.directory.entries[0].directory.entries[0]\n for entry in pe.DIRECTORY_ENTRY_RESOURCE.entries\n if entry.name and entry.name.string == b\"PYTHONSCRIPT\"\n ),\n None,\n )", "def get_bundle(conf, asset_type, bundle_name):\n \n content_type = 'application/javascript'\n content = []\n \n if asset_type == 'css':\n content_type = 'text/css'\n \n for asset in conf[asset_type][bundle_name]:\n content.append(open(os.path.join(conf['srcDir'], asset_type, asset)).read())\n \n content = ''.join(content)\n \n return '200 OK', content_type, content", "def config_file_address() -> str:\n\n config_files = json_files_from_folder(\"config\")\n config_file = choose_config(config_files) # Choice a config file if there is more then 1 in config folder\n return config_file", "def get_bundled_schema_path():\n return str(data.load_resource(\"schema\"))", "def get_src_js(self):\n if self.get_style() != self.STYLE_BASE:\n return f\"dtables/js/dataTables.{self.get_style()}.js\"\n else:\n return 
f\"dtables/js/{self.get_style()}.dataTables.js\"", "def _get_jqplot(self, filename):\n return \"common/js/jqPlot/\" + filename + \".js\"", "def configFilename(self):\n return self.name()+'.py'", "def _bundle(self):\n # Default to DEFAULT_BUNDLE_NAME\n bundle_path = os.path.join(self.working_dir, DEFAULT_BUNDLE_NAME)\n return self.config['app'].get('bundle', bundle_path)", "def get_default_javascript():\n return [\"_static/require.js\"]", "def datapackage_path():\n return 'datapackage.json'", "def _get_config_fname():\n directory = _get_vispy_app_dir()\n if directory is None:\n return None\n fname = op.join(directory, 'vispy.json')\n if os.environ.get('_VISPY_CONFIG_TESTING', None) is not None:\n fname = op.join(_TempDir(), 'vispy.json')\n return fname", "def get_vendor_bundle_path() -> str:\n vendor_bundle_directory = os.path.join(os.path.dirname(__file__), \"dist\", \"js\")\n file_list_with_full_path = []\n for f in os.listdir(vendor_bundle_directory):\n file_path = os.path.join(vendor_bundle_directory, f)\n if os.path.isfile(file_path):\n if os.path.splitext(file_path)[-1].endswith(\"js\"):\n if os.path.splitext(f)[0].startswith(\"chunk-vendors\"):\n file_list_with_full_path.append(os.path.abspath(file_path))\n return file_list_with_full_path[0]", "def client(filename):\n return static_file(filename, root=ROOT + '/client/build')", "def get_data_path(name):\n js = open('config.json').read()\n data = json.loads(js)\n return os.path.expanduser(data[name]['data_path'])", "def get_sls_config_file(path, stage, region):\n for name in gen_sls_config_files(stage, region):\n if os.path.isfile(os.path.join(path, name)):\n return name\n return \"config-%s.json\" % stage # fallback to generic json name", "def get_bundle_file():\n if FLAGS.bundle_file is None:\n return None\n else:\n return os.path.expanduser(FLAGS.bundle_file)" ]
[ "0.7016131", "0.61770284", "0.6039032", "0.5895731", "0.58782774", "0.58702964", "0.567083", "0.5658906", "0.56109107", "0.5562391", "0.553811", "0.55331784", "0.55214226", "0.5498094", "0.5483374", "0.5479998", "0.54668665", "0.54259694", "0.54118305", "0.5410391", "0.540808", "0.54052997", "0.53630936", "0.53630495", "0.5337482", "0.5326686", "0.5288568", "0.52859473", "0.52725726", "0.52588993" ]
0.740915
0
Return a specific BokehJS source code file
def bokehjssrc_file(filename): return flask.send_from_directory(bokeh_app.bokehjssrcdir, filename)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bokehjs_file(filename):\n return flask.send_from_directory(bokeh_app.bokehjsdir, filename)", "def source(self):\n return some.dap.source(py.path.local(self.co_filename))", "def getSource():", "def get_js_file(self):\n return 'placeholder'", "def get_vendor_js():\n return (\"://plotly-load_from_python.js\",)", "def get_main_source(self):\n\t\tpath, loader = self.main\n\t\tif path is not None:\n\t\t\treturn loader.get_source(path)", "def get_src_js(self):\n if self.get_style() != self.STYLE_BASE:\n return f\"dtables/js/dataTables.{self.get_style()}.js\"\n else:\n return f\"dtables/js/{self.get_style()}.dataTables.js\"", "def view_source_js(fn): #py:view_source_js\n RUR._view_source_js_(fn)", "def get_source(self):", "def get_wrapper_js_path(cls):\n return os.path.join(os.path.dirname(__file__), \"wrap_crowd_source.js\")", "def pull_code(notebook):\n cells = notebook[\"cells\"]\n code = []\n for cell in cells:\n if cell[\"cell_type\"] == \"code\":\n code.extend(cell[\"source\"] + [\"\\n\"])\n return ''.join(code)", "def source_test_file_content():\n return 'feature content'", "def source(self) -> str | Path:\n ...", "def get_script(blob):\n return get_script_class(blob.path)(source=blob.abspath)", "def get_source_file(self):\n return self.get_attribute(\"source_file\")", "def get_source(self):\n\t\treturn self.source.get_source()", "def getsourcefile(object):\r\n filename = getfile(object)\r\n if string.lower(filename[-4:]) in ('.pyc', '.pyo'):\r\n filename = filename[:-4] + '.py'\r\n for suffix, mode, kind in imp.get_suffixes():\r\n if 'b' in mode and string.lower(filename[-len(suffix):]) == suffix:\r\n # Looks like a binary file. We want to only return a text file.\r\n return None\r\n if os.path.exists(filename):\r\n return filename\r\n # only return a non-existent filename if the module has a PEP 302 loader\r\n if hasattr(getmodule(object, filename), '__loader__'):\r\n return filename\r\n # or it is in the linecache\r\n if filename in linecache.cache:\r\n return filename", "def _load_snippet(filename) -> str:\n fullpath = f'{dirname(__file__)}/js/{filename}'\n file = open(fullpath, 'r')\n script = file.read()\n file.close()\n return script", "def structure_jsmol(cif_str):\n from jsmol_bokeh_extension import JSMol\n import bokeh.models as bmd\n\n script_source = bmd.ColumnDataSource()\n\n info = dict(\n height=\"100%\",\n width=\"100%\",\n use=\"HTML5\",\n serverURL=\"https://chemapps.stolaf.edu/jmol/jsmol/php/jsmol.php\",\n j2sPath=\"https://chemapps.stolaf.edu/jmol/jsmol/j2s\",\n #serverURL=\"https://www.materialscloud.org/discover/scripts/external/jsmol/php/jsmol.php\",\n #j2sPath=\"https://www.materialscloud.org/discover/scripts/external/jsmol/j2s\",\n #serverURL=\"detail/static/jsmol/php/jsmol.php\",\n #j2sPath=\"detail/static/jsmol/j2s\",\n script=\"\"\"set antialiasDisplay ON;\n load data \"cifstring\"\n {}\n end \"cifstring\"\n \"\"\".format(cif_str)\n ## Note: Need PHP server for approach below to work\n # script=\"\"\"set antialiasDisplay ON;\n #load cif::{};\n #\"\"\".format(get_cif_url(entry.filename))\n )\n\n applet = JSMol(\n width=600,\n height=600,\n script_source=script_source,\n info=info,\n #js_url=\"detail/static/jsmol/JSmol.min.js\",\n )\n\n return applet", "def bokeh_constructor( loader, node ):\n global workspace\n args = loader.construct_mapping(node, deep=True)\n args = resolve_pointer( workspace, args )\n\n source = None\n\n if not 'figure' in args:\n args['figure'] = {}\n\n args['figure'] = resolve_pointer( workspace, args['figure'] )\n if 'source' in 
args:\n source = blaze.odo( args['source'], ColumnDataSource )\n\n p = figure( **args['figure'] )\n\n for glyph, kwargs in yaml_to_args(args['glyphs']):\n if source:\n kwargs['source'] = source\n getattr( p, glyph )( **kwargs )\n\n return p", "def getsource(object):\r\n lines, lnum = getsourcelines(object)\r\n return string.join(lines, '')", "def source(self):\n return self._source_code", "def process_lessjs(source, filepath):\n # This is a simple pass through, we don't need to do anything for less.js\n # to work\n return source", "def get_source (self, name):\n containment = self.containments.get (name)\n if containment is None:\n raise ImportError ('No such module: \\'{}\\''.format (name))\n return (containment [0] if sys.version_info [0] > 2 else\n containment [0].encode ('utf-8'))", "def get_bokeh_resources() -> TemplateResourcesData:\n template_resources = TemplateResourcesData()\n template_resources.js = CDN.js_files[0]\n template_resources.css = CDN.css_files[0]\n\n return template_resources", "def render_source(self, filename, obj):\n raise NotImplementedError()", "def _getscriptsource(self):\n\t\tscriptname = misc.sysinfo.script_name.rstrip(\"c\")\n\t\ttry:\n\t\t\tencoding = tokenize.detect_encoding(open(scriptname, \"rb\").readline)[0]\n\t\t\twith open(scriptname, \"r\", encoding=encoding, errors=\"replace\") as f:\n\t\t\t\tself.source = f.read()\n\t\texcept IOError: # Script might have called ``os.chdir()`` before\n\t\t\tself.source = None", "def getsource(object):\n lines, lnum = getsourcelines(object)\n return string.join(lines, '')", "def load_data_source(data_source):\n source_module = __import__('source_'+data_source)\n get_source = getattr(source_module, 'get_source')\n return get_source()", "def get_alert_source_module(alert_source_command_line_arg):\n\n return ALERT_SOURCE_MAPPING[alert_source_command_line_arg]" ]
[ "0.7006394", "0.61880594", "0.60094464", "0.5971398", "0.59102297", "0.590078", "0.58968616", "0.5809108", "0.57917756", "0.57526124", "0.5746741", "0.5706897", "0.5677012", "0.5642199", "0.55706066", "0.55467117", "0.5493918", "0.5486592", "0.54426706", "0.5422555", "0.5408623", "0.54011416", "0.5397154", "0.5395473", "0.536558", "0.5356233", "0.5355693", "0.5354957", "0.53501874", "0.53493905" ]
0.6657942
1
This function should return a list containing the two highest quantities, in descending order, for all fruits.
def sorted_fruit_quantity(f): # skip the header of the file move_cursor(f) # put all the quantities into a list # expected output: [5, 10, 3, 15] # read the file line by line output = [] for line in f: line_list = line.split() # ["Apple","5"] output.append(int(line_list[1])) # sort the list in descending order # expected output: [15, 10, 5, 3] output.sort(reverse=True) # only select the highest two quantities in the list and return them # expected output: [15, 10] # slicing # Hint: ending pos is the index of the first element that I don't want to include # in the final result return output[0:2]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_popular(self, max_items=None):\n data = [(x[1], x[0]) for x in self.iteritems()]\n data.sort(key=lambda x: (sys.maxint - x[0], x[1]))\n if max_items:\n return data[:max_items]\n else:\n return data", "def findSecondLargest(self):\n l = []\n self.flatten(l)\n print(l)\n print(l[-2])", "def largest_item(list):\n pass", "def get_most_popular_artists(data):\r\n #list of artists\r\n artists = []\r\n for key in data:\r\n if key[-1] != \"$\":\r\n for x in data[key]:\r\n artists += [x]\r\n sorted_artists = selection_sort(artists)\r\n count = 1\r\n max_count = 1\r\n max_artists = []\r\n for i in range(len(sorted_artists)-1):\r\n #ends at second to last index because I use i and i + 1\r\n if sorted_artists[i] == sorted_artists[i+1]:\r\n count += 1\r\n else:\r\n if count == max_count:\r\n max_artists += [sorted_artists[i]]\r\n count = 1\r\n elif count > max_count:\r\n max_artists = []\r\n max_artists += [sorted_artists[i]]\r\n max_count = count\r\n count = 1\r\n return max_artists", "def how_popular_most_popular(data):\r\n #list of artists\r\n artists = []\r\n for key in data:\r\n if key[-1] != \"$\":\r\n for x in data[key]:\r\n artists += [x]\r\n sorted_artists = selection_sort(artists)\r\n count = 1\r\n max_count = 1\r\n max_artists = []\r\n for i in range(len(sorted_artists)-1):\r\n #ends at second to last index because I use i and i + 1\r\n if sorted_artists[i] == sorted_artists[i+1]:\r\n count += 1\r\n else:\r\n if count == max_count:\r\n max_artists += [sorted_artists[i]]\r\n count = 1\r\n elif count > max_count:\r\n max_artists = []\r\n max_artists += [sorted_artists[i]]\r\n max_count = count\r\n count = 1\r\n return max_count", "def most_popular(lst):\n lst2 = copy.copy(lst)\n most_pop = []\n\n for j in range(10):\n biggest = 0\n biggest_name = \"\"\n index = 0\n for i in range(len(lst2)):\n if lst2[i][1] > biggest:\n biggest = lst2[i][1]\n biggest_name = lst2[i][0]\n index = i\n most_pop.append((j+1, biggest, biggest_name))\n del lst2[index]\n return most_pop", "def find_max_profit(prices):\n profit = []\n for index, price in enumerate(prices):\n buy = prices[index]\n sell_list = prices[index + 1:]\n if sell_list != []:\n for sell_price in sell_list:\n profit.append(sell_price - buy)\n return sorted(profit)[-1]", "def greatest_difference(num_list):", "def maximumToys(moneyAvailable, priceList):\n priceList.sort()\n count = 0\n for toyPrice in priceList:\n if toyPrice <= moneyAvailable:\n count += 1\n moneyAvailable -= toyPrice\n else:\n return count", "def highest_product_2(arr):\n\n # make a list to store the highest three ints, initializing to first three\n maxes = [arr[0], arr[1], arr[2]]\n\n # find the lowest of the highest three ints\n lowest_max = min(maxes)\n\n # go through the rest of the list to check for higher values\n for num in arr[3:]:\n # if any value is higher than the lowest max, update maxes list\n if num > lowest_max:\n # remove the old maximum\n maxes.remove(lowest_max)\n # add the new one\n maxes.append(num)\n # recalculate the lowest max for continued comparison\n lowest_max = min(maxes)\n\n return maxes[0] * maxes[1] * maxes[2]", "def __get_top(self, result, top=10):\n result = result.sort_values(by=\"bias_score\", ascending=False).drop_duplicates(subset='productid', keep=\"first\")\n print(result)\n result = result[:top].sort_values(by=\"final_score\", ascending=False).productid\n\n return list(result)", "def get_food_most_calories(df=df):\r\n max_calories_row = df.loc[df['Calories'].idxmax()]\r\n return max_calories_row['Item']", "def 
get_stock_symbol_with_highest_cap():\n #data2 = _cap_str_to_mln_float('cap')\n symbol_max = dict()\n for items in data:\n if items['symbol'] in symbol_max.keys():\n symbol_max[items['symbol']] = max(symbol_max[items['symbol']], _cap_str_to_mln_float(items['cap']))\n else:\n symbol_max[items['symbol']] = _cap_str_to_mln_float(items['cap'])\n\n value = sorted(symbol_max.items(), key = lambda x:x[1], reverse=True)[0][0]\n #sorted(symbol_max.items(), key = lambda x:x[1])\n return value", "def get_food_most_calories(df=df):\n return df[df.Calories == df.Calories.max()][\"Item\"].values[0]", "def chosen_items(sack, items, weight):\n total = total_profit(sack, items, weight)\n chosen = []\n \n while total != 0:\n for i in range(items + 1):\n if total in sack[i]:\n chosen.append(i) \n total = total - profit[i - 1] \n break \n \n return sorted(chosen)", "def mostTopBilled (movies, count):\n # TODO: complete this function\n import operator\n topBilledDict = {}\n for (k,v) in movies.items():\n if v[2] not in topBilledDict:\n topBilledDict[v[2]] = 0\n topBilledDict[v[2]] += 1\n topBilledList = []\n for actor in topBilledDict:\n topBilledList += [(actor,topBilledDict[actor])]\n topBilledList = sorted(topBilledList)\n topBilledList = [(y,x) for (x,y) in topBilledList]\n topBilledList = sorted(topBilledList, key = operator.itemgetter(0), reverse = True)[:count]\n return topBilledList", "def most_popular_artist(our_data):\n counter_dict = {}\n for artist in all_artists(our_data):\n if artist in counter_dict:\n counter_dict[artist] += 1\n else:\n counter_dict[artist] = 1\n maximum_albums = max(counter_dict.values())\n artist_lists = []\n for keys, values in counter_dict.items():\n if values == maximum_albums:\n artist_lists.append(keys) \n return artist_lists", "def max_val(t):\n # Your code here\n\n def openItem(term):\n newList = []\n\n for item in term:\n if type(item) == int:\n newList.append(item)\n\n else:\n newList += openItem(item)\n\n return newList\n\n sortingList = openItem(t)\n\n maximum = sortingList[0]\n\n for item in sortingList:\n if maximum < item:\n maximum = item\n\n return maximum", "def top_n(items, n):\n\n for i in range(n):\n for j in range(len(items)-1-i):\n\n if items[j] > items[j+1]:\n items[j], items[j+1] = items[j+1], items[j]\n \n top_n = items[-n:]\n\n return top_n[::-1]", "def give_greatest_spart(cls, spart_list):\n if len(spart_list) == 1:\n return spart_list[0]\n sorted_list = cls.sort_by_dominance(spart_list)\n if not(sorted_list[0] > sorted_list[1]):\n print(\"The two largest elements are non-comparable\")\n return []\n else:\n return sorted_list[0]", "def lab10_q3():\n return \"\"\"\n Use list comprehension max(lst_of_qvm, key=lambda qvm : total_revenue(qvm))\n\tThis makes each element of the list go through the key which gives total_revenue for each one. 
Then just get the max in that list\n \"\"\"", "def knapsack(items, limit):\n row = [0] * (limit + 1)\n item_row = [[]] * (limit + 1)\n for name, wt, val in items:\n for w in xrange(limit, wt, -1):\n score = val + row[w-wt]\n if score > row[w]:\n row[w] = score\n item_row[w] = [(name, wt, val)] + item_row[w-wt]\n return item_row[-1]", "def best_value(stock):\n best_sell = sell = stock.pop()\n buy = stock.pop()\n\n while stock:\n num = stock.pop()\n if num < buy:\n buy = num\n sell = best_sell\n elif best_sell - num > sell - buy:\n sell, buy = best_sell, num\n elif num > best_sell:\n best_sell = num\n\n return (buy, sell)", "def max_profit(prices: List[int]) -> int:", "def get_greatest_stock_price():\n greatest_stock_price = 0\n // your code here", "def top_boys(self):\n return [boy for boy in self._db.boys.find().sort('rating', pymongo.DESCENDING).limit(5)]", "def two_largest(inlist):\n largest = second_largest = 0\n it1 = it2 = 0\n\n for i,item in enumerate(inlist):\n if item > largest:\n largest = item\n it1 = i\n elif largest > item > second_largest:\n second_largest = item\n it2 = i\n # Return the results as a tuple\n return largest, it1, second_largest, it2", "def findOutcomes(outcomeCounts, highestCount):\n highestOutcomesList = []\n## ADD CODE HERE\n for index in outcomeCounts:\n if outcomeCounts[index] == highestCount:\n highestOutcomesList.append(index)\n\n return highestOutcomesList", "def highestMax(requestContext, seriesList, n):\n result_list = sorted( seriesList, key=lambda s: max(s) )[-n:]\n\n return sorted(result_list, key=lambda s: max(s), reverse=True)", "def get_max_loot(input_list):\n even = sum(input_list[::2])\n odd = sum(input_list[1::2])\n return even if even > odd else odd" ]
[ "0.62113386", "0.59263444", "0.586248", "0.5803011", "0.5801856", "0.5784802", "0.56722295", "0.5614632", "0.5612991", "0.55954313", "0.5578384", "0.5560552", "0.5556085", "0.5530599", "0.55201465", "0.55028576", "0.549978", "0.54275393", "0.5413033", "0.5404578", "0.5397812", "0.5389804", "0.538837", "0.5352174", "0.53493315", "0.53300554", "0.53107166", "0.530808", "0.5307843", "0.5274794" ]
0.7458074
0
Parse MBP data with the given URL
def parse_mbp_data(self, url): body = self.get_content(url) if body is None: return None doc_body = pq(body) content = doc_body('#main-container').html() lines = content.splitlines() record = { 'url': url, 'price': self.get_price(lines), 'year': self.get_year(lines), 'bat_count': self.search_int( r'循環\D*(\d+)', content, 1 ), 'screen': self.search_int( r'(1\d{1})[吋"\']', content, 1 ), 'cpu': self.search_string( r'i5|i7', content ), 'ram': self.search_int( r'(8|16)G', content, 1 ), 'hdd': self.search_int( r'(128|256|512)G', content, 1 ), } return record
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse(self, url):\n pass", "def readData(url, data=[], code='gbk'):\n tmp = urlopen(url).read().decode(code)\n data.append(tmp)\n return tmp", "def parse_url(url):\n url_parts = url.split('/')\n webcam_name = url_parts[-3] + 'CAM' + url_parts[-2]\n file_ext = url[-5:-1]\n last_update = 0.\n return {\n 'url': url[:-1], # Skip end of line\n 'name': webcam_name,\n 'imgpath': os.path.join(WEBCAM_DIR, webcam_name, '%d' + file_ext),\n 'last_update': last_update\n }", "def get_data(self, url):\n return self.get(url).get('data', [])", "def callback_from_url(self, url):\n if re.search(\"https?://mebook.cc/page/.*\", url):\n return self.parse_list_page\n\n if re.search(\"https?://mebook.cc/date/.*\", url):\n return self.parse_archive_page\n\n if re.search(\"https?://mebook.cc/category/.*$\", url):\n return self.parse_category_page\n\n if re.search(\"https?://mebook.cc/[^/]+.html$\", url):\n return self.parse_book_page\n\n if re.search(\"https?://mebook.cc/download.php?id=.*$\", url):\n return self.parse_download_page", "def parse(self, response):\n\n content_url_list = response.xpath(\n '//*[@height=\"22\"]/font/a/@href').extract()\n yield Request(url='http://www.pbc.gov.cn' + content_url_list[randint(0, 14)], headers=self.headers, callback=self.parse_data, method='POST')", "def get_records_from_url(url):\n with requests.get(url) as response:\n source = response.text\n return parseString(source)", "def get_data_from_URL(url):\n querystring = {\"q\": \"eminem\"}\n headers = {\n 'x-rapidapi-host': \"deezerdevs-deezer.p.rapidapi.com\",\n 'x-rapidapi-key': \"SIGN-UP-FOR-KEY\"\n }\n\n response = requests.request(\n \"GET\", url, headers=headers, params=querystring)\n received_file = json.loads(response.text)\n return received_file", "def parse_url(self, url: str):\n time.sleep(0.1)\n resp = requests.get(url, timeout=5).content.decode('windows-1250')\n selector = Selector(text=resp)\n name_addresses = []\n if not self.is_right_page(selector):\n return []\n\n company = self.parse_business_name(selector)\n name_addresses += self.parse_management_body(selector)\n name_addresses += self.parse_partners(selector)\n\n ret = []\n for name_address in name_addresses:\n name_address = [re.sub(r'[\",;]', '', n).strip() for n in name_address]\n print(\"Found name: \", name_address)\n is_russian = self.RUSSIA in name_address[1]\n ret.append([re.sub(r'[\",;]', '', company).strip()] + name_address + [is_russian])\n return ret", "def get_data_from_web():\n pass", "def get_fred_data(url):\n pass", "def _extract_data(self):\n if self.URL_type == \"youtube\" or self.URL_type == \"ytmusic\":\n self._get_youtube_data_url()\n elif self.URL_type == \"soundcloud\":\n self._get_soundcloud_data()", "def fetch_song_data(url):\r\n response = requests.get(url)\r\n return response.text", "def parse(url):\n\n # Local constants\n\n # Local variables\n data = requests.get(url).text\n soup = BeautifulSoup(data, \"html.parser\")\n divs = soup.find(\"div\", \"price-point price-point--market\")\n labels = divs.find_all(\"th\", \"price-point__name\")\n prices = divs.find_all(\"td\", \"price-point__data\")\n card_name = soup.find(\"h1\", {\"class\": \"product-details__name\"}).text\n set_name = soup.find(\"a\", attrs={\"data-aid\": \"setNameSearch\"}).text\n card_data = {\n \"url\": url,\n \"card_name\": card_name.replace(\"\\'\", \"\"),\n \"set_name\": set_name.replace(\"\\'\", \"\")\n }\n\n #****** start parse() ******#\n\n for i, val in enumerate(prices):\n # Verifying what's in prices is actual a number of sorts\n if not 
is_number(prices[i].text[1:]): card_data[labels[i].text] = None\n else: card_data[labels[i].text] = float(prices[i].text[1:])\n\n return card_data", "def parse(self, response):", "def process_poem(url):\r\n\r\n response = get(url)\r\n html_soup = BeautifulSoup(response.text, 'html.parser')\r\n beyts = html_soup.find_all('span', class_ = 'verse')\r\n beyts = [beyt.text for beyt in beyts]\r\n info_dict = process_key_items(html_soup)\r\n info_dict['beyts'] = beyts\r\n\r\n return info_dict", "def _parse_source(self, response):\n return response.url", "def get_data(self, url):\n\n req = urllib2.Request(url)\n # urlencode the query dictionary\n try:\n r = urllib2.urlopen(req)\n result = r.read()\n except:\n result = 'The url: %s is not responding.' % (url)\n return result", "def data_collector(self, n, url, ret):\n try:\n html = urllib2.urlopen(url).read()\n soup = BeautifulSoup(html)\n ret[n] = [soup.title.string, url, html[0:100]]\n except:\n ret[n] = [\"Error\", url, \"Error\"]", "def __fetch_from_url(url: str) -> Any:\n song_information: Any = None\n try:\n # Send the request and load the returned contents.\n req = request.Request(url, headers={\n 'User-Agent': Config.Config.get_user_agent()\n })\n response = request.urlopen(req)\n contents: str = response.read().decode('utf-8')\n except (HTTPError, TimeoutError) as ex:\n Logger.Logger.log_error(str(ex))\n Logger.Logger.log_error('Request failed for URL: ' + url)\n return\n # Parse the response from the endpoint as a JSON encoded string\n data: Any = json.loads(contents)\n # Check if response contains at least one result, otherwise return \"None\".\n if data['resultCount'] > 0:\n song_information = data\n return song_information", "def get_data(url: str, parser: str = \"lxml\", headers: dict = None) -> dict:\n cntnt, rslt, msg = \"content\", \"result\", \"message\"\n pattern_http = \"^http\"\n m_l = {\n \"start\": \"Начинаем загрузку данных с сайта\",\n \"error\": \"Не удалось получить данные:\\n\\t>> Адрес:\\t%s\\n\\t>> Ошибка:\\t%s\",\n \"get_site\": \"Пробуем скачать данные с ресурса\",\n \"url_check\": \"Проверяем, являются ли введенные данные адресом веб-страницы\",\n \"url_correct\": \"Введен корректный адрес веб-страницы:\\t%s\",\n \"path_check\": \"Проверяем, являются ли введенные данные адресом файла \\n\\t>> Адрес:\\t%s\",\n \"parse\": \"Пробуем обработать полученные данные\",\n \"agent\": \"Содержиимое строки headers:\\n\\t>>\\t%s\",\n \"success\": \"Данные с сайта успешно загружены\"\n }\n\n log.info(m_l[\"start\"])\n log.debug(m_l[\"url_check\"])\n\n if re.match(pattern_http, url):\n log.debug(m_l[\"url_correct\"], url)\n try:\n log.debug(m_l[\"get_site\"])\n request_to_site = request.Request(url=url, headers=headers if headers else {})\n response = request.urlopen(request_to_site)\n except (error.URLError, error.HTTPError) as err:\n log.error(m_l[\"error\"], url, err)\n log.error(m_l[\"agent\"], headers)\n return {rslt: False, cntnt: str(err), msg: 5152}\n\n try:\n log.debug(m_l[\"parse\"])\n site_data = BeautifulSoup(response, parser)\n except error.HTTPError as err:\n log.error(m_l[\"error\"], *(url, err))\n return {rslt: False, cntnt: str(err), msg: 5152}\n else:\n\n log.debug(m_l[\"path_check\"], url)\n try:\n log.debug(m_l[\"get_site\"])\n site_data = BeautifulSoup(open(url), parser)\n except (FileNotFoundError, UnicodeDecodeError) as err:\n log.error(m_l[\"error\"], *(url, err))\n return {rslt: False, cntnt: str(err), msg: 5152}\n\n log.info(m_l[\"success\"])\n return {rslt: True, cntnt: site_data, msg: None}", "def 
_real_extract(self, url):\n pass", "def __fetch_data(self, url):\n try:\n response = urlopen(url)\n root = ET.fromstring(response.read())\n except HTTPError as exc:\n root = ET.fromstring(exc.read())\n raise ValueError(root.get('message'))\n return root", "def get_jsonparsed_data(url):\n response = urlopen(url)\n data = response.read().decode(\"utf-8\")\n return json.loads(data)", "def get_jsonparsed_data(url):\n response = urlopen(url)\n data = response.read().decode(\"utf-8\")\n return json.loads(data)", "def get_jsonparsed_data(url):\n response = urlopen(url)\n data = response.read().decode(\"utf-8\")\n return json.loads(data)", "def get_jsonparsed_data(url):\n response = urlopen(url)\n data = response.read().decode(\"utf-8\")\n return json.loads(data)", "def process_url(url):\n\n metadata = None\n body = None\n\n # Fetch the URL, returning a (metadata, content) tuple or None if error\n info = Scraper.fetch_url(url)\n\n if info:\n metadata, body = info\n if metadata is None:\n if Settings.DEBUG:\n print(\"No metadata\")\n metadata = dict(heading = \"\",\n author = \"\",\n timestamp = datetime.utcnow(),\n authority = 0.0)\n else:\n if Settings.DEBUG:\n print(\"Metadata: heading '{0}'\".format(metadata.heading))\n print(\"Metadata: author '{0}'\".format(metadata.author))\n print(\"Metadata: timestamp {0}\".format(metadata.timestamp))\n print(\"Metadata: authority {0:.2f}\".format(metadata.authority))\n metadata = vars(metadata) # Convert namedtuple to dict\n\n # Extract the text content of the HTML into a list\n tlist = TextList()\n extract_text(body, tlist)\n text = tlist.result()\n\n # Eliminate soft hyphen and zero-width space characters\n text = re.sub('\\u00AD|\\u200B', '', text)\n\n # Eliminate consecutive whitespace\n text = re.sub(r'\\s+', ' ', text)\n\n # Tokenize the resulting text, returning a generator\n # noinspection PyRedundantParentheses\n return (metadata, tokenize(text))", "def downloadData(url):\n \n content = urllib2.urlopen(url)\n return content", "def parse_url(url):\n results = NotifyBase.parse_url(url)\n\n if not results:\n # We're done early as we couldn't load the results\n return results\n\n # Apply our settings now\n devices = NotifyBase.unquote(results['fullpath'])\n\n # Store our devices\n results['devices'] = '%s/%s' % (results['host'], devices)\n\n return results" ]
[ "0.71271515", "0.63445747", "0.6134861", "0.5800437", "0.57923275", "0.5770726", "0.57677466", "0.57582086", "0.57255924", "0.5717655", "0.5700871", "0.5700769", "0.5692097", "0.56919336", "0.5638243", "0.56346065", "0.5613177", "0.5584839", "0.5567151", "0.5546901", "0.5540418", "0.55293757", "0.54983014", "0.54981196", "0.54981196", "0.54981196", "0.54981196", "0.548708", "0.5463298", "0.5459268" ]
0.78535324
0
Test that reset fixtures endpoint is not available when it is not set up.
def test_reset_fixtures_url_not_found_if_not_setup(settings, caplog): caplog.set_level('WARNING') settings.ALLOW_TEST_FIXTURE_SETUP = None response = _request_reset_fixtures() assert response.status_code == status.HTTP_404_NOT_FOUND assert caplog.messages == [ 'The `reset_fixture` endpoint is not enabled. The ALLOW_TEST_FIXTURE_SETUP environment' ' variable is not set.', 'Not Found: /testfixtureapi/reset-fixtures/', ]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_url_found_if_env_setup():\n response = _request_reset_fixtures()\n assert response.status_code == status.HTTP_201_CREATED", "def testResetFixture(self):\n self.users.TESTAPI_resetFixture()\n self.users.add(\"katie\", \"password\")\n self.assertEqual(len(models.UsersModel.objects.all()), 1)\n self.users.TESTAPI_resetFixture()\n self.assertEqual(len(models.UsersModel.objects.all()), 0)", "def test_load_fixture_url_not_found_if_not_setup(settings, caplog):\n caplog.set_level('WARNING')\n\n settings.ALLOW_TEST_FIXTURE_SETUP = None\n response = _request_load_fixture({'fixture': [ADVISER_FIXTURE]})\n assert response.status_code == status.HTTP_404_NOT_FOUND\n\n with pytest.raises(Advisor.DoesNotExist):\n Advisor.objects.get(pk=ADVISER_FIXTURE['pk'])\n\n assert caplog.messages == [\n 'The `load_fixture` endpoint is not enabled. The ALLOW_TEST_FIXTURE_SETUP environment'\n ' variable is not set.',\n 'Not Found: /testfixtureapi/load-fixture/',\n ]", "def tearDown(self):\n super(TestSelectAPI, self).tearDown()\n self.destroy_fixtures()", "def tearDown(self):\n self.api.reset()", "def tearDown(self):\n self.api.reset()", "def tearDown(self):\n self.api.reset()", "def tearDown(self):\n self.api.reset()", "def test_reset_db_mounted(db, client, settings, reload_urls):\n reload_urls(settings)\n resp = client.post(\"/__dev/reset-db/\")\n assert resp.status_code == 404\n\n settings.DEVELOPMENT_ENDPOINTS = True\n reload_urls(settings)\n\n resp = client.post(\"/__dev/reset-db/\")\n assert resp.status_code == 200\n assert resp.json()[\"status\"] == \"done\"", "def test_user_tries_to_reset_with_wrong_email(self):\n # reset url without registering a user\n response = self.client().post(AuthTestCase.reset, data=self.reset_email)\n self.assertIn(\"Email does not exist\", str(response.data))\n # bad request\n self.assertEqual(response.status_code, 400)", "def reset_mock():\n if not var_cache['local'].reset_mock_interface():\n raise AssertionError('reset mock server fail')", "def setUp(self):\r\n super(UrlResetMixin, self).setUp()\r\n self._reset_urls()\r\n self.addCleanup(self._reset_urls)", "def tearDownFixture(self):\n pass", "def test_image_reset(self):\n\t\tresponse = self.client.get(reverse('reset'))\n\t\tself.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_new_adviser_removed_by_reset_fixtures():\n new_adviser_pk = AdviserFactory().pk\n _request_reset_fixtures()\n with pytest.raises(Advisor.DoesNotExist):\n Advisor.objects.get(pk=new_adviser_pk)", "def setUp(self):\n super(UrlResetMixin, self).setUp()\n self._reset_urls()\n self.addCleanup(self._reset_urls)", "async def test_api_get_non_existing_state(\n hass: HomeAssistant, mock_api_client: TestClient\n) -> None:\n resp = await mock_api_client.get(\"/api/states/does_not_exist\")\n assert resp.status == HTTPStatus.NOT_FOUND", "def test_admin_api_endpoints_removed(self) -> None:\n self.expect_unrecognized(\"GET\", \"/_synapse/admin/v1/registration_tokens\")\n self.expect_unrecognized(\"POST\", \"/_synapse/admin/v1/registration_tokens/new\")\n self.expect_unrecognized(\"GET\", \"/_synapse/admin/v1/registration_tokens/abcd\")\n self.expect_unrecognized(\"PUT\", \"/_synapse/admin/v1/registration_tokens/abcd\")\n self.expect_unrecognized(\n \"DELETE\", \"/_synapse/admin/v1/registration_tokens/abcd\"\n )\n self.expect_unrecognized(\"POST\", \"/_synapse/admin/v1/reset_password/foo\")\n self.expect_unrecognized(\"POST\", \"/_synapse/admin/v1/users/foo/login\")\n self.expect_unrecognized(\"GET\", \"/_synapse/admin/v1/register\")\n 
self.expect_unrecognized(\"POST\", \"/_synapse/admin/v1/register\")\n self.expect_unrecognized(\"GET\", \"/_synapse/admin/v1/users/foo/admin\")\n self.expect_unrecognized(\"PUT\", \"/_synapse/admin/v1/users/foo/admin\")\n self.expect_unrecognized(\"POST\", \"/_synapse/admin/v1/account_validity/validity\")", "def tearDown(self):\n reset()", "def test_reset_settings(self):\n\n self.feature_test.set_percentage(5)\n self.feature_test.add_to_whitelist(3)\n self.feature_test.add_to_blacklist(4)\n self.feature_test.reset_settings()\n\n generated = Feature(\"testing\")\n self.assertEqual(generated.percentage, 0)\n self.assertFalse(3 in generated.whitelist)\n self.assertFalse(4 in generated.blacklist)", "def setUp(self):\n self.setup_start_servers = False\n super(ZeroConfigTest, self).setUp()", "def setUp(self):\r\n State.query.delete()\r\n User.query.delete()\r\n Election.query.delete()\r\n RegistrationRule.query.delete()\r\n StateRegistrationRule.query.delete()\r\n self.client = app.test_client()", "def test_create_user_url_not_found_if_not_setup(settings, caplog):\n caplog.set_level('WARNING')\n\n settings.ALLOW_TEST_FIXTURE_SETUP = None\n response = _request_create_user(SEED_USER_DATA)\n assert response.status_code == status.HTTP_404_NOT_FOUND\n\n with pytest.raises(Advisor.DoesNotExist):\n Advisor.objects.get(email=SEED_USER_DATA['email'])\n\n assert caplog.messages == [\n 'The `create_user` endpoint is not enabled. The ALLOW_TEST_FIXTURE_SETUP environment'\n ' variable is not set.',\n 'Not Found: /testfixtureapi/create-user/',\n ]", "def setUp(self):\n super(TestPasswordResetRequest, self).setUp()", "def test_reset_request_non_existing_email(client, models):\n response = client.post(\n \"/password/reset-request\", json={\"email\": \"[email protected]\"}\n )\n assert response.status_code == 404", "def test_reset_default(self):\n check_attr(self.o, 'reset')\n self.subtest_noAgent(self.o)\n self.assertIsNone(self.o.reset(), \"no output expected\")\n self.subtest_someAgents(self.o, 2, 10)", "def test_empty_azure_config_dir():\n pass", "def tearDown(self):\n updateConfigurationCmd = updateConfiguration.updateConfigurationCmd()\n updateConfigurationCmd.name = \"use.external.dns\"\n updateConfigurationCmd.value = \"false\"\n updateConfigurationCmd.scopename = \"zone\"\n updateConfigurationCmd.scopeid = 1\n self.apiClient.updateConfiguration(updateConfigurationCmd)", "def test_reset():\n dev = _aws_device(wires=2)\n dev._circuit = CIRCUIT\n dev._task = TASK\n\n dev.reset()\n assert dev.circuit is None\n assert dev.task is None", "def test_get_empty_datasets(self):\n endpoints = [\"elections\", \"races\", \"candidates\", \"votes\", \"types\"]\n for endpoint in endpoints:\n response = self.client.get(\"/api/{}\".format(endpoint),\n headers=[(\"Accept\", \"application/json\")])\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.mimetype, \"application/json\")\n\n data = json.loads(response.data.decode(\"ascii\"))\n self.assertEqual(data, [])" ]
[ "0.75311947", "0.70474774", "0.6593545", "0.6578229", "0.6540039", "0.6540039", "0.6540039", "0.6540039", "0.64770347", "0.6421178", "0.6362949", "0.6320278", "0.63132834", "0.63124615", "0.62526333", "0.6231132", "0.6146283", "0.6138933", "0.61089426", "0.60708094", "0.6065636", "0.6057487", "0.6056114", "0.60310715", "0.6015795", "0.600834", "0.59846413", "0.598307", "0.59806895", "0.59377277" ]
0.8197186
0
Test that reset fixtures endpoint is available when set up.
def test_url_found_if_env_setup(): response = _request_reset_fixtures() assert response.status_code == status.HTTP_201_CREATED
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_reset_fixtures_url_not_found_if_not_setup(settings, caplog):\n caplog.set_level('WARNING')\n\n settings.ALLOW_TEST_FIXTURE_SETUP = None\n response = _request_reset_fixtures()\n assert response.status_code == status.HTTP_404_NOT_FOUND\n\n assert caplog.messages == [\n 'The `reset_fixture` endpoint is not enabled. The ALLOW_TEST_FIXTURE_SETUP environment'\n ' variable is not set.',\n 'Not Found: /testfixtureapi/reset-fixtures/',\n ]", "def testResetFixture(self):\n self.users.TESTAPI_resetFixture()\n self.users.add(\"katie\", \"password\")\n self.assertEqual(len(models.UsersModel.objects.all()), 1)\n self.users.TESTAPI_resetFixture()\n self.assertEqual(len(models.UsersModel.objects.all()), 0)", "def tearDown(self):\n super(TestSelectAPI, self).tearDown()\n self.destroy_fixtures()", "def tearDown(self):\n self.api.reset()", "def tearDown(self):\n self.api.reset()", "def tearDown(self):\n self.api.reset()", "def tearDown(self):\n self.api.reset()", "def tearDownFixture(self):\n pass", "def setUp(self):\r\n super(UrlResetMixin, self).setUp()\r\n self._reset_urls()\r\n self.addCleanup(self._reset_urls)", "def setUpFixture(self):\n pass", "def fixtures():", "def test_reset_db_mounted(db, client, settings, reload_urls):\n reload_urls(settings)\n resp = client.post(\"/__dev/reset-db/\")\n assert resp.status_code == 404\n\n settings.DEVELOPMENT_ENDPOINTS = True\n reload_urls(settings)\n\n resp = client.post(\"/__dev/reset-db/\")\n assert resp.status_code == 200\n assert resp.json()[\"status\"] == \"done\"", "def setUp(self):\n super(UrlResetMixin, self).setUp()\n self._reset_urls()\n self.addCleanup(self._reset_urls)", "def setUp(self):\n server.Inventory.remove_all()\n server.Inventory(0, \"shampoo\", 2, 'new').save()\n server.Inventory(0, \"conditioner\", 5, 'new').save()\n self.app = server.app.test_client()", "def setUp(self):\r\n State.query.delete()\r\n User.query.delete()\r\n Election.query.delete()\r\n RegistrationRule.query.delete()\r\n StateRegistrationRule.query.delete()\r\n self.client = app.test_client()", "def setUp(self):\n self.setup_start_servers = False\n super(ZeroConfigTest, self).setUp()", "def test_fixture(request):\n def finalizer():\n teardown()\n request.addfinalizer(finalizer)\n setup()", "def setUp(self):\n super(TestPasswordResetRequest, self).setUp()", "def setUp(self):\n self.client = DummyClient()", "def tearDown(self):\n reset()", "def setUp(self):\n self.client = Client()", "def setUp(self):\n self.client = Client()", "def setUp(self):\n self.client = Client()", "def setUp(self):\n self.client = Client()", "def setUp(self):\n\n self.client = app.test_client()\n app.config['TESTING'] = True", "def setUp(self):\n\n self.client = app.test_client()\n app.config['TESTING'] = True", "def setUp(self):\n self.server_address = \"http://localhost:3030/$/\"\n self.request_address = \"http://localhost:3030/ds\"\n self.api = \"http://localhost:4032/\"\n self.version = \"0.2\"", "def setUp(self):\r\n\r\n app.config['TESTING'] = True\r\n self.client = app.test_client()", "def test_image_reset(self):\n\t\tresponse = self.client.get(reverse('reset'))\n\t\tself.assertEqual(response.status_code, status.HTTP_200_OK)", "def reset_mock():\n if not var_cache['local'].reset_mock_interface():\n raise AssertionError('reset mock server fail')" ]
[ "0.7736993", "0.72713464", "0.6802365", "0.6663078", "0.6663078", "0.6663078", "0.6663078", "0.65864545", "0.6559959", "0.65343267", "0.6504166", "0.6477931", "0.6456893", "0.64509386", "0.64125675", "0.63783133", "0.63711256", "0.63341296", "0.62854946", "0.6279207", "0.62524074", "0.62524074", "0.62524074", "0.62524074", "0.62408763", "0.62408763", "0.62384856", "0.6219172", "0.61764413", "0.6159353" ]
0.7328642
1
Test new adviser is removed by reset fixtures.
def test_new_adviser_removed_by_reset_fixtures(): new_adviser_pk = AdviserFactory().pk _request_reset_fixtures() with pytest.raises(Advisor.DoesNotExist): Advisor.objects.get(pk=new_adviser_pk)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tearDown(self):\n del self.review", "def teardown(self):\n del self.testInst, self.dname\n\n return", "def tearDown(self):\n del self.new", "def tearDown(self):\n self.labGroup.delete()", "def teardown(self):\n del self.testInst, self.dname, self.test_vals, self.test_fracs\n\n return", "def tearDownFixture(self):\n pass", "def tearDown(self):\n super(TestSelectAPI, self).tearDown()\n self.destroy_fixtures()", "def tearDown(self):\n self.labGroup.delete()\n super(NoLabForUser, self).tearDown()", "def tearDown(self):\n del self.a", "def setUp(self):\n Pet.remove_all()", "def teardown(self):\n del self.testInst, self.dname, self.test_val_length\n\n return", "def teardown(self):\n del self.testInst, self.dname, self.test_val_length\n\n return", "def tearDown(self):\n try:\n os.remove(self.fixtureFile)\n except OSError:\n pass", "def teardown(self):", "def teardown(self):", "def teardown(self):", "def tearDown(self):\n try:\n os.remove(self.fixture_file)\n except OSError:\n pass", "def teardown(self):\n\n del self.testC, self.insts, self.testInst, self.dname, self.test_vals\n\n return", "def teardown(self):\n for mr in self.mrs:\n mr.restore_pretest(pretest=mr.pretest_info)", "def tearDown(self):\n for name in ('Config', 'FlatPage'):\n model = getattr(shavida.models, name)\n model.objects.all().delete()\n\n # def test_is_first_refill_with_member_who_never_refilled(self):\n # customer = Member.objects.create_user(account_type=Member.CUSTOMER, username='77777777', password='123456',\n # email='[email protected]', postpaid_plan=None,\n # storage_provider='CVB')\n # self.assertTrue(customer.is_first_refill)\n\n # def test_is_first_refill_with_member_who_refilled_once(self):\n # prepaid_plan = PrepaidPlan(id=2, name='plan1', cost=5000)\n # storage = Storage(name='storage', size=32000, size_label=32000, type=Storage.FLASH_DISK)\n # prepaid_plan.save()\n # storage.save()\n # customer = Member.objects.create_user(account_type=Member.CUSTOMER, username='77777777', password='123456',\n # email='[email protected]', postpaid_plan=None, prepaid_plan=prepaid_plan,\n # storage_provider='CVB', storage_status=Storage.ACQUIRING)\n # when = datetime.now() - timedelta(days=12)\n # latest_prepayment = RetailPrepayment(member=customer, when=when, amount=5000, storage=storage, duration=30, balance=20000)\n # latest_prepayment.save()\n # self.assertFalse(customer.can_order_adult)\n\n # def test_can_order_adult_with_member_having_prepaid_plan_and_max_orders_reached(self):\n # customer = Member.objects.create_user(account_type=Member.CUSTOMER, username='77777777', password='123456',\n # email='[email protected]', postpaid_plan=None,\n # storage_provider='CVB' )\n # latest_prepayment = RetailPrepayment(member=customer, amount=5000, duration=30, balance=20000)\n # latest_prepayment.save()\n # for i in range(4):\n # order = CVBOrder(member=customer, cost=5000, status=CVBOrder.PENDING,\n # storage_amount=0, movies_amount=0, delivery_amount=0,copy_amount=0)\n # order.save()\n # self.assertTrue(customer.can_order_adult)\n #\n # def test_can_order_adult_with_member_having_prepaid_plan_and_max_orders_not_reached(self):\n # customer = Member.objects.create_user(account_type=Member.CUSTOMER, username='77777777', password='123456',\n # email='[email protected]', postpaid_plan=None,\n # storage_provider='CVB')\n # latest_prepayment = RetailPrepayment(member=customer, amount=5000, duration=30, balance=20000)\n # latest_prepayment.save()\n # for i in range(2):\n # order = CVBOrder(member=customer, cost=5000, 
status=CVBOrder.PENDING,\n # storage_amount=0, movies_amount=0, delivery_amount=0,copy_amount=0)\n # order.save()\n # self.assertFalse(customer.can_order_adult)\n #\n # def test_can_order_adult_with_member_having_prepaid_plan_and_prepayment_expired(self):\n # customer = Member.objects.create_user(account_type=Member.CUSTOMER, username='77777777', password='123456',\n # email='[email protected]', postpaid_plan=None,\n # storage_provider='CVB')\n # when = datetime.now() - timedelta(days=40)\n # latest_prepayment = RetailPrepayment(member=customer, when=when, amount=5000, duration=30, balance=20000)\n # latest_prepayment.save()\n # self.assertTrue(customer.can_order_adult)\n #\n # def test_can_order_adult_with_member_having_prepaid_plan_and_prepayment_not_expired(self):\n # customer = Member.objects.create_user(account_type=Member.CUSTOMER, username='77777777', password='123456',\n # email='[email protected]', postpaid_plan=None,\n # storage_provider='CVB')\n # when = datetime.now() - timedelta(days=12)\n # latest_prepayment = RetailPrepayment(member=customer, when=when, amount=5000, duration=30, balance=20000)\n # latest_prepayment.save()\n # self.assertFalse(customer.can_order_adult)", "def tearDown(self):\n del self.business_item_class", "def tearDown(self) -> None:\n\n del self.checker", "def tearDown(self):\n self.teardown_beets()", "def teardown(self):\n pass", "def teardown(self):\n pass", "def teardown(self):\n pass", "def tearDown(self):\n super(LabForUser, self).tearDown()\n if self.compound is not None:\n self.compound.delete()", "def teardown(self) -> None:", "def teardown(self) -> None:", "def teardown(self) -> None:" ]
[ "0.6999229", "0.6923117", "0.68716764", "0.6839896", "0.6822065", "0.6816475", "0.68079275", "0.6801706", "0.6726831", "0.66917264", "0.66471547", "0.66471547", "0.6638085", "0.66222286", "0.66222286", "0.66222286", "0.66038674", "0.6575041", "0.6545409", "0.65337646", "0.6512986", "0.6509363", "0.6508515", "0.64975417", "0.64975417", "0.64975417", "0.64856595", "0.64837205", "0.64837205", "0.64837205" ]
0.793325
0
Test that create user endpoint is not available when it is not set up.
def test_create_user_url_not_found_if_not_setup(settings, caplog): caplog.set_level('WARNING') settings.ALLOW_TEST_FIXTURE_SETUP = None response = _request_create_user(SEED_USER_DATA) assert response.status_code == status.HTTP_404_NOT_FOUND with pytest.raises(Advisor.DoesNotExist): Advisor.objects.get(email=SEED_USER_DATA['email']) assert caplog.messages == [ 'The `create_user` endpoint is not enabled. The ALLOW_TEST_FIXTURE_SETUP environment' ' variable is not set.', 'Not Found: /testfixtureapi/create-user/', ]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_create_user_with_no_role(setup_client):\n client = setup_client\n payload = {\n 'email': '[email protected]',\n 'password': 'password',\n 'name': 'Test name'\n }\n res = client.post(CREATE_USER_URL, payload)\n assert res.status_code == status.HTTP_400_BAD_REQUEST\n user_exists = get_user_model().objects.filter(\n email=payload['email']).exists()\n assert not user_exists", "def test_create_user(self):\n pass", "def test_create_user(self):\n pass", "def test_create_user(self):\n pass", "def test_dont_create_user(self):\n self.assertFalse(User.objects.exists())", "def test_user_exists(self):\n payload = {'email': '[email protected]','password': 'testpass'}\n res = self.client.post(CREATE_USER_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_create_user_exists(self):\n payload = {\n \"email\": \"[email protected]\",\n \"name\": \"Test\",\n 'password': 'test123'\n }\n create_user(**payload)\n\n res = self.client.post(CREATE_USER_URL, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_create_no_email(self):\n\n self.portal.portal_properties.site_properties.use_email_as_login = True\n\n self.assertRaises(\n ValueError,\n api.user.create,\n username='chuck', password='secret'\n )", "def test_nonexistent_user(self):\n nonexistent_username = \"nonexistent user\"\n self.retired_username = get_retired_username_by_username(nonexistent_username)\n data = {'username': nonexistent_username}\n headers = self.build_jwt_headers(self.superuser)\n response = self.client.post(self.url, data, **headers)\n self.assert_response_correct(response, 404, None)", "def test_user_exists(self):\n payload = {\n 'email': '[email protected]',\n 'password': 'testpass123'\n }\n create_user(**payload)\n res = self.client.post(CREATE_USER_API, payload)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_if_not_created_unauthorized(self):\r\n payload = {\r\n \"email\": \"[email protected]\",\r\n \"password\": \"password\",\r\n \"name\": \"asdasd\",\r\n \"is_superuser\": False,\r\n }\r\n\r\n res = self.client_not_authenticated.post(reverse(CREATE_USER_URL),data=payload)\r\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_good_user_creation(self):\n data = json.dumps({\n \"username\" : \"mark\", \"email\" : \"[email protected]\",\n \"password\" : \"secret12345\", \"confirm_password\" : \"secret12345\"})\n response = self.app.post(\n '/api/v3/users', data=data,\n content_type='application/json',\n headers=self.admin_header)\n self.assertEqual(response.status_code, 201)", "def test_signup_attempt_with_nonexistent_user(self):\n\n details = {\"email\": \"[email protected]\",\n \"password\": \"oiudof987ewrqlwe\"}\n\n res = self.client.post(\n \"/api/v2/auth/login\", data=json.dumps(details), content_type=\"application/json\")\n self.assertEqual(res.status_code, 404)", "def test_create_user_invalid_method(self):\n res = self.client.get(CREATE_USER_URL, self.mock_user)\n\n self.assertEqual(res.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)", "def test_08_create_user_not_exists(self):\n\n _, user = self.get_random_item(models.User)\n utils.create_user(user, session=self.session)\n success, error = utils.create_user(user, session=self.session)\n self.assertFalse(success)\n self.assertTrue(error)", "def test_unauthorized_create_user(self):\n res = self.submit()\n\n assert res.status_code == 401", "def test_create(self):\n urls = [reverse('api:user-list')]\n data = {\n \"username\": \"newuser\",\n 
\"email\": \"[email protected]\",\n \"password\": \"password\"\n }\n access = {\n \"forbidden\": [self.anonymous_client, self.readonly_client, self.custodian_1_client],\n \"allowed\": [self.admin_client]\n }\n for client in access['forbidden']:\n for url in urls:\n self.assertIn(\n client.post(url, data, format='json').status_code,\n [status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]\n )\n\n for client in access['allowed']:\n for url in urls:\n self.assertEqual(\n client.post(url, data, format='json').status_code,\n status.HTTP_201_CREATED\n )", "def test_create_user_exists(self):\n payload = {\n \"user\": {\n \"email\": \"[email protected]\",\n \"password\": \"useruser111\",\n \"is_doctor\": False,\n \"is_hospital_admin\": True\n },\n 'first_name': 'Test',\n 'last_name': 'JustUser'\n }\n sample_user(\n payload['user']['email'], payload['user']['password'],\n payload['user']['is_doctor'], payload['user']['is_hospital_admin']\n ),\n response = self.client.post(HOSPITAL_ADMIN_REGISTER_URL, payload, format='json')\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_user_exists(self):\r\n payload = {\r\n 'email': '[email protected]',\r\n 'password': 'testpass',\r\n 'name': 'Maks'\r\n }\r\n create_user(**payload)\r\n\r\n res = self.client.post(CREATE_USER_URL, payload)\r\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_user_exists(self):\n\n payload = {\n 'email': '[email protected]',\n 'password': 'test11',\n 'name': \"test name\"\n }\n\n create_user(**payload)\n\n res = self.client.post(CREATE_USER_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_api_can_create_users(self):\n res = self.client().post('/api/v1/user/', data = self.req)\n self.assertEquals(res.status_code, 200)\n self.assertIn('mary', str(res.data))", "def test_create_no_username(self):\n\n # If there is no username, email will be used instead\n properties = self.portal.portal_properties.site_properties\n properties.manage_changeProperties(use_email_as_login=True)\n\n user = api.user.create(\n email='[email protected]',\n password='secret'\n )\n\n self.assertEquals(user.getUserName(), '[email protected]')\n\n # But if using emails as a username is disabled, we should get\n # an error\n properties.manage_changeProperties(use_email_as_login=False)\n\n self.assertRaises(\n ValueError,\n api.user.create,\n email='[email protected]', password='secret'\n )", "def test_user_exists(self):\n # requirments for creating user\n payload = {\n 'email': '[email protected]',\n 'password': 'abcd1234',\n 'name': 'Test',\n }\n\n # call the create function above\n create_user(**payload)\n\n # this will do a HTTP POST request and create a user\n response = self.client.post(CREATE_USER_URL, payload)\n\n # Check if statuscode returns a HTTP400 bad request\n # becos user already exist\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_user_exists(setup_client):\n client = setup_client\n payload = {\n 'email': '[email protected]',\n 'password': 'testpass',\n 'role': 'Supplier',\n 'name': 'Test name'\n }\n create_user(**payload)\n res = client.post(CREATE_USER_URL, payload)\n assert res.status_code == status.HTTP_400_BAD_REQUEST", "def test_create__forbidden(self):\n testing_config.sign_in('[email protected]', 123567890)\n\n with test_app.test_request_context(self.request_path):\n with self.assertRaises(werkzeug.exceptions.Forbidden):\n self.handler.do_post(self.appuser_id)\n\n new_appuser = 
user_models.AppUser.query(\n user_models.AppUser.email == '[email protected]').get()\n self.assertIsNone(new_appuser)", "def test_createUser_single(self):\n #TODO: this and other tests", "def test_user_does_not_exist(self):\n url = reverse('main_app:user', args=('non-exist',))\n response = self.client.get(url)\n self.assertEqual(response.status_code, 404)", "def test_able_to_create_a_user():\n response = api_helper.create_user(pytest.test_user)\n assert response.status_code == 201\n check_user_data_in_response(response.json()[\"data\"])", "def test_user_exists(self):\n payload = {'email': '[email protected]', 'password': 'password'}\n create_user(**payload)\n\n res = self.client.post(CREATE_USER_URL, payload)\n\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_user_existing_email(self):\n data = json.dumps({\n \"username\" : \"john\", \"email\" : \"[email protected]\",\n \"password\" : \"secret12345\", \"confirm_password\" : \"secret12345\"})\n res = self.app.post( # pylint: disable=W0612\n '/api/v3/users', data=data,\n content_type='application/json',\n headers=self.admin_header)\n response = self.app.post(\n '/api/v3/users', data=data,\n content_type='application/json',\n headers=self.admin_header)\n self.assertEqual(response.status_code, 400)" ]
[ "0.7512048", "0.749446", "0.749446", "0.749446", "0.7410163", "0.7287913", "0.7146045", "0.70801765", "0.70597535", "0.70591325", "0.70539397", "0.7052985", "0.7045943", "0.70404804", "0.70168406", "0.69980085", "0.6990669", "0.6990461", "0.6972314", "0.6959509", "0.69546616", "0.6951751", "0.69412506", "0.6939021", "0.69386727", "0.69259274", "0.6904014", "0.6890686", "0.68824935", "0.6879743" ]
0.7793711
0
Test that created user has token in the cache.
def test_created_user_has_token_in_cache(): _request_create_user(SEED_USER_DATA) token = SEED_USER_DATA['token'] cache_key = f'access_token:{token}' expected_data = { 'email': SEED_USER_DATA['email'], 'sso_email_user_id': SEED_USER_DATA['sso_email_user_id'], } assert cache.get(cache_key) == expected_data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_created_token(self):\n user = USER_MODEL.objects.get(username='adam')\n self.assertIsInstance(user.auth_token, Token)", "def test_user_cache(self):\n original_token = TestExpirableToken(user=self.user)\n token = TestExpirableToken.from_key(original_token.key)\n\n def test_init_cache():\n user = original_token.user\n\n def test_user_cache():\n user = token.user\n\n self.assertNumQueries(0, test_init_cache)\n self.assertNumQueries(0, test_user_cache)", "def test_returns_new_user_with_correct_email_if_token_exists(self):\r\n email = '[email protected]'\r\n token = Token.objects.create(email=email)\r\n user = PasswordlessAuthenticationBackend().authenticate(token.uid)\r\n new_user = User.objects.get(email=email)\r\n self.assertEquals(user, new_user)", "def test_get_existing_token_authenticated_user(self):\r\n\r\n user = UserFactory.create_batch(2)[1]\r\n user.info = create_tokens_for(user)\r\n\r\n # If the token exists, it should be retrieved\r\n res = self.app.get('/api/token/twitter?api_key=' + user.api_key)\r\n data = json.loads(res.data)\r\n\r\n assert data.get('twitter_token') is not None, data\r\n assert data.get('twitter_token')['oauth_token'] == 'token-for-%s' % user.name\r\n assert data.get('twitter_token')['oauth_token_secret'] == 'secret-for-%s' % user.name\r\n # And no other tokens should\r\n assert data.get('facebook_token') is None, data", "def test_token_only_for_1_user(self):\n db.session.add(self.user, self.user2)\n db.session.commit()\n user_token = self.user.generate_auth_token(1)\n self.assertNotEqual(self.user.verify_auth_token(user_token),\n self.user2)", "def test_expired_token(self):\n db.session.add(self.user, self.user2)\n db.session.commit()\n user_token = self.user.generate_auth_token(1)\n sleep(2)\n self.assertIsNone(self.user.verify_auth_token(user_token))", "def test_returns_existing_user_with_correct_email_if_token_exists(self):\r\n email = '[email protected]'\r\n existing_user = User.objects.create(email=email)\r\n token = Token.objects.create(email=email)\r\n user = PasswordlessAuthenticationBackend().authenticate(token.uid)\r\n self.assertEquals(user, existing_user)", "def test_token_verification(self):\n db.session.add(self.user, self.user2)\n db.session.commit()\n user_token = self.user.generate_auth_token(1)\n self.assertEqual(self.user.verify_auth_token(user_token), self.user)\n self.assertIsNone(self.\n user.verify_auth_token('jdjdje230920093944334j'))", "def test_create_user(self):\n data = {\n 'username': 'foobar',\n 'email': '[email protected]',\n 'password': 'somepassword'\n }\n response = self.client.post(self.create_url, data, format='json')\n user = User.objects.latest('id')\n token = Token.objects.get(user=user)\n\n self.assertEqual(User.objects.count(), 2)\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(response.data['username'], data['username'])\n self.assertEqual(response.data['email'], data['email'])\n self.assertFalse('password' in response.data)\n self.assertEqual(response.data['token'], token.key)", "def test_create_token_valid(self):\n create_mock_user(**self.mock_user)\n res = self.client.post(TOKEN_URL, self.mock_user)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertIn('token', res.data)", "def test_get_token_sucess(self):\n url = '/api-token-auth/'\n data = {'username': 'adam', 'password': '123'}\n\n response = Client().post(url, data)\n self.assertEqual(response.status_code, 200)\n\n # Check if the returned token is the same of User's Token.\n content = 
json.loads(response.content)\n user = USER_MODEL.objects.get(username='adam')\n self.assertEqual(content['token'], user.auth_token.key)", "def test_get_token(self):\n user_data = {\n 'username': 'Einstein',\n 'password': 'strongpsw123'\n }\n count_user_before = User.objects.count()\n count_token_before = Token.objects.count()\n\n url = '/api/users/'\n response = self.client.post(url, user_data, format='json')\n\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertTrue(response.data['token'] and type(response.data['token'] == str))\n\n self.assertEqual(User.objects.count(), count_user_before + 1)\n self.assertEqual(Token.objects.count(), count_token_before + 1)", "def test_token_cache(self, mock_check_token_not_revoked,\n mock_get_issuer_public_key):\n # Mock the external call to retrieve the IAM public key\n # used in the _verify_token and valid_token_to_id call\n mock_get_issuer_public_key.return_value = PUBLIC_KEY\n # Mock the external call to check the token has not been rejected\n # used in the valid_token_to_id call\n mock_check_token_not_revoked.return_value = CLIENT_ID\n\n payload_list = []\n\n # This payload will be valid as we will sign it with PRIVATE_KEY\n payload = self._standard_token()\n\n # Add the same token twice, this is what tests the cache functionality\n payload_list = [payload, payload]\n\n for payload in payload_list:\n token = self._create_token(payload, PRIVATE_KEY)\n with self.settings(IAM_HOSTNAME_LIST=['iam-test.idc.eu']):\n self.assertEqual(\n self._token_checker.valid_token_to_id(token), CLIENT_ID,\n \"Token with payload %s should not be accepted!\" % payload\n )", "def testCachingGetUserUsesTheCache(self):\n createUser(u'user', u'password', u'User', u'[email protected]')\n user = self.getUser(u'user')\n self.assertIsInstance(user, User)\n\n # Delete the user from the store\n self.store.remove(user)\n user = self.getUser(u'user')\n self.assertIsInstance(user, User)", "def test_create_token_for_user(self):\n\n credentials = {'email': '[email protected]', 'password': 'Testpass12'}\n get_user_model().objects.create_user(**credentials)\n\n response = self.client.post(URL_TOKEN, credentials)\n\n # Check that the response is HTTP 200, and contains a token.\n self.assertIn('token', response.data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_create_token_for_user(self):\n payload = {'email': '[email protected]',\n 'password': 'testPassWord',\n 'time_zone': 'Europe/Dublin'}\n create_user(**payload)\n res = self.client.post(TOKEN_URL, payload)\n\n self.assertIn('token', res.data)\n self.assertEqual(res.status_code, status.HTTP_200_OK)", "def testGet(self):\n userdict = {'id': 1,\n 'objectID': u'04585bec-28cf-4a21-bc3e-081f3ed62680',\n 'username': u'testuser',\n 'passwordHash': 'hash',\n 'fullname': u'Test User',\n 'email': u'[email protected]',\n 'role': Role.ANONYMOUS.id}\n self.cache.set('user:testuser', json.dumps(userdict))\n result = self.userCache.get(u'testuser')\n user = result.results\n self.assertEqual(1, user.id)\n self.assertEqual('04585bec-28cf-4a21-bc3e-081f3ed62680',\n str(user.objectID))\n self.assertEqual(u'testuser', user.username)\n self.assertEqual('hash', user.passwordHash)\n self.assertEqual(u'Test User', user.fullname)\n self.assertEqual(u'[email protected]', user.email)\n self.assertEqual(Role.ANONYMOUS, user.role)", "def test_get_user_and_token_after_creation(self):\n self.request.json_body = deepcopy(self.new_account)\n result = users_post_view(self.request)['d']\n session = 
self.session.query(Session).one()\n user = self.session.query(User).one()\n expected = dict_from_row(user, remove_fields=removals)\n expected['session'] = dict_from_row(session, remove_fields=removals)\n self.assertEqual(result, expected)", "def test_get_all_existing_tokens_authenticated_user(self):\r\n\r\n user = UserFactory.create_batch(2)[1]\r\n user.info = create_tokens_for(user)\r\n del user.info['google_token']\r\n\r\n res = self.app.get('api/token?api_key=' + user.api_key)\r\n data = json.loads(res.data)\r\n\r\n assert data.get('twitter_token') is not None, data\r\n assert data.get('facebook_token') is not None, data\r\n assert data.get('google_token') is None, data", "def test_create_token_for_user(self):\n payload = {'email': '[email protected]', 'password': 'testpass'}\n create_user(**payload)\n res = self.client.post(TOKEN_URL, payload)\n\n self.assertIn('token', res.data)\n self.assertEqual(res.status_code, status.HTTP_200_OK)", "def test_authtoken_refresh(self):\n hagrid = models.User(username='hagrid', fullname='Rubeus Hagrid')\n auth_token = models.AuthToken(user=hagrid, algorithm='hmac-sha-1')\n existing_token = auth_token.token\n existing_secret = auth_token.secret\n auth_token.refresh()\n self.assertNotEqual(existing_token, auth_token.token)\n self.assertNotEqual(existing_secret, auth_token.secret)", "def test_create_token_for_user(self):\n payload = {\n 'email': '[email protected]',\n 'password': 'abcd1234',\n }\n\n create_user(**payload)\n response = self.client.post(TOKEN_URL, payload)\n\n # We expect a token and should get a HTTP 200\n self.assertIn('token', response.data)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_create_token_for_user(self):\n payload = {\n 'email': '[email protected]',\n 'password': 'testpass'\n }\n create_user(**payload)\n res = self.client.post(TOKEN_URI, payload)\n self.assertIn('token', res.data)\n self.assertEqual(res.status_code, status.HTTP_200_OK)", "def test_create_token_for_user(self):\r\n payload = {\r\n 'email': '[email protected]',\r\n 'password': 'Testpass123',\r\n 'name': 'Maks'\r\n }\r\n create_user(**payload)\r\n\r\n res = self.client.post(TOKEN_URL, payload)\r\n\r\n self.assertIn('token', res.data)\r\n self.assertEqual(res.status_code, status.HTTP_200_OK)", "def test_authtoken_user(self):\n crusoe = self.fixtures.crusoe\n auth_client = self.fixtures.auth_client\n\n user_session = models.UserSession(buid=buid(), user=crusoe)\n auth_token_with_user_session = models.AuthToken(\n user=crusoe, user_session=user_session\n )\n self.assertIsInstance(\n auth_token_with_user_session.user_session.user, models.User\n )\n self.assertEqual(auth_token_with_user_session.user_session.user, crusoe)\n\n auth_token_without_user_session = models.AuthToken(\n auth_client=auth_client, user=crusoe\n )\n self.assertIsInstance(auth_token_without_user_session._user, models.User)\n self.assertEqual(auth_token_without_user_session._user, crusoe)", "def test_good_token(self):\n self.assertEqual(CloudCredentials.objects.count(), 0)\n cred = self.factory.create(access_token='good_token', expires_at=self.current_dt)\n with HTTMock(spark_cloud_mock):\n refresh_access_token()\n self.assertEqual(CloudCredentials.objects.count(), 1)\n self.assertEqual(CloudCredentials.objects._access_token(), 'good_token')\n cred.delete()", "def test_make_token(self):\n user = self.create_user()\n\n token_generator = EmailActivationTokenGenerator()\n token = token_generator.make_token(user)\n self.assertTrue(token_generator.check_token(user, token))", 
"def test_user_login(self):\n response = self.client.post(self.login_url, self.login_data, format=\"json\")\n token = response.data.get('token')\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(Token.objects.count(), 1)\n self.assertEqual(Token.objects.get().key, token)", "def test_create_token_to_user(self):\n data = {\n 'email': '[email protected]', \n 'password': \"testtest\"\n }\n res = self.client.post(TOKEN_URL, data)\n\n self.assertNotIn('token', res.data)\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_reset_user_duplicate(self):\n\n user = fake_clients.FakeUser(\n name=\"[email protected]\", password=\"123\", email=\"[email protected]\"\n )\n\n setup_identity_cache(users=[user])\n\n # Submit password reset\n url = \"/v1/actions/ResetPassword\"\n data = {\"email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(\n response.json()[\"notes\"],\n [\"If user with email exists, reset token will be issued.\"],\n )\n\n # Verify the first token doesn't work\n first_token = Token.objects.all()[0]\n\n # Submit password reset again\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(\n response.json()[\"notes\"],\n [\"If user with email exists, reset token will be issued.\"],\n )\n\n # confirm the old toke has been cleared:\n second_token = Token.objects.all()[0]\n self.assertNotEqual(first_token.token, second_token.token)\n\n # Now reset with the second token\n url = \"/v1/tokens/\" + second_token.token\n data = {\"password\": \"new_test_password2\"}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(user.password, \"new_test_password2\")" ]
[ "0.77992666", "0.7673849", "0.73526937", "0.7254399", "0.72008485", "0.7190135", "0.7173182", "0.7171363", "0.71571124", "0.71343976", "0.7132663", "0.70779663", "0.7049543", "0.70089614", "0.70026636", "0.6930913", "0.6918508", "0.6897834", "0.6893251", "0.68858415", "0.68739736", "0.68188316", "0.6807183", "0.680509", "0.6751077", "0.6713112", "0.6699036", "0.66909146", "0.6654642", "0.66526306" ]
0.8843756
0
Test that load fixture endpoint is not available when it is not set up.
def test_load_fixture_url_not_found_if_not_setup(settings, caplog): caplog.set_level('WARNING') settings.ALLOW_TEST_FIXTURE_SETUP = None response = _request_load_fixture({'fixture': [ADVISER_FIXTURE]}) assert response.status_code == status.HTTP_404_NOT_FOUND with pytest.raises(Advisor.DoesNotExist): Advisor.objects.get(pk=ADVISER_FIXTURE['pk']) assert caplog.messages == [ 'The `load_fixture` endpoint is not enabled. The ALLOW_TEST_FIXTURE_SETUP environment' ' variable is not set.', 'Not Found: /testfixtureapi/load-fixture/', ]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_reset_fixtures_url_not_found_if_not_setup(settings, caplog):\n caplog.set_level('WARNING')\n\n settings.ALLOW_TEST_FIXTURE_SETUP = None\n response = _request_reset_fixtures()\n assert response.status_code == status.HTTP_404_NOT_FOUND\n\n assert caplog.messages == [\n 'The `reset_fixture` endpoint is not enabled. The ALLOW_TEST_FIXTURE_SETUP environment'\n ' variable is not set.',\n 'Not Found: /testfixtureapi/reset-fixtures/',\n ]", "def test_url_found_if_env_setup():\n response = _request_reset_fixtures()\n assert response.status_code == status.HTTP_201_CREATED", "def test_create_user_url_not_found_if_not_setup(settings, caplog):\n caplog.set_level('WARNING')\n\n settings.ALLOW_TEST_FIXTURE_SETUP = None\n response = _request_create_user(SEED_USER_DATA)\n assert response.status_code == status.HTTP_404_NOT_FOUND\n\n with pytest.raises(Advisor.DoesNotExist):\n Advisor.objects.get(email=SEED_USER_DATA['email'])\n\n assert caplog.messages == [\n 'The `create_user` endpoint is not enabled. The ALLOW_TEST_FIXTURE_SETUP environment'\n ' variable is not set.',\n 'Not Found: /testfixtureapi/create-user/',\n ]", "def test_missing_endpoint(self, req):\n req.side_effect = ks_exc.EndpointNotFound()\n self.client._get_resource_provider(self.context, \"fake\")\n\n # reset the call count to demonstrate that future calls still\n # work\n req.reset_mock()\n self.client._get_resource_provider(self.context, \"fake\")\n self.assertTrue(req.called)", "def test_not_loaded(person):\n with pytest.raises(KeyError):\n person.load(-1)\n\n assert person.loaded is False", "async def test_api_get_non_existing_state(\n hass: HomeAssistant, mock_api_client: TestClient\n) -> None:\n resp = await mock_api_client.get(\"/api/states/does_not_exist\")\n assert resp.status == HTTPStatus.NOT_FOUND", "def test_index_loads_properly(self):\n response = self.client.get('localhost:8000')\n self.assertEqual(response.status_code, 404)", "def test_load_no_backend(self):\n sample_file = os.path.join(os.path.dirname(__file__), \"broken-config-no-backend.yaml\")\n self.assertTrue(os.path.exists(sample_file), \"Did not found {}\".format(sample_file))\n\n def try_it():\n self.configurator.load_yaml_file(sample_file)\n\n self.assertRaises(ConfigurationError, try_it)", "def test_not_found(self):\n self._error_test(fitbit_exceptions.HTTPNotFound)", "def test_missing_data_sources(self):", "def test_get_nonexistent_test(self):\n response = self.app.test_client().get('/test/99999')\n self.assertEqual(response.status_code, 404)\n self.assert_template_used('test/test_not_found.html')", "def test_not_existing_url(client):\n response = client.get('/not-exists')\n assert response.status_code == 404", "def test_load_non_existant_protocol():\n Protocol.load(path(__file__).parent /\n path('protocols') /\n path('no protocol'))", "def test_endpoint_access_fail(self):\n url = reverse('users:activate-from-email', args=(1, 1))\n res = self.client.get(url)\n self.assertEqual(res.status_code, status.HTTP_503_SERVICE_UNAVAILABLE)", "def test_no_vault_secrets(mock_load, localhost_client, gen_input_config):\n mock_load.return_value = gen_input_config(vault_secrets={})\n\n localhost_client.load(\"in.json\")\n\n mock_load.assert_called_with(\"in.json\")", "def test_load_invalid_response(\n mock_hvac_client_read, mock_load, localhost_client, gen_input_config, gen_processed_config, gen_vault_response_kv2\n):\n mock_hvac_client_read.return_value = {\"invalid_data\": {}}\n mock_load.return_value = gen_input_config()\n\n with pytest.raises(RuntimeError):\n 
localhost_client.load(\"in.json\")\n\n mock_hvac_client_read.assert_called_with(gen_input_config()[\"vault_secrets\"][\"acme.user\"])\n mock_load.assert_called_with(\"in.json\")", "def test_startup_defensive(self) -> None:\n self.assertFalse(self.client.triggers, 'predefined triggers')\n self.assertIsNone(self.client.websocket, 'websocket without triggers')", "def test_no_endpoint(self):\n self.os_fixture.v3_token.remove_service('monitoring')\n conn = self._get_conn()\n # Monasca is not in the service catalog\n self.assertRaises(\n ks_exc.catalog.EndpointNotFound, getattr, conn, 'monitoring'\n )", "def test_load_unknow_page(self):\n\n c = Client()\n response = c.get('/unknown_page/')\n\n self.assertEqual(response.status_code, 404)", "def test_missing_endpoint_create_client(self, req, create_client):\n req.side_effect = ks_exc.EndpointNotFound()\n self.client._get_resource_provider(self.context, \"fake\")\n\n # This is the second time _create_client is called, but the first since\n # the mock was created.\n self.assertTrue(create_client.called)", "def test_view_url_exists_api_alerts(self):\n response = self.client.get('/api/alerts/')\n self.assertEqual(response.status_code, 200)", "def test_load__fail_empty_artifacts_path():\n ContractHandler.artifacts_path = None\n with pytest.raises(AssertionError):\n ContractHandler._load(\"DTFactory\")", "def testLoadConfigs_notExist(self):\n config_path = GetTestFilePath('unified_lab_config/invalid_lab/hosts')\n with six.assertRaisesRegex(\n self, lab_config.ConfigError, r'.* doesn\\'t exist.'):\n pool = lab_config.UnifiedLabConfigPool(config_path)\n pool.LoadConfigs()", "def test_unknown_routes():\n response = client.get(\"/unknown_endpoint\",\n headers={\"content-type\": \"application/json\"})\n assert response.status_code == status.HTTP_404_NOT_FOUND\n assert response.json() == {\"detail\": \"Not Found\"}", "def test_missing_file(self):\n with self.assertRaises(ConfigFileError):\n engine = Engine(\"/asdfhdfgkjldhsfg.json\", self.api_token)", "def test_get_non_existing(self):\n\n response = self.client.get('/auth/non-existing-resource')\n\n self.assert404(response)\n self.assertEqual('not found', response.json['error'])", "def test_no_setting(self):\n with self.assertRaises(ImproperlyConfigured):\n import_from_setting('DOES_NOT_EXIST')", "def test_get_empty_datasets(self):\n endpoints = [\"elections\", \"races\", \"candidates\", \"votes\", \"types\"]\n for endpoint in endpoints:\n response = self.client.get(\"/api/{}\".format(endpoint),\n headers=[(\"Accept\", \"application/json\")])\n self.assertEqual(response.status_code, 200)\n self.assertEqual(response.mimetype, \"application/json\")\n\n data = json.loads(response.data.decode(\"ascii\"))\n self.assertEqual(data, [])", "def test_no_participant(self):\n response = self.client.get(self.api_link)\n self.assertEqual(response.status_code, 404)", "def test_load_fixture(caplog):\n caplog.set_level('INFO')\n\n with pytest.raises(Advisor.DoesNotExist):\n Advisor.objects.get(pk=ADVISER_FIXTURE['pk'])\n\n response = _request_load_fixture({'fixture': [ADVISER_FIXTURE]})\n\n assert response.status_code == status.HTTP_201_CREATED\n\n adviser = Advisor.objects.get(pk=ADVISER_FIXTURE['pk'])\n assert adviser.email == ADVISER_FIXTURE['fields']['email']\n assert adviser.first_name == ADVISER_FIXTURE['fields']['first_name']\n assert adviser.last_name == ADVISER_FIXTURE['fields']['last_name']\n assert str(adviser.dit_team_id) == ADVISER_FIXTURE['fields']['dit_team']\n\n fixture_info = [\n 'Loading fixture: [',\n ' 
{',\n ' \"fields\": {',\n f' \"dit_team\": \"{ADVISER_FIXTURE[\"fields\"][\"dit_team\"]}\",',\n f' \"email\": \"{ADVISER_FIXTURE[\"fields\"][\"email\"]}\",',\n f' \"first_name\": \"{ADVISER_FIXTURE[\"fields\"][\"first_name\"]}\",',\n f' \"last_name\": \"{ADVISER_FIXTURE[\"fields\"][\"last_name\"]}\"',\n ' },',\n ' \"model\": \"company.advisor\",',\n f' \"pk\": \"{ADVISER_FIXTURE[\"pk\"]}\"',\n ' }',\n ']',\n ]\n assert caplog.messages == ['\\n'.join(fixture_info)]" ]
[ "0.7058743", "0.68539953", "0.6779199", "0.6734878", "0.64859587", "0.6472031", "0.6463761", "0.6420841", "0.64076334", "0.63773966", "0.6344679", "0.63359773", "0.63262814", "0.62421185", "0.62360024", "0.62192965", "0.61953586", "0.61665833", "0.61644936", "0.615883", "0.61522716", "0.61482805", "0.6133322", "0.61326087", "0.61299324", "0.6120603", "0.61093515", "0.6107494", "0.60951775", "0.60944957" ]
0.8150852
0
Test load fixture endpoint.
def test_load_fixture(caplog): caplog.set_level('INFO') with pytest.raises(Advisor.DoesNotExist): Advisor.objects.get(pk=ADVISER_FIXTURE['pk']) response = _request_load_fixture({'fixture': [ADVISER_FIXTURE]}) assert response.status_code == status.HTTP_201_CREATED adviser = Advisor.objects.get(pk=ADVISER_FIXTURE['pk']) assert adviser.email == ADVISER_FIXTURE['fields']['email'] assert adviser.first_name == ADVISER_FIXTURE['fields']['first_name'] assert adviser.last_name == ADVISER_FIXTURE['fields']['last_name'] assert str(adviser.dit_team_id) == ADVISER_FIXTURE['fields']['dit_team'] fixture_info = [ 'Loading fixture: [', ' {', ' "fields": {', f' "dit_team": "{ADVISER_FIXTURE["fields"]["dit_team"]}",', f' "email": "{ADVISER_FIXTURE["fields"]["email"]}",', f' "first_name": "{ADVISER_FIXTURE["fields"]["first_name"]}",', f' "last_name": "{ADVISER_FIXTURE["fields"]["last_name"]}"', ' },', ' "model": "company.advisor",', f' "pk": "{ADVISER_FIXTURE["pk"]}"', ' }', ']', ] assert caplog.messages == ['\n'.join(fixture_info)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fixture_example_data():\n import_example_data()", "def fixtures():", "def test_load_fixture_url_not_found_if_not_setup(settings, caplog):\n caplog.set_level('WARNING')\n\n settings.ALLOW_TEST_FIXTURE_SETUP = None\n response = _request_load_fixture({'fixture': [ADVISER_FIXTURE]})\n assert response.status_code == status.HTTP_404_NOT_FOUND\n\n with pytest.raises(Advisor.DoesNotExist):\n Advisor.objects.get(pk=ADVISER_FIXTURE['pk'])\n\n assert caplog.messages == [\n 'The `load_fixture` endpoint is not enabled. The ALLOW_TEST_FIXTURE_SETUP environment'\n ' variable is not set.',\n 'Not Found: /testfixtureapi/load-fixture/',\n ]", "def discovery_data(request):\n file = request.param\n p = Path(file)\n if not p.is_absolute():\n p = Path(__file__).parent / \"fixtures\" / file\n\n with open(p) as f:\n return json.load(f)", "def setUpFixture(self):\n pass", "def test_load(api):\n # upload file to file.io servers\n uploaded_file = api.upload(\n tag='test_file',\n expiry='1d',\n path='tests/test_file.txt'\n )\n\n # check that instance of FileIO has these fields\n assert uploaded_file.link\n assert uploaded_file.key\n assert uploaded_file.tag\n assert uploaded_file.path\n\n # check that the uploaded file was added to uploaded files list\n assert api.show_uploads()\n\n # check that our list is not empty\n assert api.file_obj_list\n\n # export the file in json format\n api.export('tests/exported.json')\n\n # check that exported file exists\n assert path.isfile('tests/exported.json')\n\n # set it to empty list\n api.file_obj_list = []\n\n # load the file in json format\n api.load('tests/exported.json')\n\n # remove the file\n remove('tests/exported.json')\n\n # check that the uploaded file was added to uploaded files list\n assert api.show_uploads()\n\n # check that our list is not empty\n assert api.file_obj_list\n\n # export the file in pkl format\n api.export('tests/exported.pkl')\n\n # set it to empty list\n api.file_obj_list = []\n\n # load the file in pkl format\n api.load('tests/exported.pkl')\n\n # remove exported.pkl file\n remove('tests/exported.pkl')\n\n # check that the uploaded file was added to uploaded files list\n assert api.show_uploads()\n\n # check that our list is not empty\n assert api.file_obj_list", "def test_load_data_base(self):\n pltp = get_object_or_404(Loaded_Pltp, name=\"test\")\n pl = pltp.loaded_pl_set.all()\n \n self.assertEqual(pltp.name, \"test\")\n #test json\n \n self.assertEqual(len(pl), 2)\n \n self.assertEqual(pl[0].name, \"test1\")\n #test dirname + json\n self.assertEqual(pl[1].name, \"test2\")\n #test dirname + json", "def test_load_index(self):\n\n c = Client()\n response = c.get('/taric_books/')\n\n self.assertEqual(response.status_code, 200)", "def load_fixture(filename):\n path = os.path.join(os.path.dirname(__file__), \"fixtures\", filename)\n with open(path) as fptr:\n return fptr.read()", "def test_load(self):\n command = constituencies.Command()\n command.handle('load', silent=True)", "def test_create_event_load(self):\n res = self.client.get('/create-event')\n data = res.data.decode('utf-8')\n assert res.status == '200 OK'\n assert 'Create Event' in data", "def test_load(\n mock_hvac_client_read, mock_load, localhost_client, gen_input_config, gen_processed_config, gen_vault_response_kv1\n):\n mock_hvac_client_read.return_value = gen_vault_response_kv1()\n mock_load.return_value = gen_input_config()\n\n assert localhost_client.load(\"in.json\") == gen_processed_config()\n\n 
mock_hvac_client_read.assert_called_with(gen_input_config()[\"vault_secrets\"][\"acme.user\"])\n mock_load.assert_called_with(\"in.json\")", "def test_loading_document(self):", "def api_response():\n return load_fixture(\"smhi.json\", DOMAIN)", "def test_specific_test_loads(self):\n response = self.app.test_client().get('/test/1')\n self.assertEqual(response.status_code, 200)\n self.assert_template_used('test/by_id.html')", "def _load_test_data(self):\n self._save_test_data()", "def test_load_order(self):\n order = OrderTest.create_order_1()\n c = Client()\n response = c.get('/lemur/order/set/' + str(order.id), follow=True)\n self.assertEqual(response.status_code, 200)", "def loadTest(app, store):\n\n @app.get(['/', '/test', '/test/route'])\n def testGet():\n \"\"\"\n Test endpoint for bottle application routes\n Shows location of this file\n Shows all routes in current bottle app\n \"\"\"\n bottle.response.set_header('content-type', 'text/plain')\n content = \"Web app file is located at %s\" % os.path.dirname(os.path.abspath(__file__))\n siteMap = \"\"\n\n for route in app.routes:\n siteMap = \"%s%s%s %s\" % (siteMap, '\\n' if siteMap else '', route.rule, route.method)\n target = route.config.get('mountpoint', {}).get('target')\n if target:\n for way in target.routes:\n siteMap = \"%s\\n %s %s\" % (siteMap, way.rule, way.method)\n\n content = \"%s\\n%s\" % (content, siteMap)\n return content\n\n @app.get(['/test/echo', '/test/echo/<action>'])\n @app.post(['/test/echo', '/test/echo/<action>'])\n def echoTest(action=None):\n \"\"\"\n Ajax test endpoint for web application service\n Echos back args as content\n \"\"\"\n # convert to json serializible dict\n result = odict(verb=bottle.request.method,\n url=bottle.request.url,\n action=action,\n query=odict(bottle.request.query.items()),\n headers=odict(bottle.request.headers.items()),\n data=bottle.request.json,\n form=odict(bottle.request.forms),\n body=bottle.request.body.read())\n\n return result\n\n @app.get(['/test/auth', '/test/auth/<token>'])\n @app.post(['/test/auth', '/test/auth/<token>'])\n def authTest(token=None):\n \"\"\"\n Auth credentials in body data as json\n or query parameters\n or token from end of url path\n or token from X-Auth-Token header\n \"\"\"\n if not token:\n token = bottle.request.get_header('X-Auth-Token')\n\n data = bottle.request.json\n if not token:\n user = data.get('user')\n password = data.get('password')\n\n query = odict(bottle.request.query.items())\n if not user or not password:\n user = query.get('user')\n password = query.get('password')\n\n if not token and (not user or not password):\n bottle.abort(400, \"Authentication credentials missing.\")\n\n result = odict(token=token,\n user=user,\n password=password,\n headers=odict(bottle.request.headers.items()),\n query=query,\n data=data,\n )\n return result\n\n @app.get('/test/stream')\n def streamTest():\n \"\"\"\n Create test server sent event stream that sends count events\n \"\"\"\n timer = StoreTimer(store, duration=2.0)\n bottle.response.set_header('Content-Type', 'text/event-stream') #text\n bottle.response.set_header('Cache-Control', 'no-cache')\n # Set client-side auto-reconnect timeout, ms.\n yield 'retry: 1000\\n\\n'\n i = 0\n yield 'id: {0}\\n'.format(i)\n i += 1\n yield 'data: START\\n\\n'\n n = 1\n while not timer.expired:\n yield 'id: {0}\\n'.format(i)\n i += 1\n yield 'data: {0}\\n\\n'.format(n)\n n += 1\n yield \"data: END\\n\\n\"\n\n return app", "def retrieve_fixture():\n j = json.load(open(\"./tests/fixtures/crond_event.json\"))\n 
return j", "def start_fixture(self):\n pass", "def _fixture(self):\n fdir = os.path.join(FIXTURES_DIR, 'errata.devel.redhat.com/')\n filename = self._url_with_params.replace(\n 'https://errata.devel.redhat.com/', fdir)\n # If we need to represent this API endpoint as both a directory and a\n # file, check for a \".body\" file.\n if os.path.isdir(filename):\n return filename + '.body'\n return filename", "def testApi(self):", "def setUp(self):\n self.response = self.s.get(self.url, params=self.params)", "def test_get_api_resources(self):\n pass", "def test_load_taric(self):\n\n c = Client()\n response = c.get('/taric_books/taric/')\n\n self.assertEqual(response.status_code, 200)", "def test_get(self):\n pass", "def test_main_page_load(self):\n response = self.client.get(reverse(\"index\"))\n self.assertEqual(response.status_code, 200)", "def _fixture_setup(self):\n pass", "def test_api_samples(self):\n # load api base\n r = requests.get('{server}/api/0.1/'.format(\n server=self.get_server_url())).json()\n # load samples from url specified in api base\n r = requests.get(r['samples']).json()\n self.assertIn('count', r)\n self.assertIn('next', r)\n self.assertIn('prev', r)\n self.assertIn('samples', r)", "def test_load_about(self):\n\n c = Client()\n response = c.get('/taric_books/about/')\n\n self.assertEqual(response.status_code, 200)" ]
[ "0.70628935", "0.6695335", "0.65467155", "0.6361524", "0.6320997", "0.6315157", "0.62557596", "0.61709154", "0.61625296", "0.6125355", "0.6113666", "0.6076544", "0.5988091", "0.59874165", "0.5981768", "0.59442526", "0.59401876", "0.59334683", "0.5930674", "0.5928487", "0.5926274", "0.5912856", "0.5900871", "0.58986086", "0.58864987", "0.58761317", "0.58664525", "0.5857611", "0.5846178", "0.58343744" ]
0.6883868
1
Draw all the objects in the scene
def draw_objects(): # Disable the turtle animation, and erase the scren. turtle.tracer(False) turtle.hideturtle() turtle.clear() # Draw all the parts of the scene. draw_ball() draw_target() draw_bounds() draw_pins() show_status() # Now show the screen, after everything has been drawn turtle.tracer(True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw(self):\n for obj in self.objects:\n obj.draw()", "def draw_all_objects():\n\tglobal fuel_available\n\n\tbackground_module.draw_bg(win)\n\tbackground_module.draw_snow(win)\n\tobstacles_module.draw_obstacles(win)\n\tcoins_module.draw_coins(win)\n\tforeground_module.draw_fg(win)\n\n\tfor spark_object in effects_module.Coin_spark_effects.coin_effects_list:\n\t\tspark_object.draw(win)\n\tfor hit_effect_object in effects_module.Hit_effects.hit_effects_list:\n\t\thit_effect_object.draw(win)\n\n\tif num_of_lives == 0:\n\t\tplayer_module.player.y += 1\n\t\tplayer_module.propeller.draw(win)\n\t\tplayer_module.player.draw(win)\n\telif won_bool:\n\t\tplayer_module.draw_player(win, True)\n\telse:\n\t\tplayer_module.draw_player(win)\n\t\t\n\tbird_module.draw_bird(win)\n\tdynamic_obstacle_giftbox.draw_gift(win)\n\tdynamic_obstacle_olaf.draw_olaf(win)\n\tdynamic_obstacle_santa.draw_santa(win)\n\tdisplay_module.display_lives(win, num_of_lives)\n\tdisplay_module.draw_minimap(win,frame_count)\n\n\tif start_fuel:\n\t\tfuel_available -= 1\n\tfuel_available = display_module.fuel_bar.draw_fuel_bar(win, fuel_available, start_fuel)\n\n\tdisplay_module.draw_fuel(win)\n\tcursor.draw(win)", "def draw_objects(self, view_manager):\n raise NotImplementedError(\"draw_objects can not be called directly from recoBase3D\")", "def on_draw( self ):\r\n self.clear()\r\n self.setup_3D()\r\n print \"DEBUG:\" , \"There are\" , len( self.renderlist ) , \"items in 'self.renderlist'\"\r\n for obj in self.renderlist:\r\n obj.draw()", "def draw(self):\n self.scene.draw(self.screen)", "def on_draw(self):\n\n # clear the screen to begin drawing\n arcade.start_render()\n\n # draw each object\n self.ball.draw()\n self.paddle.draw()\n\n self.draw_score()", "def draw(self):\n \n # Draw the background\n self.world.fill(BLUE)\n \n # Draw all the sprite lists that we have\n self.wall_list.draw(self.world)\n self.enemy_list.draw(self.world)\n self.sludge.draw(self.world)\n self.consumeable.draw(self.world)\n self.can_climb.draw(self.world)", "def drawall(self):\r\n for x in self.objectlist:\r\n if x.model:\r\n x.model.draw()", "def _drawFrame(self):\n\n self._clearScreen()\n \n for object in Object.Objects:\n self._drawObject(object)\n\n for entity in Entity.Entities:\n self._drawObject(entity)\n\n self._drawObject(Game.Player)", "def on_draw(self):\n\n # clear the screen to begin drawing\n arcade.start_render()\n\n # TODO: draw each object\n self.ship.draw()\n for asteroid in self.asteroid_array:\n asteroid.draw()\n\n for bullet in self.bullets_list:\n bullet.draw()", "def on_draw(self):\r\n\r\n \r\n # clear the screen to begin drawing\r\n arcade.start_render()\r\n\r\n background = arcade.load_texture(\"gala.png\")\r\n arcade.draw_texture_rectangle(SCREEN_WIDTH/2, SCREEN_HEIGHT/2,SCREEN_WIDTH , SCREEN_HEIGHT, background) \r\n \r\n\r\n for asteriod in self.rocks:\r\n asteriod.draw()\r\n \r\n # for asteriod in self.rockss:\r\n # asteriod.draw()\r\n\r\n # for asteriod in self.rocksss:\r\n # asteriod.draw() \r\n \r\n for bullet in self.bullets:\r\n bullet.draw()\r\n \r\n \r\n self.ship.draw()\r\n \r\n \r\n # TODO: draw each object\r", "def draw(self):\n self.screen.fill(BACKGROUND_COLOR)\n self.cannon.draw(self.screen)\n self.objects.draw(self.screen)", "def on_draw(self):\n\n # Clear the screen and start drawing\n arcade.start_render()\n\n # Draw the rectangles\n for shape in self.shapes:\n shape.draw()", "def draw_particles(self):\n for particle in self.particles:\n particle.draw()", "def drawAll(self):\r\n for x in 
range(len(self.model)):\r\n self.model[x].draw()", "def draw(self):\n self.screen.fill((0,51,102))\n # get the new drawables\n self.drawables = (self.game_model.get_background_drawables()\n + self.game_model.get_plane_drawables()\n + self.game_model.get_bullet_drawables()\n + self.game_model.get_enemy_drawables())\n for d in self.drawables:\n rect = d.get_rect()\n surf = d.get_surface()\n surf.set_colorkey((255,255,255))\n self.screen.blit(surf, rect)", "def on_draw(self):\r\n\r\n # clear the screen to begin drawing\r\n arcade.start_render()\r\n\r\n # draw each object\r\n self.rifle.draw()\r\n\r\n for bullet in self.bullets:\r\n bullet.draw()\r\n\r\n # TODO: iterate through your targets and draw them...\r\n for target in self.targets:\r\n target.draw()\r\n\r\n self.draw_score()\r\n # if the score is less than -30, game over!\r\n if self.score <= -30:\r\n self.draw_game_over()\r\n arcade.finish_render()", "def render(self, screen):\n # print(\"Drawing scene {}\".format(self.imgname))\n screen.fill(self.color)", "def display(self):\n\n self.screen.fill(self.background)\n\n for wireframe in self.wireframes.values():\n if self.displayEdges:\n for n1, n2 in wireframe.edges:\n pygame.draw.aaline(self.screen, self.edgeColour, wireframe.nodes[n1][:2], wireframe.nodes[n2][:2], 1)\n\n if self.displayNodes:\n for node in wireframe.nodes:\n pygame.draw.circle(self.screen, self.nodeColour, (int(node[0]), int(node[1])), self.nodeRadius, 0)", "def drawObjects(self, objects):\n\t\tfor o in objects:\n\t\t\tif o.visible:\n\t\t\t\tif o.rtc:\t# only horisontal center is supported currently, and only absolute values\n\t\t\t\t\thcenter = int((self.width - 1) / 2)\n\t\t\t\t\tposX = hcenter + o.x\n\t\t\t\t\tposY = o.y\n\t\t\t\telse:\n\t\t\t\t\tposX = int((o.x * self.width / 100) if type(o.x) == float else o.x)\n\t\t\t\t\tposY = int((o.y * self.height / 100) - 1 if type(o.y) == float else o.y)\n\t\t\t\t# frame\n\t\t\t\tif o.frame:\n\t\t\t\t\tfor nr, item in enumerate(o.frame):\n\t\t\t\t\t\tself.wts(posY + nr, posX + 1, item[0], item[1])\n\t\t\t\t# text\n\t\t\t\tfor nr, item in enumerate(o.content):\n\t\t\t\t\tfor coord in self.verticalBoundaries:\t# Check if a line is crossed\n\t\t\t\t\t\tif coord > posY and coord < posY + len(item[0]):\n\t\t\t\t\t\t\tif len(self.menus) == 1:\n\t\t\t\t\t\t\t\titem[0] = item[0][:coord - posY - 2] + '..'\n\t\t\t\t\ttry:\n\t\t\t\t\t\tself.wts(posY + nr + 1, posX + 2, item[0], item[1])\n\t\t\t\t\texcept:\n\t\t\t\t\t\tself.exit('Error occured in drawObjects, while drawing : OBJECT= \"' + str(o.content) + '\" ITEM= \"' + str(item)) + '\"'\n\t\treturn True", "def drawScene(self):\n glBegin(GL_LINES)\n # draw axes\n glColor3f(1, 0, 0)\n glVertex3f(0, 0, 0)\n glVertex3f(self.worldSize / 2, 0, 0)\n glColor3f(0, 1, 0)\n glVertex3f(0, 0, 0)\n glVertex3f(0, self.worldSize / 2, 0)\n glColor3f(0, 0, 1)\n glVertex3f(0, 0, 0)\n glVertex3f(0, 0, self.worldSize / 2)\n # draw bounding box\n glColor3f(1, 1, 1)\n scalar = (self.worldSize - 1) / 2\n for x in [-1, 1]:\n for y in [-1, 1]:\n for z in [-1, 1]:\n glVertex3f(scalar * x, scalar * y, scalar * z)\n for z in [-1, 1]:\n for x in [-1, 1]:\n for y in [-1, 1]:\n glVertex3f(scalar * x, scalar * y, scalar * z)\n for y in [-1, 1]:\n for z in [-1, 1]:\n for x in [-1, 1]:\n glVertex3f(scalar * x, scalar * y, scalar * z)\n glEnd()\n # draw spheres if in POINTS mode\n if self.displayMode is self.DISPLAYMODE_POINTS:\n prev = (0, 0, 0)\n offset = int(self.worldSize / 2)\n for x in range(self.worldSize):\n for y in range(self.worldSize):\n for z in 
range(self.worldSize):\n glTranslatef(x - offset - prev[0], y - offset - prev[1], z - offset - prev[2])\n # use threshold for black/white coloring\n if self.world[x][y][z] > self.worldThreshold:\n glColor3f(1, 1, 1)\n else:\n glColor3f(0, 0, 0)\n gluSphere(self.sphere, 0.1, 8, 4)\n prev = (x - offset, y - offset, z - offset)\n # draw mesh if in MESH mode\n elif self.displayMode is self.DISPLAYMODE_MESH:\n offset = int(self.worldSize / 2)\n for x in range(self.worldSize - 1):\n for y in range(self.worldSize - 1):\n for z in range(self.worldSize - 1):\n if self.polygons[x][y][z]:\n glBegin(GL_POLYGON)\n glColor3f(x / self.worldSize, y / self.worldSize, z / self.worldSize)\n for vertex in self.polygons[x][y][z]:\n glVertex3f(x + vertex[0] - offset, y + vertex[1] - offset, z + vertex[2] - offset)\n glEnd()\n # draw wireframe in in WIRE mode\n elif self.displayMode is self.DISPLAYMODE_WIREFRAME:\n offset = int(self.worldSize / 2)\n for x in range(self.worldSize - 1):\n for y in range(self.worldSize - 1):\n for z in range(self.worldSize - 1):\n glBegin(GL_LINES)\n glColor3f(x / self.worldSize, y / self.worldSize, z / self.worldSize)\n for vertex in self.polygons[x][y][z]:\n glVertex3f(x + vertex[0] - offset, y + vertex[1] - offset, z + vertex[2] - offset)\n glEnd()\n # draw background in the distance\n glLoadIdentity()\n glBegin(GL_QUADS)\n glColor3f(59 / 256, 102 / 256, 212 / 256)\n glVertex3f(-30, -23, -49.5)\n glVertex3f(30, -23, -49.5)\n glColor3f(184 / 256, 201 / 256, 242 / 256)\n glVertex3f(30, 23, -49.5)\n glVertex3f(-30, 23, -49.5)\n glEnd()\n # HUD in white\n glColor3f(1, 1, 1)\n # lower left\n glWindowPos2f(10, 10)\n for ch in 'WASD: Rotate':\n glutBitmapCharacter(GLUT_BITMAP_9_BY_15, ord(ch))\n glWindowPos2f(10, 25)\n for ch in 'Wheel: Thresh':\n glutBitmapCharacter(GLUT_BITMAP_9_BY_15, ord(ch))\n glWindowPos2f(10, 40)\n for ch in 'R: Randomize':\n glutBitmapCharacter(GLUT_BITMAP_9_BY_15, ord(ch))\n glWindowPos2f(10, 55)\n for ch in 'O: Object':\n glutBitmapCharacter(GLUT_BITMAP_9_BY_15, ord(ch))\n glWindowPos2f(10, 70)\n for ch in 'I: Wireframe':\n glutBitmapCharacter(GLUT_BITMAP_9_BY_15, ord(ch))\n glWindowPos2f(10, 85)\n for ch in 'P: Points':\n glutBitmapCharacter(GLUT_BITMAP_9_BY_15, ord(ch))\n # upper right\n glWindowPos2f(self.displaySize[0] - 118, self.displaySize[1] - 25)\n for ch in 'Thresh: %0.2f' % self.worldThreshold:\n glutBitmapCharacter(GLUT_BITMAP_9_BY_15, ord(ch))", "def pygame_render(objects_to_draw: DrawingObjects, surface):\n\n for arc in objects_to_draw.arcs:\n pygame.draw.arc(surface, arc.color, arc.enclosing_rect, arc.angle_begin, arc.angle_end, arc.lines_thickness)\n for rect in objects_to_draw.rects:\n pygame.draw.rect(surface, rect.color, rect.shape, rect.lines_thickness)\n for circle in objects_to_draw.circles:\n pygame.draw.circle(surface, circle.color, circle.center, circle.radius, circle.line_thickness)\n for a_line in objects_to_draw.lines:\n pygame.draw.line(surface, a_line.color, a_line.begin, a_line.end, a_line.line_thickness)", "def draw(self, window):\n \n window.fill(Colours.BG)\n\n for draw_obj in self.objects.values():\n draw_obj.draw(window)\n\n #Ensure text is drawn on top\n self.objects['introtext'].draw(window)\n self.objects['introtext2'].draw(window)\n self.objects['introtext3'].draw(window)\n \n if self.banner is not None:\n self.banner.draw(window)\n \n pygame.display.flip()", "def draw(self):\n self.screen.fill(pygame.Color(0,0,0))\n for brick in self.model.bricks:\n pygame.draw.rect(self.screen, brick.color, 
pygame.Rect(brick.x,brick.y,brick.width,brick.height))\n pygame.draw.rect(self.screen, pygame.Color(255,255,255), pygame.Rect(self.model.paddle.x,self.model.paddle.y,self.model.paddle.width,self.model.paddle.height))\n pygame.draw.ellipse(self.screen, pygame.Color(128,128,128),(self.model.ball.x-self.model.ball.r, self.model.ball.y-self.model.ball.r, 2*self.model.ball.r,2*self.model.ball.r))\n pygame.display.update()", "def draw(self):\n for section in self.sections:\n canvas_reset(self.canvas)\n section.draw(self.canvas)", "def draw(self):\n\n for item in self.vis:\n item.undraw()\n self.render()\n for item in self.vis:\n item.draw(self.win)\n self.drawn = True", "def on_draw(self):\n # Clearing the buffers\n self.clear()\n self.set3d()\n # Makes it so color can be added\n glColor3d(1, 1, 1)\n\n self.push(self.player.pos, self.player.rot)\n self.model.draw()\n glPopMatrix()\n self.model.process_queue_slowly()\n\n # Draws the crosshairs on the screen\n self.set2d()\n self.draw_position_label()\n self.draw_reticle()", "def draw():\n clear()\n\n for target in targets:\n goto(target.x, target.y)\n dot(20, \"blue\")\n\n if inside(ball):\n goto(ball.x, ball.y)\n dot(6, \"red\")\n\n update()", "def draw_scene():\n # Place the camera\n camera.placeCamera()\n \n \n # Set up the global ambient light. (Try commenting out.)\n amb = [ 0*brightness, 0*brightness, 0*brightness, 1.0 ]\n glLightModelfv(GL_LIGHT_MODEL_AMBIENT, amb)\n\n # Set up the main light (LIGHT0)... or not.\n if is_light_on:\n place_blue_light()\n place_red_light()\n place_green_light()\n place_lamp_light()\n else:\n glDisable(GL_LIGHT0)\n glDisable(GL_LIGHT1)\n glDisable(GL_LIGHT2)\n glDisable(GL_LIGHT3)\n\n if lamp_light:\n place_lamp_light()\n else:\n glDisable(GL_LIGHT3)\n\n if headlamp_is_on:\n place_headlamp_light()\n else:\n glDisable(GL_LIGHT4)\n\n # Now spin the world around the y-axis (for effect).\n glRotated(angle_movement, 0, 1, 0)\n draw_objects()", "def draw(self): \n [component.draw() for component in self.components]" ]
[ "0.8355407", "0.77330977", "0.74636686", "0.74551225", "0.74239063", "0.73782027", "0.73662746", "0.7353718", "0.73440975", "0.7299292", "0.7276649", "0.72509795", "0.7222594", "0.71387583", "0.7134472", "0.69717264", "0.69685745", "0.6954083", "0.6932343", "0.6887796", "0.6864576", "0.68557644", "0.6854374", "0.6834133", "0.6833142", "0.68099743", "0.67889684", "0.6780607", "0.6686312", "0.66862196" ]
0.81850326
1
Draw the ball at its current position
def draw_ball(): draw_circle(ball, 'yellow')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_ball(self):\n circle(screen, self.color, (self.x, self.y), self.r)", "def draw(self, ball):\n self.screen.fill(pygame.Color(0,0,0))\n self.screen.blit(ball.surf, (size[0]/2, size[1]/2)) #this puts it in roughly center of screen\n #add code to draw paddles and ball\n #puts the new visuals on the screen\n pygame.display.update()", "def draw(self):\n self.ball_sprite.draw()", "def drawBall(self, view):\n self._ball.draw(view)", "def draw(self):\n arcade.draw_circle_filled(self.center.x, self.center.y, BALL_RADIUS, BALL_COLOR)\n return", "def draw_self(self):\n self.ball_sprite.draw()", "def show_ball(self, screen, fgColor):\r\n pygame.draw.circle(screen, fgColor, (self.x, self.y), self.Radius)", "def draw(self):\r\n arcade.draw_circle_filled(self.center.x, self.center.y, BULLET_RADIUS, BULLET_COLOR)", "def draw(self):\n pygame.draw.circle(screen, self.color, (int(self.x), int(self.y)),\n self.radius)", "def _draw(self):\n\n pygame.draw.circle(self.display, self.colour,\n self.pos.astype(int),\n self.rad)\n pygame.draw.circle(self.display, black,\n self.pos.astype(int),\n self.rad, 2)", "def draw(self):\r\n arcade.draw_circle_filled(self.center.x, self.center.y, self.radius, TARGET_COLOR)", "def draw(self):\n arcade.draw_circle_filled(self.position_x, self.position_y, self.radius,self.player_color)", "def draw(self, screen):\n\t\tpygame.draw.circle(screen, self.color, self.pos, self.radius)", "def render(self, screen):\n x,y = self.getBallPos()\n pygame.draw.circle(screen, (255, 255, 255), (x, y), self.RADIUS)", "def draw( self ):\n\t\t\t\n\t\ttransposition = lambda point: (point[0] + WINDOW_X, WINDOW_Y - point[1])\n\t\t\t \n\t\tx, y = transposition( self.position.xy )\n\t\tpygame.draw.circle(self.screen, self.color, ( int(x + 0.5), int(y + 0.5) ), self.r)", "def draw(self):\n self.screen.fill(pygame.Color(0,0,0))\n for brick in self.model.bricks:\n pygame.draw.rect(self.screen, brick.color, pygame.Rect(brick.x,brick.y,brick.width,brick.height))\n pygame.draw.rect(self.screen, pygame.Color(255,255,255), pygame.Rect(self.model.paddle.x,self.model.paddle.y,self.model.paddle.width,self.model.paddle.height))\n pygame.draw.ellipse(self.screen, pygame.Color(128,128,128),(self.model.ball.x-self.model.ball.r, self.model.ball.y-self.model.ball.r, 2*self.model.ball.r,2*self.model.ball.r))\n pygame.display.update()", "def __draw_line(display, color, ball_pos, dx, dy):\n pygame.draw.line(display, color, ball_pos, (ball_pos[0] + dx, ball_pos[1] + dy), 2)", "def draw():\n clear()\n\n for target in targets:\n goto(target.x, target.y)\n dot(20, \"blue\")\n\n if inside(ball):\n goto(ball.x, ball.y)\n dot(6, \"red\")\n\n update()", "def draw(self):\n radius = self.width / 2\n center_x = self.x + radius\n center_y = self.y + radius\n arcade.draw_circle_filled(center_x, center_y, radius, self.fill.color)\n arcade.draw_circle_outline(\n center_x, center_y, radius, self.pen.color, 3)", "def draw(self):\n arcade.draw_rectangle_filled(self.center.x, self.center.y, PADDLE_WIDTH, PADDLE_HEIGHT, PADDLE_COLOR)\n pass", "def draw(self):\r\n arcade.draw_rectangle_filled(self.center.x, self.center.y, self.radius, self.radius, TARGET_SAFE_COLOR)", "def draw(self):\n if self.visible:\n glColor3f(self.r, self.g, self.b)\n graphicsBall(self.x, self.y, self.radius)\n\n if self.number <= 8:\n glColor3f(1.0, 1.0, 1.0)\n else:\n glColor3f(0.0, 0.0, 0.0)\n\n graphicsBall(self.x, self.y, self.radius / 2)\n\n if self.number > 0:\n if self.number > 8:\n glColor3f(1.0, 1.0, 1.0)\n else:\n glColor3f(0.0, 0.0, 0.0)\n\n if 
self.number < 10:\n graphicsText(self.x - 2, self.y - 3.5, str(self.number))\n else:\n graphicsText(self.x - 4.5, self.y - 3.5, str(self.number))", "def render(self, game):\n pygame.draw.circle(game.screen,\n self.colour,\n (int(self.x), int(self.y)), self.r)", "def draw(self):\n arcade.draw_rectangle_outline(self.position_x, self.position_y, self.radius, self.color)", "def draw(self):\n self._view.clear()\n for h in range(len(self._bricks)):\n self._bricks[h].draw(self._view)\n \n self._paddle.draw(self._view)\n \n for w in self._FP_list:\n w.draw(self._view)\n \n # draw ball if not None\n if not self._ball is None:\n self._ball.draw(self._view)", "def draw(self, screen):\n # Snow is circular in this case... ;-)\n pygame.draw.circle(screen, self.color, (int(self.x), int(self.y)),\n self.size)\n # now update things for the next loop\n # update the x position\n self.x += self.dx\n # update the y position\n self.y += self.dy\n # bounds checking\n if self.x < 0 or self.x > size_x:\n # floated off the edge of the screen so do a reset\n self.x = random.randrange(size_x)\n self.y = 0\n self.dy = random.randrange(1, 30) + random.random()\n if (self.y > size_y):\n # floated off the bottom of the screen so drift again from the top\n self.x = random.randrange(size_x)\n self.y = 0\n self.dy = random.randrange(1, 30) + random.random()", "def render(self,screen):\n pygame.draw.circle(screen, (255,0,0), self.circlePos.int(),self.circleRad,0)\n tipPt = self.circlePos + 5 * self.circleVel.normalized()\n perpVector = math3d.VectorN(-self.circleVel[1], self.circleVel[0]).normalized()\n rsidePt = self.circlePos + 5 * perpVector\n lsidePt = self.circlePos + 5 *-perpVector\n\n pygame.draw.line(screen, (255,255,255), self.circlePos, tipPt)\n pygame.draw.line(screen, (255,255,255), self.circlePos, rsidePt)\n pygame.draw.line(screen,(255,255,255), self.circlePos, lsidePt)\n pygame.draw.polygon(screen,(255,255,255),(tipPt,rsidePt,lsidePt),3)", "def draw(self):\n arcade.draw_xywh_rectangle_filled(\n self.x, self.y, self.width, self.height, self.fill.color\n )\n arcade.draw_xywh_rectangle_outline(\n self.x, self.y, self.width, self.height, self.pen.color, 3\n )", "def move_ball(self):\r\n self.canvas.move(self.ball, (self.x_speed * self.speed), (self.y_speed * self.speed))\r\n (leftPos, topPos, rightPos, bottomPos) = self.canvas.coords(self.ball)\r\n if leftPos <= 0 or rightPos >= 400:\r\n self.x_speed = -self.x_speed\r\n if topPos <= 0 or bottomPos >= 400:\r\n self.y_speed = -self.y_speed", "def draw(self):\n arcade.draw_rectangle_filled(self.center.x,\n self.center.y,\n self.width,\n self.height,\n arcade.color.WHITE)" ]
[ "0.86652905", "0.79869777", "0.7910737", "0.78856057", "0.77881765", "0.76972806", "0.76524436", "0.7610093", "0.76025486", "0.75768846", "0.7515461", "0.7492722", "0.7467942", "0.7458139", "0.7410807", "0.7403724", "0.7402274", "0.73503107", "0.7325099", "0.73093575", "0.72478855", "0.71841884", "0.7146884", "0.71121764", "0.70169896", "0.70063967", "0.7004244", "0.6953542", "0.6929644", "0.69049567" ]
0.80440485
1
Draw the bounding rectangle.
def draw_bounds(): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_bounding_box(self):\n # Gets the bounding box\n xmin, ymin, xmax, ymax = self.get_bounding_box()\n\n # Gets the actual coordinates\n width = xmax - xmin\n height = ymax - ymin\n center_x = xmin + (width)/2\n center_y = ymin + (height)/2\n\n arcade.draw_rectangle_outline(center_x, center_y, width, height, (255, 0, 0))", "def drawRectangle(x, y, width, height):\n pen1.up()\n pen1.goto(x, y)\n pen1.down()\n pen1.fd(width)\n pen1.right(90)\n pen1.fd(height)\n pen1.right(90)\n pen1.fd(width)\n pen1.right(90)\n pen1.fd(height)", "def draw_rect(self, x, y, w, h, color=None):\n self._draw_fast_hline(x, y, w, color)\n self._draw_fast_hline(x, y + h - 1, w, color)\n self._draw_fast_vline(x, y, h, color)\n self._draw_fast_vline(x + w - 1, y, h, color)", "def DrawRectangle(*args, **kwargs):\n return _gdi_.GraphicsContext_DrawRectangle(*args, **kwargs)", "def draw(self):\n if self.master != None :\n outline = Cell.FILLED_COLOR_BORDER if self.fill else Cell.EMPTY_COLOR_BORDER\n\n xmin = self.abs * self.size\n xmax = xmin + self.size\n ymin = self.ord * self.size\n ymax = ymin + self.size\n\n self.master.create_rectangle(xmin, ymin, xmax, ymax, fill = self.fill, outline = outline)", "def DrawRectangle(*args, **kwargs):\n return _gdi_.DC_DrawRectangle(*args, **kwargs)", "def drawRectWithBorder(screen, bColor, fColor, posX, posY, height, width, bWidth):\n \n #draw outline rect \n pygame.draw.rect(screen, bColor, (posX, posY, height, width))\n #draw fill rect\n pygame.draw.rect(screen, fColor, (posX + bWidth, posY + bWidth, height - bWidth * 2, width - bWidth * 2))", "def DrawRectangleRect(*args, **kwargs):\n return _gdi_.DC_DrawRectangleRect(*args, **kwargs)", "def draw_rect(x, y, width, height):\r\n global _canvas\r\n global _current_color\r\n if _canvas == None:\r\n raise RuntimeError(\"Canvas is not open yet.\")\r\n else:\r\n rect = Rectangle(width, height, Point(x+width/2, y+height/2))\r\n _set_not_filled(rect)\r\n _canvas.add(rect)", "def DrawRectangleRect(*args, **kwargs):\n return _gdi_.PseudoDC_DrawRectangleRect(*args, **kwargs)", "def draw(self):\n #for (x, y) in self.coords:\n # pyxel.rect(\n # (x + self.x) * 4,\n # (y + self.y) * 4,\n # (x + self.x) * 4 + 3,\n # (y + self.y) * 4 + 3,\n # self.color)", "def draw_bounding_boxes(display, bounding_boxes):\n\n bb_surface = pygame.Surface((VIEW_WIDTH, VIEW_HEIGHT))\n bb_surface.set_colorkey((0, 0, 0))\n for bbox in bounding_boxes:\n points = [(int(bbox[i, 0]), int(bbox[i, 1])) for i in range(8)]\n # draw lines\n # base\n pygame.draw.line(bb_surface, BB_COLOR, points[0], points[1])\n pygame.draw.line(bb_surface, BB_COLOR, points[1], points[2])\n pygame.draw.line(bb_surface, BB_COLOR, points[2], points[3])\n pygame.draw.line(bb_surface, BB_COLOR, points[3], points[0])\n # top\n pygame.draw.line(bb_surface, BB_COLOR, points[4], points[5])\n pygame.draw.line(bb_surface, BB_COLOR, points[5], points[6])\n pygame.draw.line(bb_surface, BB_COLOR, points[6], points[7])\n pygame.draw.line(bb_surface, BB_COLOR, points[7], points[4])\n # base-top\n pygame.draw.line(bb_surface, BB_COLOR, points[0], points[4])\n pygame.draw.line(bb_surface, BB_COLOR, points[1], points[5])\n pygame.draw.line(bb_surface, BB_COLOR, points[2], points[6])\n pygame.draw.line(bb_surface, BB_COLOR, points[3], points[7])\n display.blit(bb_surface, (0, 0))", "def rectDraw(rect):\n ax = plt.gca()\n w = max(0, rect[1] - rect[0])\n h = max(0, rect[3] - rect[2])\n ax.add_patch(patches.Rectangle((rect[0], rect[2]), w, h, edgecolor='g', facecolor='none'))", "def draw(self):\n 
arcade.draw_rectangle_filled(self.center.x,\n self.center.y,\n self.width,\n self.height,\n arcade.color.WHITE)", "def draw(self):\r\n arcade.draw_rectangle_filled(self.center.x, self.center.y, self.radius, self.radius, TARGET_SAFE_COLOR)", "def draw(self):\n arcade.draw_rectangle_outline(self.position_x, self.position_y, self.radius, self.color)", "def DrawRectangle(*args, **kwargs):\n return _gdi_.PseudoDC_DrawRectangle(*args, **kwargs)", "def draw(self):\n if self.master != None :\n fill = self.fill\n #fill = Cell.FILLED_COLOR_BG\n outline = Cell.EMPTY_COLOR_BORDER\n\n #if not self.fill:\n # fill = Cell.EMPTY_COLOR_BG\n # outline = Cell.EMPTY_COLOR_BORDER\n\n xmin = self.abs * self.size\n xmax = xmin + self.size\n ymin = self.ord * self.size\n ymax = ymin + self.size\n\n self.master.create_rectangle(xmin, ymin, xmax, ymax, fill = fill, outline = outline)", "def _boundRect(self):\n addresstamp = reduce(lambda x, y: x + y, [v.addresstamp for v in self.footprints])\n self.upperleft = list(map(min, zip(*addresstamp)))\n self.bottomright = list(map(max, zip(*addresstamp)))\n self.upperright = [self.bottomright[0], self.upperleft[1]]\n self.bottomleft = [self.upperleft[0], self.bottomright[1]]\n (self.width, self.height) = (self.upperright[0] - self.bottomleft[0], self.bottomleft[1] - self.upperright[1])\n assert self.width >= 0\n assert self.height >= 0\n self.center = [self.upperleft[0] + self.width / float(2), self.upperleft[1] + self.height / float(2)]\n self.corners = [self.upperright, self.bottomleft, self.upperleft, self.bottomright]", "def draw(self, win):\n pygame.draw.rect(win, self.color, self.rect)", "def draw_rect(self, color, position, size, border_width = 0, anchor= 'topleft'):\n # We'll try to make sure that everything is okay later\n \n color = spyral.color._determine(color)\n offset = self._calculate_offset(anchor, size)\n pygame.draw.rect(self._surf, color, (position + offset, size), border_width)", "def draw(self):\n arcade.draw_xywh_rectangle_filled(\n self.x, self.y, self.width, self.height, self.fill.color\n )\n arcade.draw_xywh_rectangle_outline(\n self.x, self.y, self.width, self.height, self.pen.color, 3\n )", "def draw_rectangle(self, roi, color, thickness=2):\n top_left = self._format_point(Point(roi[0], roi[1]))\n bottom_right = self._format_point(Point(roi[2], roi[3]))\n opencv.rectangle(self.img, top_left.tuple(), bottom_right.tuple(), color.bgra(), thickness=thickness)", "def drawRectangle(img, rect, color):\n \n if len(rect) is not 4:\n # TODO throw error\n return;\n rect = rect * DISPLAY_SCALE;\n x1, y1, x2, y2 = rect.astype(numpy.int32);\n cv2.rectangle(img, (x1, y1), (x2, y2), color, 2);", "def draw(self, frame):\n left, right, top, bottom = self.box['left'], self.box['right'], self.box['top'], self.box['bottom']\n text = '{}: {:.2f} ({:.2f} m)'.format(self.label, self.confidence, self.distance)\n\n # Draw label\n text_size, baseline = cv.getTextSize(text, cv.FONT_HERSHEY_SIMPLEX, 0.5, 1)\n top = max(top, text_size[1])\n\n cv.rectangle(frame, (left, top - text_size[1]), (left + text_size[0], top + baseline), (255, 255, 255), cv.FILLED)\n cv.putText(frame, text, (left, top), cv.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 0))\n\n # Draw bounding box\n cv.rectangle(frame, (left, top), (right, bottom), (0, 255, 0))", "def draw_rectangle(self, can_page, x_start, y_start, width_rect, height_rect, line_width, stroke_color, fill_color,\n dash_style, stroke_mode, fill_mode, text_color):\n # ログ\n log.debug(self)\n try:\n can_page.setLineWidth(line_width)\n 
can_page.setStrokeColor(stroke_color)\n if fill_color is None:\n fill_mode = 0\n else:\n can_page.setFillColor(fill_color)\n can_page.setDash(dash_style)\n can_page.rect(x_start, y_start, width_rect, height_rect, stroke=stroke_mode, fill=fill_mode)\n can_page.setFillColor(text_color, alpha=None)\n except:\n # 例外処理\n # log.error(traceback.format_exc())\n constant.get_error(constant.ERROR_003)", "def drawRectangle(img, top_left, bottom_right, color = (0,0,255), thickness = 3):\n\tcv2.rectangle(img, top_left, bottom_right, color, thickness)", "def __draw(self, screen):\n\n pygame.draw.rect(screen, (200, 255, 200), (self.x, self.y, self.width, self.height))", "def draw_round_rect(self, x, y, w, h, r, color=None, aa=False):\n self._draw_fast_hline(x + r, y, w - 2 * r, color, aa) # Top\n self._draw_fast_hline(x + r, y + h - 1, w - 2 * r, color, aa) # Bottom\n self._draw_fast_vline(x, y + r, h - 2 * r, color, aa) # Left\n self._draw_fast_vline(x + w - 1, y + r, h - 2 * r, color, aa) # Right\n # draw four corners\n self._draw_circle_helper(x + r, y + r, r, 1, color)\n self._draw_circle_helper(x + w - r - 1, y + r, r, 2, color)\n self._draw_circle_helper(x + w - r - 1, y + h - r - 1, r, 4, color)\n self._draw_circle_helper(x + r, y + h - r - 1, r, 8, color)", "def rect(self, x, y, w, h, color):\n self.hline(x, y, w, color)\n self.vline(x, y, h, color)\n self.vline(x + w - 1, y, h, color)\n self.hline(x, y + h - 1, w, color)" ]
[ "0.83684695", "0.7127851", "0.71239257", "0.7029944", "0.7016083", "0.6983453", "0.6905837", "0.68964565", "0.6860532", "0.6849282", "0.6847225", "0.68461347", "0.6814617", "0.6795457", "0.6784434", "0.677645", "0.6746962", "0.6722924", "0.67197233", "0.66908365", "0.66871804", "0.66720283", "0.6661344", "0.66526985", "0.6614965", "0.6603991", "0.65758437", "0.65730006", "0.65646845", "0.65504205" ]
0.7149721
1
Draw all the pins.
def draw_pins(): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_pins(self, data):\n # Here we set defaults (with 'or' keyword ...)\n ax = self.ax\n plot_pins = self.plot_pins\n plot_pins_values = self.plot_pins_values\n #plot_pins_method = self.plot_pins_method or \"highlight\"\n plot_pins_colors = self.plot_pins_colors\n\n # Here we do checks and stop drawing pins if something is unset\n if ax is None: return\n if plot_pins is None: return\n \n verbose=self.verbose\n\n no_of_pins = len(self.plot_pins)\n\n if self.plot_pins_method == \"highlight\":\n\n for pin_idx in range(no_of_pins): # For every pin number (0,1,2,3)\n\n if plot_pins[pin_idx] == True: # If we want them plotted\n \n hold_times = self.hold_times_obj.identify_hold_times(pin_idx, plot_pins_values[pin_idx], data.gpio)\n\n if hold_times is not None:\n for ht in hold_times:\n axvsp = ax.axvspan(ht[0], ht[1], color=plot_pins_colors[pin_idx], alpha=0.25)\n self.axvspans[pin_idx].append(axvsp)\n\n x_halfway = (ht[1] - ht[0]) / 4 + ht[0]\n y_halfway = (self.plot_ymax - self.plot_ymin) / 2 + self.plot_ymin\n annon = ax.annotate(str(self.iterations[pin_idx] + 1), xy=(x_halfway, y_halfway))\n self.annotations[pin_idx].append(annon)\n \n self.iterations[pin_idx] += 1\n\n # TODO: The start and stop indexes of the data points that are area of interest\n # might be more useful for an averaging function, but currently the plot uses\n # the coordinates of the X axis(the start/stop timestamps) in order to highlight\n # the areas of interest.\n self.preprocessed_averages_data[pin_idx].append((self.iterations[pin_idx], ht, 0, None))\n \n # This should be in update_plot()\n self.ax.set_title(\n f\"Logging. Collected {len(data.power)} power samples and {len(data.gpio)} gpio samples.\")\n\n elif self.plot_pins_method == \"line\":\n extend_gpio = data.gpio.timestamps[-1] < data.power.timestamps[-1]\n for pin, plot_pin in enumerate(self.plot_pins):\n if plot_pin:\n self.ln_pins[pin].set_xdata(\n data.gpio.timestamps + extend_gpio * [data.power.timestamps[-1]])\n self.ln_pins[pin].set_ydata(\n data.gpio.get_select_in_value(pin) + extend_gpio * [data.gpio.values[-1][pin]])\n self.ax.set_title(f\"Logging. 
Collected {len(data.power)} power samples and {len(data.gpio)} gpio samples.\")\n self.fig.show()\n else:\n raise ValueError(f\"Unrecognized plot_pins_method: {self.plot_pins_method}\")", "def draw(self):\n\n self.updateLazyImageLoading()\n\n image(self.baseMap, 0, 0)\n\n for layer in self.layers:\n layer.draw()\n\n for marker in self.markers:\n marker.draw()", "def draw(self):\n for section in self.sections:\n canvas_reset(self.canvas)\n section.draw(self.canvas)", "def draw(self):\n self.draw_occupied_cells()\n self.draw_open_cells()\n self.draw_edges()\n plt.xlabel(\"Red\")\n plt.ylabel(\"Black\")\n plt.title('Hex')\n self.camera.snap()", "def draw(self):\n for obj in self.objects:\n obj.draw()", "def on_draw(self):\n # draw everything", "def draw(self):\n #for (x, y) in self.coords:\n # pyxel.rect(\n # (x + self.x) * 4,\n # (y + self.y) * 4,\n # (x + self.x) * 4 + 3,\n # (y + self.y) * 4 + 3,\n # self.color)", "def drawAll(self):\r\n for x in range(len(self.model)):\r\n self.model[x].draw()", "def draw(self):\n self.screen.fill(WHITE)\n self.color_invalid()\n self.draw_selected()\n self.shade_locked_cells()\n self.draw_grid()\n self.draw_buttons()\n self.draw_numbers()", "def draw(self):\n self._view.clear()\n for h in range(len(self._bricks)):\n self._bricks[h].draw(self._view)\n \n self._paddle.draw(self._view)\n \n for w in self._FP_list:\n w.draw(self._view)\n \n # draw ball if not None\n if not self._ball is None:\n self._ball.draw(self._view)", "def draw_arrows(self):\n for arrow in self.arrows:\n arrow.draw(self)", "def draw(self):\n self.drawLine()\n\n for l in range(0, self.height):\n print(\"|\", end='', flush=True)\n for c in range(0, self.width):\n print(\" \" + str(self.grid[l][c]) + \" |\", end='', flush=True)\n print(\"\\n\", end='', flush=True)\n\n self.drawLine()", "def draw_grid(self):\n self.screen.draw_many_tiles(tile for tile in self.iter_grid_tiles())\n pass", "def draw( self ):\n\n if self.__drawnGrid == 0:\n draw_grid().draw()\n\n self.__drawnGrid = 1\n\n column = 0\n row = 0\n i = 0\n for mark in self.__grid:\n if row == 0:\n turtle.goto(-60+60*column, 60)\n elif row == 1:\n turtle.goto(-60+60*column, 0)\n elif row == 2:\n turtle.goto(-60+60*column, -60)\n\n if isinstance(mark, str):\n if mark.lower() == 'x': \n drawX(i)\n elif mark.lower() == 'o':\n drawO(i)\n\n column += 1\n\n if column == 3:\n column = 0\n row += 1\n\n i+=1\n\n turtle.goto(-60, 60)", "def redrawAll(self):\n self.canvas.delete(ALL)\n self.gridBG = []\n self.gridBusy = []\n for row in range(self.rows):\n self.gridBG.append([])\n self.gridBusy.append([])\n for col in range(self.cols):\n self.gridBG[row].append(self.drawCell(row, col, self.colors['idle'], bgPattern=\"gray75\"))\n self.gridBusy[row].append(0)\n for row in range(self.rows + 1):\n self.canvas.create_line(\n self.margin,\n self.margin + row * self.cellSize,\n self.margin + self.cols * self.cellSize,\n self.margin + row * self.cellSize,\n dash=(self.dashBlack, self.dashWhite)\n )\n for col in range(self.cols + 1):\n self.canvas.create_line(\n self.margin + col * self.cellSize,\n self.margin,\n self.margin + col * self.cellSize,\n self.margin + self.rows * self.cellSize,\n dash=(self.dashBlack, self.dashWhite)\n )", "def _draw_players(self):\n for player in self.players:\n player.draw()", "def draw_particles(self):\n for particle in self.particles:\n particle.draw()", "def drawGrid(self):\n\n if self.orientation == \"isometric\":\n for vline in range(0, self.map_array.shape[0]):\n line = 
self.canvas.create_line(iso(vline*self.cell_width, 0),\n iso(vline*self.cell_width, self.map_array.shape[0]*self.cell_height))\n self.canvas_objects.append(line)\n\n for hline in (range(0, self.map_array.shape[1])):\n line = self.canvas.create_line(iso(0, hline*self.cell_height),\n iso(self.map_array.shape[1]*self.cell_width, hline*self.cell_height))\n self.canvas_objects.append(line)\n self.canvas.bind(\"<Button-1>\", self.paintCells)\n self.canvas.bind(\"<Enter>\", self.drawFrame)\n self.canvas.bind(\"<Leave>\", self.killFrame)\n self.canvas.bind(\"<Motion>\", self.showFrame)", "def draw(self, grille):\n grille.clear_highlight()\n for x in range(8):\n for y in range(8):\n self.draw_c(x, y, grille)", "def _drawRays(self):\r\n for rayID, ray in self.rayDict.items():\r\n ray.drawPath()", "def draw(self): \n [component.draw() for component in self.components]", "def draw(self): \n [component.draw() for component in self.components]", "def draw(self): \n [component.draw() for component in self.components]", "def draw(self): \n [component.draw() for component in self.components]", "def draw(self): \n [component.draw() for component in self.components]", "def draw(self): \n [component.draw() for component in self.components]", "def draw(self): \n [component.draw() for component in self.components]", "def draw(self): \n [component.draw() for component in self.components]", "def draw(self):\r\n self.canvas.delete(tk.ALL)\r\n\r\n # Draw the points.\r\n radius = 2\r\n for point in self.points:\r\n x0 = point[0] - radius\r\n y0 = point[1] - radius\r\n x1 = x0 + 2 * radius\r\n y1 = y0 + 2 * radius\r\n self.canvas.create_oval(x0, y0, x1, y1, fill=\"red\", outline=\"red\")\r\n\r\n # If we have a solution, draw it.\r\n if self.solved:\r\n curve = []\r\n for x in range(self.canvas.winfo_width()):\r\n curve.append((x, F(self.a_values, x)))\r\n self.canvas.create_line(curve, fill=\"blue\")", "def draw(self):\n for x in range(self.numRows):\n print self.grid[x]" ]
[ "0.71605074", "0.69108456", "0.67938644", "0.6536287", "0.65061176", "0.6492908", "0.64312756", "0.64153045", "0.64115673", "0.6386151", "0.63729346", "0.6363726", "0.633303", "0.6317614", "0.63137263", "0.6312137", "0.6292693", "0.62355477", "0.623256", "0.6231849", "0.61988556", "0.61988556", "0.61988556", "0.61988556", "0.61988556", "0.61988556", "0.61988556", "0.61988556", "0.6168119", "0.61122483" ]
0.79720926
0
Check if the ball reached the target. If it did, the game is won.
def check_ball_on_target(): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_hit(self):\n too_close_x = (PADDLE_WIDTH / 2) + BALL_RADIUS\n too_close_y = (PADDLE_HEIGHT / 2) + BALL_RADIUS\n\n if (abs(self.ball.center.x - self.paddle.center.x) < too_close_x and\n abs(self.ball.center.y - self.paddle.center.y) < too_close_y and\n self.ball.velocity.dx > 0):\n # we are too close and moving right, this is a hit!\n self.ball.bounce_horizontal()\n self.score += SCORE_HIT", "async def is_target_reached(self) -> bool: # type: ignore\n ...", "def hit_paddle(self):\n pass\n\n #Implement if collision with paddle is detected\n\n #Add randomness to how ball direction will change and return value", "def isInGoal(self):\n coordx= self.playerPos.x\n coordy= self.playerPos.y\n target = 0 if self.id_team == 1 else 1\n\n if((((target == 0)and (coordx<=5))|\n ((target == 1) and(coordx>145))) \n and (coordy<=50 and coordy>=40)):\n return True\n else:\n return False", "def is_hit(ball, r_ball, v, target, r_target):\n v_norm = norm_2d(v)\n dr = (target[0] - ball[0], target[1] - ball[1])\n dr_norm = norm_2d(dr)\n\n p = project(dr, v)\n p_norm = norm_2d(p)\n\n if p_norm > v_norm:\n c = (v_norm ** 2 + dr_norm ** 2 - 2 * sc_mul(v, dr)) ** 0.5\n return c <= r_ball + r_target\n\n h = get_point_line_distance(target, ball, (-v[1], v[0]))\n return abs(h) <= r_ball + r_target", "def checkball(self):\r\n for ball in self.overlapping_sprites:\r\n ball.bottom=self.top\r\n if math.fabs(ball.x-self.x)<math.fabs(ball.x-self.left) and math.fabs(ball.x-self.x)<math.fabs(ball.x-self.right):\r\n ball.vertbounce()\r\n if math.fabs(ball.x-self.left)<math.fabs(ball.x-self.x) and math.fabs(ball.x-self.left)<math.fabs(ball.x-self.right):\r\n ball.leftbounce()\r\n if math.fabs(ball.x-self.right)<math.fabs(ball.x-self.left) and math.fabs(ball.x-self.right)<math.fabs(ball.x-self.x):\r\n ball.rightbounce()\r\n self.points.value+=10\r\n if self.points.value==500:\r\n ball.ballchange()\r\n elif self.points.value==2000:\r\n ball.ballchange()\r\n elif self.points.value==4500:\r\n ball.ballchange()\r\n elif self.points.value==10000:\r\n ball.ballchange()", "def __hit_paddle(self, g_object):\n return g_object == self.__paddle", "def check_if_won(self):\n if self.player_points > self.enemy_points:\n self.bHasWon = True\n else:\n self.bHasWon = False", "def hit(self, otherball):\r\n dx = (self.unif[0] + self.vx) - (otherball.unif[0] + otherball.vx)\r\n dy = (self.unif[1] + self.vy) - (otherball.unif[1] + otherball.vy)\r\n rd = self.radius + otherball.radius\r\n return dot(dx, dy) < (rd * rd)", "def check_if_over(self):\n if self.remainingBalls == 0:\n self.check_if_won()\n self.game_over = True", "def check_pin_ball_hit(time_elapsed):\n\n pass", "def goal_test(self, state):\n for x, y in state.alvos:\n if state.tabuleiro[x][y] is not BOX_ON_TARGET:\n return False\n return True", "def check_reached(self):\n m_x, m_y = self.destination.get_pos()\n m_radius = self.destination.radius\n distance_centre = math.sqrt((m_x - self.x)**2 + (m_y - self.y)**2)\n sum_radii = m_radius + self.radius\n if distance_centre < sum_radii:\n self.color = pygame.colordict.THECOLORS['green']\n self.has_reached = True", "def __handle_wall_collision(self):\n if self.__ball.x <= 0 or self.__ball.x + self.__ball.width >= self.__window.width:\n self.__dx = - self.__dx\n\n next_target_top = self.__window.get_object_at(self.__ball.x + self.__dx*1.5, self.__ball.y + self.__dy*1.5)\n next_target_bot = self.__window.get_object_at(self.__ball.x + self.__ball.width + self.__dx*1.5,\n self.__ball.y + self.__ball.height + self.__dy*1.5)\n\n if 
self.__hit_paddle(next_target_top) or self.__hit_paddle(next_target_bot):\n self.__dy = - abs(self.__dy)\n if self.__ball.x <= self.__paddle.x + 20:\n # The ball will fly left if hit the left of the paddle\n self.__dx = - abs(self.__dx)\n elif self.__ball.x > self.__paddle.x + self.__paddle.width - 20:\n # The ball will fly right if hit the right of the paddle\n self.__dx = abs(self.__dx)\n elif self.__hit_bricks(next_target_top) or self.__hit_bricks(next_target_bot):\n target_brick = next_target_top if next_target_top else next_target_bot\n self.__remove_brick(target_brick)\n self.__dy = - self.__dy\n elif self.__ball.y <= 0:\n self.__dy = - self.__dy\n elif self.__ball.y + self.__ball.height >= self.__window.height:\n self.__num_lives -= 1\n self.__playing = False\n self.__set_ball_position()\n self.__set_paddle_position()\n self.__set_ball_velocity()\n self.__set_record_board()", "def check_collision(self):\n if self.window.get_object_at(self.ball.x,self.ball.y+self.radius*2) is self.paddle:\n self.bounce()\n if self.window.get_object_at(self.ball.x+self.radius*2,self.ball.y+self.radius*2) is self.paddle:\n self.bounce()", "def play(target):\n # Initialize the scores\n # while no one has reached the target\n # Play a round for the player\n # If the player did not reach the target\n # Play a round for the opponent (primitive AI)\n # Display the results", "def check_shot_on_target(self, shot):\n # Defining a few variables to ease the reading\n # Here we define the x and y interval of the goal's segment\n x_min = min(self.s_pos.x, self.e_pos.x)\n x_max = max(self.s_pos.x, self.e_pos.x)\n\n y_min = min(self.s_pos.y, self.e_pos.y)\n y_max = max(self.s_pos.y, self.e_pos.y)\n\n # Shortening variables names\n o_x = shot.opponent.pos.x\n o_y = shot.opponent.pos.y\n\n # If the angle = pi / 2 or - pi / 2, then tan(angle) is undefined\n # In these cases, the shot is vertical, therefore it is valid\n # iff the x coordinate of the opponent is in the goal's x interval\n if abs(shot.angle) == math.pi / 2:\n return self.is_in_interval(x_min, x_max, o_x)\n\n # If the angle = 0, pi or -pi, then tan(angle) is 0 which can lead to \n # undefined intersection points (if the goal is vertical for example)\n # although there is an intersection point\n # \n # In these cases, the shot is horizontal, therefore it is valid\n # iff the y coordinate of the opponent is in the goal's y interval\n if abs(shot.angle) == math.pi or shot.angle == 0:\n return self.is_in_interval(y_min, y_max, o_y)\n\n # Using tan the least amount of time possible, for this is a slow function\n tan_theta = math.tan(shot.angle)\n\n # Define the LE of the shot\n le1 = LinearEquation(tan_theta, o_y - tan_theta * o_x)\n le2 = None\n\n # If the goal is vertical, finding the intersection point\n # is not possible using the normal way\n #\n # That being said, unless the LE of the shot is vertical too (which it \n # isn't as it is checked before hand) there has to be an intersection point\n # This intersection must happen when at the x coodinate of the goal's segment\n # therefore, it is possible to compute the y coordinate of the intersection by\n # computing the application of the shot's LE on this ex coordinate\n #\n # Then, the resulting y is valid iff it is in the goal's segment interval\n if self.e_pos.x - self.s_pos.x == 0:\n y = le1.apply(self.e_pos.x)\n return self.is_in_interval(y_min, y_max, y)\n\n # The normal way of solving the intersection of these two LEs\n else:\n\n # Shortening variables by computing the coefficient of the goal's 
LE\n ratio = (self.e_pos.y - self.s_pos.y) / (self.e_pos.x - self.s_pos.x)\n\n # If the lines are parallels (have the same coefficient) return False\n if math.tan(shot.angle) == ratio:\n return False\n\n # Defining the goal's LE\n le2 = LinearEquation(ratio, self.e_pos.y - self.e_pos.x * ratio)\n\n # Finding the intersection point of the two LEs\n # If there isn't one, return False (but there should be one\n # given all the asserts we do before hand, this is just for completion sake)\n p_intersect = le1.intersection(le2)\n if p_intersect == None:\n return False\n\n # If the intersection point's abscissa is in the goal's x interval, then it is\n # a valid abstracted shot going \n return self.is_in_interval(x_min, x_max, p_intersect.x)", "def shooting_target(agent):\n ball = agent.info.ball\n car = agent.info.my_car\n car_to_ball = ball.location - car.location\n backline_intersect = line_backline_intersect(\n agent.their_goal.center[1], vec2(car.location), vec2(car_to_ball))\n if abs(backline_intersect) < 700:\n goal_to_ball = normalize(car.location - ball.location)\n error = 0\n else:\n # Right of the ball\n if -500 > backline_intersect:\n target = agent.their_goal.corners[3] + vec3(400, 0, 0)\n # Left of the ball\n elif backline_intersect > 500:\n target = agent.their_goal.corners[2] - vec3(400, 0, 0)\n goal_to_ball = normalize(ball.location - target)\n # Subtract the goal to car vector\n difference = goal_to_ball - normalize(car.location - target)\n error = cap(abs(difference[0]) + abs(difference[1]), 0, 5)\n\n goal_to_ball_2d = vec2(goal_to_ball[0], goal_to_ball[1])\n test_vector_2d = dot(rotation(0.5 * math.pi), goal_to_ball_2d)\n test_vector = vec3(test_vector_2d[0], test_vector_2d[1], 0)\n\n distance = cap((40 + distance_2d(ball.location, car.location) * (error ** 2)) / 1.8, 0, 4000)\n location = ball.location + vec3((goal_to_ball[0] * distance), goal_to_ball[1] * distance, 0)\n\n # this adjusts the target based on the ball velocity perpendicular\n # to the direction we're trying to hit it\n multiplier = cap(distance_2d(car.location, location) / 1500, 0, 2)\n distance_modifier = cap(dot(test_vector, ball.velocity) * multiplier, -1000, 1000)\n location += vec3(\n test_vector[0] * distance_modifier, test_vector[1] * distance_modifier, 0)\n\n # another target adjustment that applies if the ball is close to the wall\n extra = 3850 - abs(location[0])\n if extra < 0:\n location[0] = cap(location[0], -3850, 3850)\n location[1] = location[1] + (-sign(agent.team) * cap(extra, -800, 800))\n return location", "def updateLives(self):\n if self._ball.ballBottom() == True:\n self._tries = self._tries - 1\n return True", "def playfield_switch_hit(self, **kwargs):\n if (not self.balls or (kwargs.get('balls') and self.balls - kwargs['balls'] < 0)):\n self.mark_playfield_active()\n\n if not self.num_balls_requested:\n if self.machine.game:\n self.unexpected_balls = 1\n\n if self.machine.config['machine']['glass_off_mode']:\n self.log.debug(\"Playfield_active switch hit with no balls \"\n \"expected. glass_off_mode is enabled, so \"\n \"this will be ignored.\")\n else:\n self.log.debug(\"Playfield_active switch hit with no balls \"\n \"expected. 
glass_off_mode is not enabled, \"\n \"setting playfield ball count to 1\")\n\n self.balls = 1\n self.machine.events.post('unexpected_ball_on_' + self.name)", "def click(event):\r\n global score, targets_left, have_friend_param\r\n flag = 0\r\n mult = event.button\r\n for i in range(num_of_balls + 1):\r\n if balls_pool[i][6] > 0 and (event.pos[0] - balls_pool[i][0]) ** 2 + (event.pos[1] - balls_pool[i][1]) ** 2 <= \\\r\n balls_pool[i][2] ** 2:\r\n if i == 0:\r\n score += mult * max_rad * (1 + have_friend_param)\r\n screen.fill(YELLOW)\r\n else:\r\n score += mult * (max_rad + min_rad - balls_pool[i][2]) * (1 + have_friend_param)\r\n balls_pool[i][6] -= 1 * mult\r\n if balls_pool[i][6] <= 0:\r\n targets_left -= 1\r\n flag = 1\r\n\r\n if not flag:\r\n score -= mult * (max_rad + min_rad) // 10", "def __move_ball(self):\n while not self.__game_is_over():\n self.__ball.move(self.__dx, self.__dy)\n self.__handle_wall_collision()\n if self.__num_lives == 0:\n self.__game_over_picture()\n break\n elif self.__bricks_total == 0:\n self.__game_over_picture('You Win!!')\n break\n pause(FRAME_RATE)", "def still_going(ball_stats):\n if ball_stats[3] <= 0: # if vy = vx = 0 we should stop\n return False\n\n if ball_stats[0] > 41 * 2.54 or ball_stats[0] < 0: # checking if we are out of the lane\n return False\n pins_loc = ORIG_PINS_LOC.copy()\n for p in pins_loc:\n if dist((ball_stats[0], ball_stats[1]), p) < R_BALL + R_PIN: # checking if we hit one of the balls\n return False\n return True", "def _playfield_switch_hit(self, **kwargs):\n if self.balls <= 0 or (kwargs.get('balls') and self.balls - kwargs['balls'] < 0):\n self._mark_playfield_active()\n\n if not self.num_balls_requested:\n self.debug_log(\"Playfield was activated with no balls expected.\")\n self.machine.events.post('unexpected_ball_on_' + self.name)\n '''event: unexpected_ball_on_(name)\n desc: The playfield named (name) just had a switch hit,\n meaning a ball is on it, but that ball was not expected.\n '''\n\n self.ball_search.reset_timer()", "def detect_collision():\n # with the top and bottom of screen\n if ball.ycor() > GAME_Y_BARRIER or ball.ycor() < -GAME_Y_BARRIER:\n ball.bounce_y()\n # with the paddles\n if ball.distance(paddle_right) < 50 and ball.xcor() > GAME_X_BARRIER \\\n or ball.distance(paddle_left) < 50 and ball.xcor() < -GAME_X_BARRIER:\n ball.bounce_x()", "def at_target(self):\n return self.location == self.target_location", "def check_winner(self):\n pass", "def move():\n if randrange(40) == 0:\n y = randrange(-150, 150)\n target = vector(200, y)\n targets.append(target)\n\n for target in targets: # velocidad de los targets\n target.x -= target_speed\n\n if inside(ball):\n speed.y -= 0.35\n ball.move(speed)\n\n dupe = targets.copy()\n targets.clear()\n\n for target in dupe:\n if abs(target - ball) > 13:\n targets.append(target)\n\n for target in targets:\n if not inside(target):\n target.x = 200\n\n draw()\n\n ontimer(move, 50)", "def check_player_reached():\n global round_start_timer, round_over\n\n if player1.alive and player1.rect.top < (platform_width // 2):\n add_time_points()\n reset_players()\n player1.wins += 1\n return True\n\n elif player2.alive and (player2.rect.top + player2.image.get_height()) > \\\n (SCREEN_HEIGHT - platform_width):\n player2.wins += 1\n round_over = True\n add_time_points()\n reset_players()\n return True", "def chase(self, target):\n linear_dist = lambda x1, x2, y1, y2: math.sqrt((x1 - x2)**2 + \n (y1 - y2)**2)\n min_dist_to_target = linear_dist(self.x, target.x, \n self.y, target.y)\n 
possible_posn = [[1, 0], [-1, 0], [0, 1], [0, -1]]\n move_to_make = None\n\n for posn in possible_posn:\n if (self.x + posn[0] == self.handler.player.x and \n self.y + posn[1] == self.handler.player.y and \n self.handler.game_state != data.DEAD):\n dmg = self.deal_damage(self.handler.player)\n\n if dmg:\n self.handler.message_box.add_msg(\"{} attacks you for {} damage!\".format(self.name, dmg), \n data.COLOURS['mob_atk_text'])\n else:\n self.handler.message_box.add_msg(\"{} missed!\".format(self.name), \n data.COLOURS['mob_atk_text'])\n\n if self.handler.game_state == data.DEAD:\n self.handler.message_box.add_msg(\"{} killed you!\".format(self.name),\n data.COLOURS['player_die_text'])\n elif not self.handler.world.is_solid(self.x + posn[0], self.y + posn[1]):\n new_dist = linear_dist(self.x + posn[0], target.x,\n self.y + posn[1], target.y)\n if new_dist < min_dist_to_target:\n min_dist_to_target = new_dist\n move_to_make = posn\n\n if move_to_make:\n self.move(move_to_make[0], move_to_make[1])" ]
[ "0.71088266", "0.69896346", "0.6877738", "0.6813622", "0.67582506", "0.6734881", "0.67276996", "0.6638412", "0.66322345", "0.6532848", "0.6532257", "0.6510124", "0.6442143", "0.64397997", "0.6393392", "0.6384225", "0.6349237", "0.6344773", "0.6292504", "0.62760764", "0.6267416", "0.6266898", "0.62573165", "0.6246083", "0.6242558", "0.62382215", "0.6235397", "0.6205073", "0.61775964", "0.61679864" ]
0.79406464
0
Add a pin at the current mouse location. Turtle calls this when the user clicks the mouse.
def add_pin(x, y): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_mouse_press(self, x, y, button, modifiers):\n self.add_wall()", "def on_mouse_press(self, x, y, button):\n\n pass", "def follow(self):\n\t\tpos = pygame.mouse.get_pos()\n\t\tself.x = pos[0]\n\t\tself.y = pos[1]\n\t\tself.draw()", "def on_mouse_click(self, event: Event):\r\n self.control.add_gem(self.row, self.column)", "def handle_mouse(self, x, y):\n pass", "def grab(self, event):\n self.ypos = event.y\n self.xpos = event.x\n self.config(cursor='fleur')", "def handle_mouse_press(self, event):", "def follow_mouse(self, mouse):\n half_width = self.width() / 2\n self.left = mouse.get_x() - half_width\n self.right = mouse.get_x() + half_width", "def add_point(self, x: int, y: int):\n self.state[x, y] = 1", "def add_point(self, x: int, y: int):\n self.state[x, y] = 1", "def add_point(self, x: int, y: int):\n self.state[x, y] = 1", "def click(self, event):\r\n p = (event.x, self.toCartesian(event.y))\r\n self.tree.add(p)\r\n self.paint()", "def add_new(event):\n t = turtle.Turtle()\n screen_w, screen_h = t.screen._window_size()\n t.goto(event.x - screen_w//2, screen_h //2 - event.y)", "def mouse_press_event(self, x: int, y: int, button: int):\n pass", "def on_mouse_press(self, x: float, y: float, button: int, modifiers: int):\n # Fire!\n angle = self._get_angle_degrees(x, y)\n\n bullet = Bullet()\n bullet.fire(angle)\n\n self.bullets.append(bullet)", "def _on_pyglet_mouse_click(self, x, y, button, modifiers):\n button_time = clock()\n this_button = self._button_names[button]\n self._mouse_buffer.append((this_button, x, y, button_time))", "def addPoint(self, *args, **kwargs):\n ...", "def click(point):\n m = PyMouse()\n m.move(*point)\n m.press(*point)\n m.release(*point)", "def on_mouse_press(self, event):\n self.on_mouse_wheel(event)", "def mousePosition(self):", "def handle_press( self, x, y ):\n self.pressed_flag = True\n self.first_point = (x, y)", "def on_mouse_press(self, x, y, button, key_modifiers):\r\n pass", "def mouseDragged():\n if mousePressed:\n mousePressed()", "def mouse_position_event(self, x: int, y: int):\n pass", "def _add_point(self):\r\n self.command_stack.do(model.structure.AddPoint(self._structure, self._point_index+1, 0, 0))", "def on_mouse_motion(self, x: float, y: float, dx: float, dy: float):\n self.mouse_x = x\n self.mouse_y = y\n \n # set the rifle angle in degrees\n self.rifle.angle = self._get_angle_degrees(x, y)", "def add_point(self, xpos, ypos):\n self.arcpoints.append([xpos, ypos])", "def on_mouse_release(self, x, y, button):\n pass", "def on_mouse_press(self, x, y, button, modifiers):\n # Create a bullet\n bullet = arcade.Sprite(\"laserBlue05.png\", SPRITE_SCALING_LASER)\n\n # Position the bullet at the player's current location\n start_x = self.player_sprite.center_x\n start_y = self.player_sprite.center_y\n bullet.center_x = start_x\n bullet.center_y = start_y\n\n # Get from the mouse the destination location for the bullet\n # IMPORTANT! If you have a scrolling screen, you will also need\n # to add in self.view_bottom and self.view_left.\n dest_x = x\n dest_y = y\n\n # Do math to calculate how to get the bullet to the destination.\n # Calculation the angle in radians between the start points\n # and end points. 
This is the angle the bullet will travel.\n x_diff = dest_x - start_x\n y_diff = dest_y - start_y\n angle = math.atan2(y_diff, x_diff)\n\n # Angle the bullet sprite so it doesn't look like it is flying\n # sideways.\n bullet.angle = math.degrees(angle)\n print(f\"Bullet angle: {bullet.angle:.2f}\")\n\n # Taking into account the angle, calculate our change_x\n # and change_y. Velocity is how fast the bullet travels.\n bullet.change_x = math.cos(angle) * BULLET_SPEED\n bullet.change_y = math.sin(angle) * BULLET_SPEED\n\n # Add the bullet to the appropriate lists\n self.bullet_list.append(bullet)", "def on_mouse_motion(self, x, y, delta_x, delta_y):\r\n pass" ]
[ "0.663023", "0.6181395", "0.61187136", "0.6113998", "0.596101", "0.5897008", "0.58573025", "0.5833299", "0.5831222", "0.5831222", "0.5831222", "0.58044356", "0.5802026", "0.57402784", "0.5719253", "0.571508", "0.5689365", "0.5653827", "0.56509036", "0.5618501", "0.56167287", "0.55871165", "0.54559916", "0.543275", "0.53924084", "0.53743094", "0.53528595", "0.53349185", "0.53231084", "0.53201395" ]
0.666243
0
Take an input cubelist containing forecasts from different cycles and merge them into a single cube.
def process(self, cubelist: Union[List[Cube], CubeList]) -> Cube: cubelist = rebadge_forecasts_as_latest_cycle(cubelist) # Take all the realizations from all the input cube and # put in one array all_realizations = [cube.coord("realization").points for cube in cubelist] all_realizations = np.concatenate(all_realizations) # Find unique realizations unique_realizations = np.unique(all_realizations) # If we have fewer unique realizations than total realizations we have # duplicate realizations so we rebadge all realizations in the cubelist if len(unique_realizations) < len(all_realizations): first_realization = 0 for cube in cubelist: n_realization = len(cube.coord("realization").points) cube.coord("realization").points = np.arange( first_realization, first_realization + n_realization, dtype=np.int32 ) first_realization = first_realization + n_realization # slice over realization to deal with cases where direct concatenation # would result in a non-monotonic coordinate lagged_ensemble = MergeCubes()(cubelist, slice_over_realization=True) return lagged_ensemble
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def forecast_dataframe_to_cube(\n df: DataFrame, training_dates: DatetimeIndex, forecast_period: int,\n) -> Cube:\n\n representation_type = get_forecast_representation(df)\n\n fp_point = pd.Timedelta(int(forecast_period), unit=\"seconds\")\n\n cubelist = CubeList()\n\n for adate in training_dates:\n time_df = df.loc[(df[\"time\"] == adate) & (df[\"forecast_period\"] == fp_point)]\n\n time_df = _preprocess_temporal_columns(time_df)\n if time_df.empty:\n continue\n\n # The following columns are expected to contain one unique value\n # per column.\n for col in [\"period\", \"height\", \"cf_name\", \"units\", \"diagnostic\"]:\n _unique_check(time_df, col)\n\n if time_df[\"period\"].isna().all():\n time_bounds = None\n fp_bounds = None\n else:\n period = time_df[\"period\"].values[0]\n time_bounds = [adate - period, adate]\n fp_bounds = [fp_point - period, fp_point]\n\n time_coord = _define_time_coord(adate, time_bounds)\n height_coord = _define_height_coord(time_df[\"height\"].values[0])\n\n fp_coord = AuxCoord(\n np.array(\n fp_point.total_seconds(), dtype=TIME_COORDS[\"forecast_period\"].dtype\n ),\n \"forecast_period\",\n bounds=fp_bounds\n if fp_bounds is None\n else [\n np.array(f.total_seconds(), dtype=TIME_COORDS[\"forecast_period\"].dtype)\n for f in fp_bounds\n ],\n units=TIME_COORDS[\"forecast_period\"].units,\n )\n frt_coord = AuxCoord(\n np.array(\n time_df[\"forecast_reference_time\"].values[0].timestamp(),\n dtype=TIME_COORDS[\"forecast_reference_time\"].dtype,\n ),\n \"forecast_reference_time\",\n units=TIME_COORDS[\"forecast_reference_time\"].units,\n )\n\n for var_val in sorted(time_df[representation_type].unique()):\n var_df = time_df.loc[time_df[representation_type] == var_val]\n cf_name = var_df[\"cf_name\"].values[0]\n if representation_type == \"percentile\":\n var_coord = DimCoord(\n np.float32(var_val), long_name=\"percentile\", units=\"%\"\n )\n elif representation_type == \"realization\":\n var_coord = DimCoord(\n np.int32(var_val), standard_name=\"realization\", units=\"1\"\n )\n\n if \"station_id\" in var_df.columns:\n unique_site_id = var_df[\"station_id\"].values.astype(\"<U8\")\n unique_site_id_key = \"station_id\"\n else:\n unique_site_id = None\n unique_site_id_key = None\n\n cube = build_spotdata_cube(\n var_df[\"forecast\"].astype(np.float32),\n cf_name,\n var_df[\"units\"].values[0],\n var_df[\"altitude\"].astype(np.float32),\n var_df[\"latitude\"].astype(np.float32),\n var_df[\"longitude\"].astype(np.float32),\n var_df[\"wmo_id\"].values.astype(\"U5\"),\n unique_site_id,\n unique_site_id_key,\n scalar_coords=[\n time_coord,\n frt_coord,\n fp_coord,\n var_coord,\n height_coord,\n ],\n )\n cubelist.append(cube)\n\n if not cubelist:\n return\n cube = cubelist.merge_cube()\n\n if representation_type == \"percentile\":\n return RebadgePercentilesAsRealizations()(cube)\n return cube", "def _sort_cubelist(self, cubelist):\n sorted_cubelist = []\n realization_num = 1\n cubelist = cubelist.merge(unique=False)\n for cube in cubelist:\n # If time is a scalar coordinate, promote it to a dimension \n # coordinate, this is because all cubes must have the same number \n # of dimensions to be compared.\n if len(cube.coord(self.time_coord).points) == 1:\n cube = iris.util.new_axis(cube, scalar_coord=self.time_coord)\n \n # Chop cubes into individual realizations for relabelling.\n member_slices = get_coordinate_slice_dimensions(\n cube, [self.realization,self.forecast_ref_time],\n ignore_missing_coords=True)\n for member_slice in cube.slices(member_slices):\n \n if 
self.realization in [coord.name() \n for coord in member_slice.coords()]:\n member_slice.coord(\n self.realization).points = [realization_num]\n else:\n realization_coord = iris.coords.AuxCoord([realization_num],\n self.realization)\n member_slice.add_aux_coord(realization_coord)\n \n member_slice.cell_methods = None\n sorted_cubelist.append(member_slice)\n realization_num += 1\n \n sorted_cubelist = iris.cube.CubeList(sorted_cubelist)\n # Mask missing time steps so merging can be done.\n sorted_cubelist = pad_coords(sorted_cubelist, self.time_coord)\n cube = sorted_cubelist.merge_cube()\n # Check x-y coordinates match the specified range.\n cube = self._area_inst.check_cube_area_bounds(cube, self.xy_coords, \n self.area_bounds)\n cube = self.extract_area_bounds(cubes=cube)\n \n if cube.coord_dims(cube.coord(self.realization)) == \\\n cube.coord_dims(cube.coord(self.forecast_ref_time)):\n # Re order realizations in initialisation date order.\n ordered_inits = sorted(cube.coord('forecast_reference_time').points)\n ordered_mems = range(1, len(cube.coord('realization').points)+1)\n ordered_cubes = []\n for member_slice in cube.slices(member_slices):\n mem_index = ordered_inits.index(\n member_slice.coord(self.forecast_ref_time).points[0])\n member_slice.coord('realization').points = ordered_mems[mem_index]\n del ordered_inits[mem_index]\n del ordered_mems[mem_index]\n ordered_cubes.append(member_slice)\n cube = iris.cube.CubeList(ordered_cubes).merge_cube()\n \n return cube", "def truth_dataframe_to_cube(df: DataFrame, training_dates: DatetimeIndex,) -> Cube:\n\n cubelist = CubeList()\n for adate in training_dates:\n time_df = df.loc[(df[\"time\"] == adate)]\n\n time_df = _preprocess_temporal_columns(time_df)\n\n if time_df.empty:\n continue\n\n # The following columns are expected to contain one unique value\n # per column.\n _unique_check(time_df, \"diagnostic\")\n\n if time_df[\"period\"].isna().all():\n time_bounds = None\n else:\n period = time_df[\"period\"].values[0]\n time_bounds = [adate - period, adate]\n\n time_coord = _define_time_coord(adate, time_bounds)\n height_coord = _define_height_coord(time_df[\"height\"].values[0])\n\n if \"station_id\" in time_df.columns:\n unique_site_id = time_df[\"station_id\"].values.astype(\"<U8\")\n unique_site_id_key = \"station_id\"\n else:\n unique_site_id = None\n unique_site_id_key = None\n\n cube = build_spotdata_cube(\n time_df[\"ob_value\"].astype(np.float32),\n time_df[\"cf_name\"].values[0],\n time_df[\"units\"].values[0],\n time_df[\"altitude\"].astype(np.float32),\n time_df[\"latitude\"].astype(np.float32),\n time_df[\"longitude\"].astype(np.float32),\n time_df[\"wmo_id\"].values.astype(\"U5\"),\n unique_site_id,\n unique_site_id_key,\n scalar_coords=[time_coord, height_coord],\n )\n cubelist.append(cube)\n\n if not cubelist:\n return\n\n return cubelist.merge_cube()", "def _sort_data(self, cubelist):\n sorted_cubelist = []\n for dates in self.dates:\n year_cubelist = self.extract_dates(dates, cubelist)\n for cube in year_cubelist.merge():\n # Check x-y coordinates match the specified range.\n cube = self._area_inst.check_cube_area_bounds(cube, \n self.xy_coords, \n self.area_bounds)\n cube = self.extract_area_bounds(cubes=cube)\n sorted_cubelist.append(cube)\n return iris.cube.CubeList(sorted_cubelist)", "def _sort_data(self, cubelist):\n sorted_cubelist = []\n for dates in self.dates:\n year_cubelist = self.extract_dates(dates, cubelist)\n sorted_cubelist.append(self._sort_cubelist(year_cubelist))\n return 
iris.cube.CubeList(sorted_cubelist)", "def _load_all_cubes(self, files_to_load):\n if self.process_workers > 1:\n arguments = [[self, load_file] for load_file in files_to_load]\n pool = multiprocessing.Pool(processes=self.process_workers)\n try:\n all_cubelists = pool.map(run_load_file, arguments)\n pool.close()\n pool.join()\n except KeyboardInterrupt:\n pool.terminate()\n else:\n all_cubelists = []\n for load_file in files_to_load:\n cubelist = self._load_file(load_file)\n if cubelist:\n all_cubelists.append(cubelist)\n \n all_cubes = []\n for cubelist in all_cubelists:\n for cube in cubelist:\n all_cubes.append(cube)\n\n if len(all_cubes) == 0:\n raise UserWarning('No data loaded.')\n \n # Gather universal information from the first cube.\n if self.xy_coords is None:\n self.xy_coords = [coord.name() \n for coord in get_xy_coords(\n all_cubes[0])]\n if self._area_inst.bounds_range is None:\n self._area_inst.bounds_range = self._area_inst.\\\n get_cube_area_bounds(all_cubes[0],\n self.xy_coords)\n if self.area_bounds is None:\n self.area_bounds = self._area_inst.get_cube_area_bounds(\n all_cubes[0],\n self.xy_coords)\n self.time_unit = all_cubes[0].coord(self.time_coord).units\n \n return iris.cube.CubeList(all_cubes)", "def combine_netCDF_cmip6(directory, model):\n\n # Make a list of the files in the above folder to loop through\n list_files = glob.glob(directory)\n list_files = np.array(list_files)\n newlist = np.sort(list_files)\n\n # Make a cubelist to add each file (cube) to\n Cubelist = iris.cube.CubeList([])\n\n # loop for each file in newlist\n for i in range(0, len(newlist)):\n\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', FutureWarning)\n warnings.simplefilter('ignore', UserWarning)\n \n # Load cube\n cube = iris.load_cube(newlist[i])\n\n # remove latitude & longitude attributes\n cube.coord('latitude').attributes = {}\n cube.coord('longitude').attributes = {} \n \n # creating latitude and longitude bounds\n if model=='IPSL-CM6A-LR' or model=='CNRM-ESM2-1':\n cube.coord('latitude').guess_bounds()\n cube.coord('longitude').guess_bounds()\n \n # CESM2 bound issue fix\n if (model=='CESM2') & (i==0):\n lat_data = cube.coord('latitude').points\n lon_data = cube.coord('longitude').points\n lat_bounds = cube.coord('latitude').bounds\n lon_bounds = cube.coord('longitude').bounds\n elif (model=='CESM2') & (i>0):\n cube.coord('latitude').points = lat_data\n cube.coord('longitude').points = lon_data\n cube.coord('latitude').bounds = lat_bounds\n cube.coord('longitude').bounds = lon_bounds\n \n # removing time attributes\n if model=='IPSL-CM6A-LR':\n cube.coord('time').attributes.pop('time_origin')\n \n # Append this cube to the cubelist\n Cubelist.append(cube)\n\n # matching attributes\n unify_time_units(Cubelist)\n equalise_attributes(Cubelist)\n\n for cube in Cubelist:\n lon_bounds = Cubelist[0].coord('longitude').bounds\n cube.coord('longitude').bounds = lon_bounds\n\n for i, cube in enumerate(Cubelist):\n if cube.coord('time').units == Cubelist[0].coord('time').units:\n pass\n else:\n print(i)\n \n # Concatenate each cube in cubelist together to make one data file (cube)\n new_cube = Cubelist.concatenate_cube()\n\n return new_cube", "def _get_metadata(self): \n def add_dates(date_list, dates):\n \"\"\"\n Append dates to date_list which are not already within date_list.\n \n \"\"\"\n for date in dates:\n if date.strftime('%d-%b') not in date_list:\n date_list.append(date.strftime('%d-%b'))\n return date_list\n \n metadata = {'DATA_TYPE':'Hindcast Data'} \n \n 
members = 0\n self.cube_init_dates = []\n self.cube_dates = []\n years = []\n \n for cube in self.cubelist:\n cube_metadata = self._get_model_metadata(cube)\n members += cube_metadata['MEMBERS']\n del cube_metadata['MEMBERS']\n \n self.cube_init_dates = add_dates(self.cube_init_dates, \n cube_metadata['INITIALISATION_DATES'])\n del cube_metadata['INITIALISATION_DATES']\n \n self.cube_dates = add_dates(self.cube_dates, \n cube_metadata['FORECAST_DATES'])\n # Years are based on the earliest forecast date.\n years.append(min(cube_metadata['FORECAST_DATES']).year)\n del cube_metadata['FORECAST_DATES']\n \n for key, val in cube_metadata.items():\n # Find unique metadata which has not already been added by \n # previous cubes. Years are the common one.\n current_vals = metadata.get(key)\n if current_vals is not None:\n for this_val in current_vals:\n if hasattr(this_val, '__iter__'): \n try: \n if numpy.array_equal(this_val, val):\n break\n except AttributeError:\n # If the array type is not comparable for \n # example array of strings.\n equal = True\n for this_item, item in zip(this_val, val):\n if this_item != item:\n equal = False\n break\n if equal:\n break\n else:\n if this_val == val:\n break\n metadata[key].append(val)\n else:\n metadata[key] = [val]\n \n bound_names = []\n # Tidy up lists of length 1.\n for key, val in metadata.items():\n if type(val) == list and len(val) == 1:\n metadata[key] = val[0]\n # Retrieve the exact bound names.\n if key[-7:] == '_BOUNDS':\n bound_names.append(key)\n \n \n metadata['INITIALISATION_DATES'] = self.cube_init_dates\n metadata['YEARS'] = sorted(list(set(years)))\n metadata['TOTAL_MEMBERS'] = members\n metadata['FORECAST_DATES'] = self.cube_dates\n\n return self.MetaData(metadata, bound_names)", "def combine_netCDF_rh_cmip6(directory, model):\n\n # Make a list of the files in the above folder to loop through\n list_files = glob.glob(directory)\n list_files = np.array(list_files)\n newlist = np.sort(list_files)\n\n # Make a cubelist to add each file (cube) to\n Cubelist = iris.cube.CubeList([])\n\n # loop for each file in newlist\n for i in range(0, len(newlist)):\n\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', FutureWarning)\n warnings.simplefilter('ignore', UserWarning)\n \n # Load cube\n cube = iris.load_cube(newlist[i])\n \n # matching all standard names\n cube.standard_name = 'heterotrophic_respiration_carbon_flux'\n\n # matching cube metadata\n if i == 0:\n metadata1 = cube.metadata\n else:\n cube.metadata = metadata1\n \n # creating latitude and longitude bounds\n if model=='IPSL-CM6A-LR' or model=='CNRM-ESM2-1':\n cube.coord('latitude').guess_bounds()\n cube.coord('longitude').guess_bounds()\n \n # removing time attributes\n if model=='IPSL-CM6A-LR':\n cube.coord('time').attributes.pop('time_origin')\n \n # Append this cube to the cubelist\n Cubelist.append(cube)\n\n # matching attributes\n unify_time_units(Cubelist)\n equalise_attributes(Cubelist)\n # Concatenate each cube in cubelist together to make one data file (cube)\n new_cube = Cubelist.concatenate_cube()\n \n for cube in Cubelist:\n lon_bounds = Cubelist[0].coord('longitude').bounds\n cube.coord('longitude').bounds = lon_bounds\n\n for i, cube in enumerate(Cubelist):\n if cube.coord('time').units == Cubelist[0].coord('time').units:\n pass\n else:\n print(i)\n\n return new_cube", "def merge_wcs_counts_cubes(filelist):\n out_prim = None\n out_ebounds = None\n\n datalist_gti = []\n exposure_sum = 0.\n nfiles = len(filelist)\n ngti = np.zeros(nfiles, int)\n\n for i, 
filename in enumerate(filelist):\n fin = fits.open(filename)\n sys.stdout.write('.')\n sys.stdout.flush()\n if i == 0:\n out_prim = update_primary(fin[0], out_prim)\n out_ebounds = update_ebounds(fin[\"EBOUNDS\"], out_ebounds)\n (gti_data, exposure, tstop) = extract_gti_data(fin[\"GTI\"])\n datalist_gti.append(gti_data)\n exposure_sum += exposure\n ngti[i] = len(gti_data)\n if i == 0:\n first = fin\n elif i == nfiles - 1:\n date_end = fin[0].header['DATE-END']\n else:\n fin.close()\n\n out_gti = merge_all_gti_data(datalist_gti, ngti, first['GTI'])\n out_gti.header['EXPOSURE'] = exposure_sum\n out_gti.header['TSTOP'] = tstop\n\n hdulist = [out_prim, out_ebounds, out_gti]\n for hdu in hdulist:\n hdu.header['DATE-END'] = date_end\n\n out_prim.update_header()\n sys.stdout.write(\"!\\n\")\n return fits.HDUList(hdulist)", "def merge_hpx_counts_cubes(filelist):\n out_prim = None\n out_skymap = None\n out_ebounds = None\n\n datalist_gti = []\n exposure_sum = 0.\n nfiles = len(filelist)\n ngti = np.zeros(nfiles, int)\n\n out_name = None\n\n for i, filename in enumerate(filelist):\n fin = fits.open(filename)\n sys.stdout.write('.')\n sys.stdout.flush()\n if i == 0:\n out_prim = update_null_primary(fin[0], out_prim)\n out_name = fin[1].name\n\n map_in = HpxMap.create_from_hdulist(fin)\n out_skymap = update_hpx_skymap_allsky(map_in, out_skymap)\n if i == 0:\n try:\n out_ebounds = update_ebounds(fin[\"EBOUNDS\"], out_ebounds)\n except KeyError:\n out_ebounds = update_energies(fin[\"ENERGIES\"], out_ebounds)\n try:\n (gti_data, exposure, tstop) = extract_gti_data(fin[\"GTI\"])\n datalist_gti.append(gti_data)\n exposure_sum += exposure\n ngti[i] = len(gti_data)\n except KeyError:\n pass\n\n if i == 0:\n first = fin\n elif i == nfiles - 1:\n try:\n date_end = fin[0].header['DATE-END']\n except KeyError:\n date_end = None\n else:\n fin.close()\n\n out_skymap_hdu = out_skymap.create_image_hdu(\"SKYMAP\")\n\n hdulist = [out_prim, out_skymap_hdu, out_ebounds]\n\n if len(datalist_gti) > 0:\n out_gti = merge_all_gti_data(datalist_gti, ngti, first['GTI'])\n out_gti.header['EXPOSURE'] = exposure_sum\n out_gti.header['TSTOP'] = tstop\n hdulist.append(out_gti)\n\n for hdu in hdulist:\n if date_end:\n hdu.header['DATE-END'] = date_end\n\n out_prim.update_header()\n sys.stdout.write(\"!\\n\")\n\n return fits.HDUList(hdulist)", "def forecast_and_truth_dataframes_to_cubes(\n forecast_df: DataFrame,\n truth_df: DataFrame,\n cycletime: str,\n forecast_period: int,\n training_length: int,\n percentiles: Optional[List[float]] = None,\n experiment: Optional[str] = None,\n) -> Tuple[Cube, Cube]:\n training_dates = _training_dates_for_calibration(\n cycletime, forecast_period, training_length\n )\n\n forecast_df, truth_df = _prepare_dataframes(\n forecast_df,\n truth_df,\n forecast_period,\n percentiles=percentiles,\n experiment=experiment,\n )\n\n forecast_cube = forecast_dataframe_to_cube(\n forecast_df, training_dates, forecast_period\n )\n truth_cube = truth_dataframe_to_cube(truth_df, training_dates)\n return forecast_cube, truth_cube", "def _extract_input_cubes(self, cubes: Union[CubeList, List[Cube]]) -> None:\n if isinstance(cubes, list):\n cubes = iris.cube.CubeList(cubes)\n if len(cubes) != 2:\n raise ValueError(f\"Expected 2 cubes, found {len(cubes)}\")\n\n if not spatial_coords_match(cubes):\n raise ValueError(\n \"Spatial coords mismatch between \" f\"{cubes[0]} and \" f\"{cubes[1]}\"\n )\n extracted_cube = cubes.extract(\"altitude_of_snow_falling_level\")\n if extracted_cube:\n (self.falling_level_cube,) = 
extracted_cube\n self.param = \"snow\"\n self.comparator = operator.gt\n self.get_discriminating_percentile = self.percentile_plugin(\n self.radius, percentiles=[80.0]\n )\n elif cubes.extract(\"altitude_of_rain_falling_level\"):\n extracted_cube = cubes.extract(\"altitude_of_rain_falling_level\")\n (self.falling_level_cube,) = extracted_cube\n self.param = \"rain\"\n self.comparator = operator.lt\n # We want rain that has come from sleet at or above the surface, so inverse of 80th\n # centile is the 20th centile.\n self.get_discriminating_percentile = self.percentile_plugin(\n self.radius, percentiles=[20.0]\n )\n else:\n extracted_cube = cubes.extract(\"altitude_of_rain_from_hail_falling_level\")\n if not extracted_cube:\n raise ValueError(\n \"Could not extract a rain, rain from hail or snow falling-level \"\n f\"cube from {cubes}\"\n )\n (self.falling_level_cube,) = extracted_cube\n self.param = \"rain_from_hail\"\n self.comparator = operator.lt\n self.get_discriminating_percentile = self.percentile_plugin(\n self.radius, percentiles=[20.0]\n )\n orography_name = \"surface_altitude\"\n extracted_cube = cubes.extract(orography_name)\n if extracted_cube:\n (self.orography_cube,) = extracted_cube\n else:\n raise ValueError(\n f\"Could not extract {orography_name} cube from \" f\"{cubes}\"\n )\n\n if self.falling_level_cube.units != self.orography_cube.units:\n self.falling_level_cube = self.falling_level_cube.copy()\n self.falling_level_cube.convert_units(self.orography_cube.units)", "def combine_netCDF_cSoil_cmip6(directory, model):\n\n # Make a list of the files in the above folder to loop through\n list_files = glob.glob(directory)\n list_files = np.array(list_files)\n newlist = np.sort(list_files)\n\n # Make a cubelist to add each file (cube) to\n Cubelist = iris.cube.CubeList([])\n\n # loop for each file in newlist\n for i in range(0, len(newlist)):\n with warnings.catch_warnings():\n \n warnings.simplefilter('ignore', FutureWarning)\n warnings.simplefilter('ignore', UserWarning)\n \n # Load cube\n cube = iris.load_cube(newlist[i])\n \n # matching all standard names\n cube.standard_name = 'soil_carbon_content'\n \n # creating latitude and longitude bounds\n if model=='IPSL-CM6A-LR' or model=='CNRM-ESM2-1':\n cube.coord('latitude').guess_bounds()\n cube.coord('longitude').guess_bounds()\n \n # removing time attributes\n if model=='IPSL-CM6A-LR':\n cube.coord('time').attributes.pop('time_origin')\n \n # Append this cube to the cubelist\n Cubelist.append(cube)\n\n # matching attributes\n unify_time_units(Cubelist)\n equalise_attributes(Cubelist)\n \n for cube in Cubelist:\n lon_bounds = Cubelist[0].coord('longitude').bounds\n cube.coord('longitude').bounds = lon_bounds\n\n for i, cube in enumerate(Cubelist):\n if cube.coord('time').units == Cubelist[0].coord('time').units:\n pass\n else:\n print(i)\n\n # Concatenate each cube in cubelist together to make one data file (cube)\n new_cube = Cubelist.concatenate_cube()\n\n return new_cube", "def update_metadata(cube_list, infile_history):\n\n equalise_attributes(cube_list)\n\n for cube in cube_list:\n cube.attributes['history'] = gio.write_metadata(file_info=infile_history)\n cube.data = numpy.array(cube.data) #removes _FillValue attribute\n\n return cube_list", "def combine_netCDF_model(directory, model):\n\n # Make a list of the files in the above folder to loop through\n list_files = glob.glob(directory)\n list_files = np.array(list_files)\n newlist = np.sort(list_files)\n\n # Make a cubelist to add each file (cube) to\n Cubelist = 
iris.cube.CubeList([])\n\n # loop for each file in newlist\n for i in range(0, len(newlist)):\n\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', FutureWarning)\n warnings.simplefilter('ignore', UserWarning)\n\n # Load cube\n cube = iris.load_cube(newlist[i])\n\n # Append this cube to the cubelist\n Cubelist.append(cube)\n\n # matching attributes\n unify_time_units(Cubelist)\n equalise_attributes(Cubelist)\n\n # Concatenate each cube in cubelist together to make one data file (cube)\n new_cube = Cubelist.concatenate_cube()\n\n return new_cube", "def extract_dates(self, dates=None, cubes=None, bounds=None, \n time_coord=None):\n if time_coord is None:\n time_coord = self.time_coord\n if cubes is None:\n try:\n assert self.cube, ('Data has not been loaded, use load '\\\n 'method.')\n single_cube = True\n data = iris.cube.CubeList([self.cube])\n except AttributeError:\n assert self.cubelist, ('Data has not been loaded, use load '\\\n 'method.')\n single_cube = False\n data = self.cubelist\n if dates is None:\n print 'Dates have already been extracted'\n return self.cube\n \n time_unit = self.time_unit\n else:\n if cubes:\n if type(cubes) == iris.cube.CubeList:\n single_cube = False\n data = cubes\n else:\n single_cube = True\n data = iris.cube.CubeList([cubes])\n time_unit = data[0].coord(time_coord).units\n else:\n print 'No data in given cube(s).'\n return cubes \n \n if dates:\n check_dates(dates)\n else:\n dates = self.dates\n if bounds is None:\n dates_are_bounds = self._dates_are_bounds\n else:\n dates_are_bounds = bounds\n if dates_are_bounds:\n min_date = min(dates)\n min_date = datetime.datetime(min_date.year, min_date.month, \n min_date.day, 0)\n max_date = max(dates)\n max_date = datetime.datetime(max_date.year, max_date.month, \n max_date.day, 23, 59)\n time_constraint = get_time_bound_constraints(min_date, \n max_date,\n time_coord)\n else:\n # Create daily bounds for the given dates.\n bound_dates = []\n for date in dates:\n bound_dates.append([datetime.datetime(date.year,\n date.month,\n date.day,\n 0),\n datetime.datetime(date.year,\n date.month,\n date.day,\n 23,\n 59)]) \n time_constraint = get_list_of_time_bound_constraints(bound_dates, \n time_coord)\n extracted_cubes = data.extract(time_constraint)\n if not extracted_cubes:\n raise UserWarning('Data does not contain the given \"%s\" dates, %s' \n % (time_coord,\n [date.strftime('%d/%m/%Y') for date in dates]))\n \n # Check if any of the requested dates were not in the data.\n # First compare the number of requested dates with the number of dates\n # extracted.\n extracted_dates = []\n for cube in extracted_cubes:\n for coord_point in cube.coord(time_coord).points:\n extracted_dates.append(coord_point)\n # If bounds is True, it is likely this will not be equal.\n if len(set(extracted_dates)) != len(dates):\n # If this is the case, check the dates manually.\n dates_not_in_data = []\n for date in dates:\n date_value = cube_time_converter(date, time_unit)\n found = False\n for cube in extracted_cubes:\n # If time coordinate has bounds use these.\n if cube.coord(time_coord).bounds is not None:\n for bounds in cube.coord(time_coord).bounds:\n if date_value >= min(bounds) and \\\n date_value <= max(bounds):\n found = True\n break\n # Else use coordinate points.\n else:\n min_date = datetime.datetime(date.year, date.month, \n date.day, 0)\n min_date_val = cube_time_converter(min_date, time_unit)\n max_date = datetime.datetime(date.year, date.month, \n date.day, 23, 59)\n max_date_val = 
cube_time_converter(max_date, time_unit)\n for point in cube.coord(time_coord).points:\n if min_date_val <= point <= max_date_val:\n found = True\n break\n if not found:\n dates_not_in_data.append(date.strftime('%d/%m/%Y'))\n\n if dates_not_in_data:\n print 'Data does not contain the following \"%s\" dates, %s' \\\n % (time_coord, dates_not_in_data)\n \n if single_cube:\n extracted_cubes = extracted_cubes[0]\n \n return extracted_cubes", "def _get_metadata(self): \n metadata = {'DATA_TYPE':'Forecast Data'} \n \n cube_metadata = self._get_model_metadata(self.cube)\n \n self.cube_init_dates = cube_metadata['INITIALISATION_DATES']\n del cube_metadata['INITIALISATION_DATES']\n \n self.cube_dates = cube_metadata['FORECAST_DATES']\n del cube_metadata['FORECAST_DATES']\n \n for key, val in cube_metadata.items():\n # Find unique metadata which has not already been added by \n # previous cubes. Years are the common one.\n current_vals = metadata.get(key)\n if current_vals is not None:\n for this_val in current_vals:\n if hasattr(this_val, '__iter__'): \n try: \n if numpy.array_equal(this_val, val):\n break\n except AttributeError:\n # If the array type is not comparable for \n # example array of strings.\n equal = True\n for this_item, item in zip(this_val, val):\n if this_item != item:\n equal = False\n break\n if equal:\n break\n else:\n if this_val == val:\n break\n metadata[key].append(val)\n else:\n metadata[key] = [val]\n \n bound_names = []\n # Tidy up list of length 1.\n for key, val in metadata.items():\n if type(val) == list and len(val) == 1:\n metadata[key] = val[0]\n # Retrieve the exact bound names.\n if key[-7:] == '_BOUNDS':\n bound_names.append(key)\n \n metadata['INITIALISATION_DATES'] = [date.strftime('%d/%m/%Y') \n for date in \n self.cube_init_dates]\n metadata['FORECAST_DATES'] = [date.strftime('%d/%m/%Y') \n for date in self.cube_dates]\n\n return self.MetaData(metadata, bound_names)", "def fix_metadata(self, cubes: CubeList) -> CubeList:\n return cubes", "def multiproc_vca(subcube_locs,channels,output_loc,fig_loc,dimensions):\n\t\n\twith schwimmbad.MultiPool() as pool:\n\t\tprint('started multi processing')\n\t\tprint(datetime.datetime.now())\n\n\t\t#create the lists for multiprocessing\n\t\t#vcacube=[f'{subcube_locs}_{dimensions}x{dimensions}_x{i}_y{j}.fits' for j in np.arange(0,dimensions) for i in np.arange(0,dimensions)]\n\t\tvcacube=[f'{k}_{dimensions}x{dimensions}_x{i}_y{j}.fits' for k in subcube_locs for j in np.arange(0,dimensions) for i in np.arange(0,dimensions)]\n\t\tchansamps=[channels for j in np.arange(0,dimensions) for k in subcube_locs for i in np.arange(0,dimensions)]\n\t\t#arrayloc=[f'{output_loc}_{dimensions}x{dimensions}_x{i}_y{j}' for j in np.arange(0,dimensions) for i in np.arange(0,dimensions)]\n\t\tarrayloc=[f'{k}_{dimensions}x{dimensions}_x{i}_y{j}' for k in output_loc for j in np.arange(0,dimensions) for i in np.arange(0,dimensions)]\n\t\t#figloc=[f'{fig_loc}_{dimensions}x{dimensions}_x{i}_y{j}' for j in np.arange(0,dimensions) for i in np.arange(0,dimensions)]\n\t\tfigloc=[f'{k}_{dimensions}x{dimensions}_x{i}_y{j}' for k in fig_loc for j in np.arange(0,dimensions) for i in np.arange(0,dimensions)]\n\n\n\t\tinputs=list(zip(vcacube,chansamps,arrayloc,figloc))\n\t\tprint(f'THESE ARE THE INPUTS FOR MULTIPROCESSING:{inputs}')\n\n\t\tout = list(pool.map(do_vca, inputs))\n\t\tprint('finished multiprocessing')\n\t\tprint(datetime.datetime.now())\n\tprint(out)", "def setUp(self):\n super().setUp()\n frt_dt = datetime.datetime(2017, 11, 10, 0, 0)\n time_dt = 
datetime.datetime(2017, 11, 10, 4, 0)\n\n base_data = np.array(\n [\n [[0.3, 1.1, 2.6], [4.2, 5.3, 5.9], [7.1, 8.2, 8.8]],\n [[0.7, 2.0, 2.9], [4.3, 5.6, 6.4], [7.0, 7.0, 9.2]],\n [[2.1, 3.0, 3.1], [4.8, 5.0, 6.1], [7.9, 8.1, 8.9]],\n ],\n dtype=np.float32,\n )\n temperature_data = Unit(\"Celsius\").convert(base_data, \"Kelvin\")\n self.current_temperature_forecast_cube = set_up_variable_cube(\n temperature_data,\n units=\"Kelvin\",\n realizations=[0, 1, 2],\n time=time_dt,\n frt=frt_dt,\n attributes=MANDATORY_ATTRIBUTE_DEFAULTS,\n )\n time_dt = time_dt - datetime.timedelta(days=5)\n frt_dt = frt_dt - datetime.timedelta(days=5)\n\n # Create historic forecasts and truth\n self.historic_forecasts = _create_historic_forecasts(\n temperature_data, time_dt, frt_dt, realizations=[0, 1, 2]\n )\n self.truth = _create_truth(temperature_data, time_dt)\n\n # Create a combined list of historic forecasts and truth\n self.combined = self.historic_forecasts + self.truth\n\n # Create the historic and truth cubes\n self.historic_temperature_forecast_cube = self.historic_forecasts.merge_cube()\n # Ensure the forecast coordinates are in the order: realization, time, lat, lon.\n self.historic_temperature_forecast_cube.transpose([1, 0, 2, 3])\n self.temperature_truth_cube = self.truth.merge_cube()\n\n # Create a cube for testing wind speed.\n self.current_wind_speed_forecast_cube = set_up_variable_cube(\n base_data,\n name=\"wind_speed\",\n units=\"m s-1\",\n realizations=[0, 1, 2],\n attributes=MANDATORY_ATTRIBUTE_DEFAULTS,\n )\n\n self.historic_wind_speed_forecast_cube = _create_historic_forecasts(\n base_data,\n time_dt,\n frt_dt,\n realizations=[0, 1, 2],\n name=\"wind_speed\",\n units=\"m s-1\",\n ).merge_cube()\n # Ensure the forecast coordinates are in the order: realization, time, lat, lon.\n self.historic_wind_speed_forecast_cube.transpose([1, 0, 2, 3])\n\n self.wind_speed_truth_cube = _create_truth(\n base_data, time_dt, name=\"wind_speed\", units=\"m s-1\"\n ).merge_cube()\n\n # Set up another set of cubes which have a halo of zeros round the\n # original data. 
This data will be masked out in tests using a\n # landsea_mask\n base_data = np.pad(base_data, ((0, 0), (1, 1), (1, 1)), mode=\"constant\")\n temperature_data = Unit(\"Celsius\").convert(base_data, \"Kelvin\")\n\n # Create historic forecasts and truth\n self.historic_forecasts_halo = _create_historic_forecasts(\n temperature_data, time_dt, frt_dt, realizations=[0, 1, 2]\n )\n self.truth_halo = _create_truth(temperature_data, time_dt)\n\n # Create the historic and truth cubes\n self.historic_temperature_forecast_cube_halo = (\n self.historic_forecasts_halo.merge_cube()\n )\n self.temperature_truth_cube_halo = self.truth_halo.merge_cube()\n\n # Create a cube for testing wind speed.\n self.historic_wind_speed_forecast_cube_halo = _create_historic_forecasts(\n base_data,\n time_dt,\n frt_dt,\n realizations=[0, 1, 2],\n name=\"wind_speed\",\n units=\"m s-1\",\n ).merge_cube()\n\n self.wind_speed_truth_cube_halo = _create_truth(\n base_data, time_dt, name=\"wind_speed\", units=\"m s-1\"\n ).merge_cube()\n\n data = np.array([1.6, 1.3, 1.4, 1.1])\n altitude = np.array([10, 20, 30, 40])\n latitude = np.linspace(58.0, 59.5, 4)\n longitude = np.linspace(-0.25, 0.5, 4)\n wmo_id = [\"03001\", \"03002\", \"03003\", \"03004\"]\n forecast_spot_cubes = iris.cube.CubeList()\n for realization in range(1, 3):\n realization_coord = [\n iris.coords.DimCoord(realization, standard_name=\"realization\")\n ]\n for day in range(5, 11):\n time_coords = construct_scalar_time_coords(\n datetime.datetime(2017, 11, day, 4, 0),\n None,\n datetime.datetime(2017, 11, day, 0, 0),\n )\n time_coords = [t[0] for t in time_coords]\n forecast_spot_cubes.append(\n build_spotdata_cube(\n data + 0.2 * day,\n \"air_temperature\",\n \"degC\",\n altitude,\n latitude,\n longitude,\n wmo_id,\n scalar_coords=time_coords + realization_coord,\n )\n )\n forecast_spot_cube = forecast_spot_cubes.merge_cube()\n\n self.historic_forecast_spot_cube = forecast_spot_cube[:, :5, :]\n self.historic_forecast_spot_cube.convert_units(\"Kelvin\")\n self.historic_forecast_spot_cube.attributes = MANDATORY_ATTRIBUTE_DEFAULTS\n\n self.current_forecast_spot_cube = forecast_spot_cube[:, 5, :]\n self.current_forecast_spot_cube.convert_units(\"Kelvin\")\n self.current_forecast_spot_cube.attributes = MANDATORY_ATTRIBUTE_DEFAULTS\n\n self.truth_spot_cube = self.historic_forecast_spot_cube[0].copy()\n self.truth_spot_cube.remove_coord(\"realization\")\n self.truth_spot_cube.data = self.truth_spot_cube.data + 1.0\n\n self.spot_altitude_cube = forecast_spot_cube[0, 0].copy(\n forecast_spot_cube.coord(\"altitude\").points\n )\n self.spot_altitude_cube.rename(\"altitude\")\n self.spot_altitude_cube.units = \"m\"\n for coord in [\n \"altitude\",\n \"forecast_period\",\n \"forecast_reference_time\",\n \"realization\",\n \"time\",\n ]:\n self.spot_altitude_cube.remove_coord(coord)", "def create_climatology(self):\n # Because data from all years are merged, the time coordinate must be \n # made consistent with basic integer values. 
Monthly dates values are \n # added to the attributes.\n time_points = self.cubelist[0].coord(self.time_coord).points\n new_time_points = range(1, len(time_points) + 1)\n new_time_atts = {'dates' : self.cube_dates}\n new_time_coord = iris.coords.DimCoord(new_time_points,\n standard_name=self.time_coord,\n attributes=new_time_atts)\n \n new_cubelist = []\n realization_num = 1\n for cube in self.cubelist:\n if len(cube.coord(self.realization).points) > 1:\n cube = self.cube_ensemble_mean(cube)\n # Make sure all realization points are unique.\n cube.coord(self.realization).points = [realization_num]\n # Replace time dimension.\n time_dim = cube.coord_dims(cube.coord(self.time_coord))\n cube.remove_coord(self.time_coord)\n if time_dim:\n cube.add_dim_coord(new_time_coord, time_dim)\n else:\n # If no time_dim, coordinate is auxiliary or scalar.\n cube.add_aux_coord(new_time_coord)\n \n new_cubelist.append(cube)\n realization_num += 1\n \n new_cube = iris.cube.CubeList(new_cubelist).merge_cube()\n clim_cube = self.cube_ensemble_mean(new_cube)\n \n # The initialisation data is now a mean of all years, so like with time\n # replace the coordinate with a single point and monthly initialisation\n # dates added to the attributes.\n init_points = clim_cube.coord(self.forecast_ref_time).points\n new_init_points = range(1, len(init_points) + 1)\n new_init_atts = {'dates' : self.cube_init_dates}\n new_init_coord = iris.coords.AuxCoord(new_init_points,\n standard_name=self.forecast_ref_time,\n attributes=new_init_atts)\n clim_cube.remove_coord(self.forecast_ref_time)\n clim_cube.add_aux_coord(new_init_coord)\n \n self.clim_cube = clim_cube\n return self.clim_cube", "def output_scene_list_csv(dest_all_path, file_list, dest_prefix='calfin'): \n \n calendar = []\n for file_path in file_list:\n file_name = os.path.basename(file_path)\n file_name_parts = file_name.split('_')\n domain = file_name_parts[0]\n satellite = file_name_parts[1]\n if satellite.startswith('S'):\n #Astakhov-Chugunov-Astapenko_S1B_EW_GRDM_1SDH_2018-06-26_011542_01536C_EB6F\n # datatype = file_name_parts[2]\n level = file_name_parts[3]\n date_dashed = file_name_parts[4]\n date = date_dashed.replace('-', '')\n orbit = file_name_parts[5]\n # bandpol = 'hh'\n elif satellite.startswith('L'):\n #Brückner_LC08_L1TP_2015-06-14_232-014_T1_B5_66-1_validation\n # datatype = file_name_parts[2]\n date_dashed = file_name_parts[3]\n date = date_dashed.replace('-', '')\n orbit = file_name_parts[4].replace('-', '')\n level = file_name_parts[5]\n # bandpol = file_name_parts[6]\n scene_id = scene_hash_table[date][orbit][satellite][level]\n else:\n raise ValueError('Unrecognized sattelite!')\n calendar.append([domain, scene_id])\n \n calendar_path = os.path.join(dest_all_path, dest_prefix + '_scene_list.csv')\n pd.DataFrame.from_dict(data=pd.DataFrame(calendar), orient='columns').to_csv(calendar_path, header=False, index=False, encoding='utf-8')\n return calendar", "def zarr_concat(input_zarrs: List[str], output_zarr: str, verbose: bool = False) -> None:\n\n output_dataset = ChunkedDataset(output_zarr)\n if os.path.exists(output_zarr):\n output_dataset.open(\"a\")\n else:\n output_dataset.initialize()\n\n for input_zarr in input_zarrs:\n\n input_dataset = ChunkedDataset(input_zarr)\n input_dataset.open()\n\n if verbose:\n print(f\"input scenes size: {input_dataset.scenes.shape[0]}\")\n print(f\"input frames size: {input_dataset.frames.shape[0]}\")\n print(f\"input agents size: {input_dataset.agents.shape[0]}\")\n\n frame_offset = 
output_dataset.frames.shape[0]\n new_scenes = np.zeros(input_dataset.scenes.shape[0], dtype=SCENE_DTYPE)\n\n for i, scene in enumerate(input_dataset.scenes): # add new scenes to zarr\n scene[\"frame_index_interval\"] = scene[\"frame_index_interval\"] + frame_offset\n new_scenes[i] = scene\n output_dataset.scenes.append(new_scenes)\n\n agent_offset = output_dataset.agents.shape[0]\n new_frames = np.zeros(input_dataset.frames.shape[0], dtype=FRAME_DTYPE)\n for i, frame in enumerate(input_dataset.frames): # add new frames to the zarr\n frame[\"agent_index_interval\"] = frame[\"agent_index_interval\"] + agent_offset\n new_frames[i] = frame\n output_dataset.frames.append(new_frames)\n\n output_dataset.agents.append(input_dataset.agents) # add new agents to the zarr\n\n if verbose:\n print(f\"output scenes size: {output_dataset.scenes.shape[0]}\")\n print(f\"output frames size: {output_dataset.frames.shape[0]}\")\n print(f\"output agents size: {output_dataset.agents.shape[0]}\")", "def flatten_cube(cube):\n nb=cube.shape[0]\n cube_flattened = np.zeros_like(cube)\n for i in range(nb):\n cube_flattened[i,:,:] = flatten_image(cube[i,:,:])\n return cube_flattened", "def marching_cubes_to_obj(marching_cubes_out, output_file):\n\n verts, faces, normals, _ = marching_cubes_out\n with open(output_file, \"w\") as f:\n for item in verts:\n f.write(f\"v {item[0]} {item[1]} {item[2]}\\n\")\n for item in normals:\n f.write(f\"vn {item[0]} {item[1]} {item[2]}\\n\")\n for item in faces:\n f.write(\n f\"f {item[0]}//{item[0]} {item[1]}//{item[1]} \"\n f\"{item[2]}//{item[2]}\\n\"\n )\n f.close()", "def combine_netCDF_cmip5(directory, variable, model):\n\n # Make a list of the files in the above folder to loop through\n list_files = glob.glob(directory)\n list_files = np.array(list_files)\n newlist = np.sort(list_files)\n\n # Make a cubelist to add each file (cube) to\n Cubelist = iris.cube.CubeList([])\n\n # loop for each file in newlist\n for i in range(0, len(newlist)):\n\n with warnings.catch_warnings():\n warnings.simplefilter('ignore', FutureWarning)\n warnings.simplefilter('ignore', UserWarning)\n\n # Load each file named variable as a cube\n cube = iris.load_cube(newlist[i], variable)\n\n # CRUDE WORKAROUND TO REMOVE OVERLAPPING COORDINATE\n if model == 'HadGEM2-ES':\n if i == 3: # change to 9 if timeseries plots\n cube = cube[0:-1]\n\n # Append this cube to the cubelist\n Cubelist.append(cube)\n\n # matching attributes\n iris.util.unify_time_units(Cubelist)\n equalise_attributes(Cubelist)\n # Concatenate each cube in cubelist together to make one data file (cube)\n new_cube = Cubelist.concatenate_cube()\n\n return new_cube", "def getCube(unique_name):", "def calculate(cubes):\n co2_cube = cubes.extract_cube(\n iris.Constraint(name='mole_fraction_of_carbon_dioxide_in_air'))\n ps_cube = cubes.extract_cube(\n iris.Constraint(name='surface_air_pressure'))\n\n # Fill masked data if necessary (interpolation fails with masked data)\n (z_axis,) = co2_cube.coord_dims(co2_cube.coord(axis='Z',\n dim_coords=True))\n mask = da.ma.getmaskarray(co2_cube.core_data())\n if mask.any():\n first_unmasked_data = _get_first_unmasked_data(\n co2_cube.core_data(), axis=z_axis)\n dim_map = [dim for dim in range(co2_cube.ndim) if dim != z_axis]\n first_unmasked_data = iris.util.broadcast_to_shape(\n first_unmasked_data, co2_cube.shape, dim_map)\n co2_cube.data = da.where(mask, first_unmasked_data,\n co2_cube.core_data())\n\n # Interpolation (not supported for dask arrays)\n air_pressure_coord = co2_cube.coord('air_pressure')\n 
original_levels = iris.util.broadcast_to_shape(\n air_pressure_coord.points, co2_cube.shape,\n co2_cube.coord_dims(air_pressure_coord))\n target_levels = np.expand_dims(ps_cube.data, axis=z_axis)\n co2s_data = stratify.interpolate(\n target_levels,\n original_levels,\n co2_cube.data,\n axis=z_axis,\n interpolation='linear',\n extrapolation='linear',\n )\n co2s_data = np.squeeze(co2s_data, axis=z_axis)\n\n # Construct co2s cube\n indices = [slice(None)] * co2_cube.ndim\n indices[z_axis] = 0\n co2s_cube = co2_cube[tuple(indices)]\n co2s_cube.data = co2s_data\n if co2s_cube.coords('air_pressure'):\n co2s_cube.remove_coord('air_pressure')\n ps_coord = iris.coords.AuxCoord(ps_cube.data,\n var_name='plev',\n standard_name='air_pressure',\n long_name='pressure',\n units=ps_cube.units)\n co2s_cube.add_aux_coord(ps_coord, np.arange(co2s_cube.ndim))\n co2s_cube.convert_units('1e-6')\n return co2s_cube", "def readForecasts(cycle, leadtimes=None, xyindices=None, domain='uk', datadir='./'):\n\n print('[INFO] Assigning wave forecast data to arrays')\n wsGrid = wvrd.readWaveL1('WSPD', cycle, leadtimes=leadtimes, xyindices=xyindices, domain=domain, datadir=datadir)\n fctimes = wsGrid.fclead / 3600\n ws = wsGrid.data\n wdir = wvrd.readWaveL1('WDIR', cycle, leadtimes=leadtimes, xyindices=xyindices, domain=domain, datadir=datadir).data\n hm0 = wvrd.readWaveL1('VHM0', cycle, leadtimes=leadtimes, xyindices=xyindices, domain=domain, datadir=datadir).data\n tp = wvrd.readWaveL1('VTPK', cycle, leadtimes=leadtimes, xyindices=xyindices, domain=domain, datadir=datadir).data\n dirn = wvrd.readWaveL1('VMDR', cycle, leadtimes=leadtimes, xyindices=xyindices, domain=domain, datadir=datadir).data\n offdepth = wvrd.readWaveL1('deptho', cycle, leadtimes=None, xyindices=xyindices, domain=domain, datadir=datadir).data[0]\n\n hs_cmp = np.zeros([np.shape(ws)[0],np.shape(ws)[1],3])\n tp_cmp = np.zeros([np.shape(ws)[0],np.shape(ws)[1],3])\n dir_cmp = np.zeros([np.shape(ws)[0],np.shape(ws)[1],3])\n\n #offdepth = np.ones(np.shape(ws)[1])*50.0\n #offdepth = d.variables['dpt'][0,xymod]\n\n hs_cmp[:,:,0] = wvrd.readWaveL1('VHM0_WW', cycle, leadtimes=leadtimes, xyindices=xyindices, domain=domain, datadir=datadir).data\n hs_cmp[:,:,1] = wvrd.readWaveL1('VHM0_SW1', cycle, leadtimes=leadtimes, xyindices=xyindices, domain=domain, datadir=datadir).data\n hs_cmp[:,:,2] = wvrd.readWaveL1('VHM0_SW2', cycle, leadtimes=leadtimes, xyindices=xyindices, domain=domain, datadir=datadir).data\n tp_cmp[:,:,0] = wvrd.readWaveL1('VTPK_WW', cycle, leadtimes=leadtimes, xyindices=xyindices, domain=domain, datadir=datadir).data\n tp_cmp[:,:,1] = wvrd.readWaveL1('VTPK_SW1', cycle, leadtimes=leadtimes, xyindices=xyindices, domain=domain, datadir=datadir).data\n tp_cmp[:,:,2] = wvrd.readWaveL1('VTPK_SW2', cycle, leadtimes=leadtimes, xyindices=xyindices, domain=domain, datadir=datadir).data\n dir_cmp[:,:,0] = wvrd.readWaveL1('VMDR_WW', cycle, leadtimes=leadtimes, xyindices=xyindices, domain=domain, datadir=datadir).data\n dir_cmp[:,:,1] = wvrd.readWaveL1('VMDR_SW1', cycle, leadtimes=leadtimes, xyindices=xyindices, domain=domain, datadir=datadir).data\n dir_cmp[:,:,2] = wvrd.readWaveL1('VMDR_SW2', cycle, leadtimes=leadtimes, xyindices=xyindices, domain=domain, datadir=datadir).data\n\n # return zero values not missing data!\n hs_cmp = np.maximum(0.0, hs_cmp)\n tp_cmp = np.maximum(0.0, tp_cmp)\n dir_cmp = np.maximum(0.0, dir_cmp)\n\n offshoredata = {'depth':offdepth, 'wspd':ws, 'wdir':wdir, 'hm0':hm0, 'tp':tp, 'dirn':dirn, 'hscmp':hs_cmp, 'tpcmp':tp_cmp, 
'dircmp':dir_cmp}\n\n return fctimes, offshoredata" ]
[ "0.64421564", "0.6407596", "0.58654726", "0.57194644", "0.5639017", "0.56262916", "0.5592745", "0.5590848", "0.55444646", "0.5433761", "0.53545725", "0.53041834", "0.5269207", "0.5209733", "0.51773715", "0.51301134", "0.5103274", "0.50630826", "0.50591546", "0.5052191", "0.5037965", "0.5033745", "0.5029374", "0.49971598", "0.49871904", "0.4910537", "0.48963055", "0.48521876", "0.48179007", "0.48173136" ]
0.7402447
0
renders the `Dockerrun.aws.json` template
def render_template(self):
    apps = [{
        'name': container.name,
        'image': container.image,
        'environment': container.environment,
        'memory': container.memory,
        'portMappings': container.portmappings
    } for container in self.containers]
    t = self.templates.get_template('aws/containers.template')
    return t.render(apps=apps, family=self.family)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def template():\n\n return s3_rest_controller(rheader = s3db.dc_rheader)", "def render_application_template(self):\n self.pipeline_config['instance_links'] = self.retrieve_instance_links()\n jsondata = get_template(\n template_file='infrastructure/app_data.json.j2', appinfo=self.appinfo, pipeline_config=self.pipeline_config)\n return jsondata", "def render_dictionary(self): \n asset_json = {\n 'name': self.name,\n 'product_name': self.product_name,\n 'product_vendor': self.product_vendor,\n 'configuration': self.configuration,\n 'description': self.description,\n 'primary_users': self.primary_users,\n 'primary_voting': self.primary_voting,\n 'secondary_users': self.secondary_users,\n 'secondary_voting': self.secondary_voting,\n 'tags': self.tags,\n 'type': self.asset_type,\n 'action_whitelist': self.action_whitelist\n }\n\n if self.ingest_container_label:\n asset_json['ingest'] = {\n 'container_label': self.ingest_container_label,\n 'interval_mins': self.ingest_interval_mins,\n 'poll': self.ingest_poll,\n 'start_time_epoch_utc': self.ingest_start_time\n }\n\n return asset_json", "def visualise():\n\n column = request.form.getlist('columnName')\n regions = request.form.getlist('raw_regions')\n #take the single string and return a list\n regions = query_proc.prep_regions(regions)\n #get that tables of interst\n table = query_proc.column_to_table(column)\n\n var_data = query_proc.get_region_data(table, column, regions)\n minval = query_proc.get_region_data_min(table, column, regions)\n maxval = query_proc.get_region_data_max(table, column, regions)\n\n #column diction to get human fiendly designation\n column_dict = name_column.get_name_column_dict()\n real_column = column_dict[column[0]]\n\n\n ##packing for the template\n region = regions[0]\n min_max = [minval, maxval]\n step = query_proc.calc_steps(min_max)\n min_max.append(step)\n\n min_max = json.dumps(min_max)\n json_vardata = json.dumps(var_data)\n\n return render_template('visualise.html',\n title='Data on a Map!',\n column=column,\n real_column=real_column,\n region=region,\n min_max=min_max,\n json_vardata=json_vardata)", "def detailed_json(self, absolutize_url):\n template = {}\n template.update({\n \"id\": self.image_id,\n \"links\": self.links_json(absolutize_url),\n \"name\": self.name,\n \"minRam\": self.minRam,\n \"minDisk\": self.minDisk,\n \"OS-EXT-IMG-SIZE:size\": self.image_size,\n \"com.rackspace__1__ui_default_show\": self.is_default,\n \"created\": \"1972-01-01_15-59-11\",\n \"updated\": \"1972-01-01_15-59-11\",\n \"status\": \"ACTIVE\",\n \"progress\": 100,\n \"metadata\": self.metadata_json()\n })\n return template", "def detailed_json(self, absolutize_url):\n template = {}\n template.update({\n \"id\": self.image_id,\n \"links\": self.links_json(absolutize_url),\n \"name\": self.name,\n \"minRam\": self.minRam,\n \"minDisk\": self.minDisk,\n \"OS-EXT-IMG-SIZE:size\": self.image_size,\n \"com.rackspace__1__ui_default_show\": self.is_default,\n \"created\": \"1972-01-01_15-59-11\",\n \"updated\": \"1972-01-01_15-59-11\",\n \"progress\": 100,\n \"status\": \"ACTIVE\",\n \"metadata\": self.metadata_json()\n })\n return template", "def index() -> object:\n return render_template('ue_bootstrap.j2', title='UENERGO TAGS')", "def home():\n string = open('Tinder/static/data/data.json').read()\n json_data = json.loads(string)\n return flask.render_template('tinder.html', json_data=json_data)", "def index():\n return render_template(\"charts.html\")", "def render(self, data):\n logging.info(\"render (start)\")\n\n seria = 
json.dumps(data, ensure_ascii=False, indent=4)\n logging.info(\"rendered %s characters (end)\" % len(seria))\n return seria", "def json(data):\n if isinstance(data, dict):\n data = ujson.encode(data)\n uid = str(uuid.uuid4())\n display(HTML('<div id=\"{0}\" style=\"height: 600px; width:100%;\"></div>'.format(uid)))\n display(Javascript(\"\"\"\n require([\"https://rawgit.com/caldwell/renderjson/master/renderjson.js\"], function() {\n document.getElementById('%s').appendChild(renderjson(%s))\n });\n \"\"\" % (uid, data)))", "def resources(self) -> HTMLBody:\n\t\treturn render_template(\"resources.jinja2\")", "def daten():\n body_list = db.get_body()\n body_dict = {}\n for body in body_list:\n body_dict[str(body['_id'])] = body['name']\n data_list = []\n for file in os.listdir(app.config['data_dump_folder']):\n if file.endswith(\".tar.bz2\"):\n stat = os.lstat(app.config['data_dump_folder'] + os.sep + file)\n data_list.append({\n 'id': file.split('.')[0],\n 'name': body_dict[file.split('.')[0]],\n 'size': \"%d\" % (stat.st_size / 1024.0 / 1024.0)\n })\n file_list = []\n for file in os.listdir(app.config['files_dump_folder']):\n if file.endswith(\".tar.bz2\"):\n stat = os.lstat(app.config['files_dump_folder'] + os.sep + file)\n file_list.append({\n 'id': file.split('.')[0],\n 'name': body_dict[file.split('.')[0]],\n 'size': \"%d\" % (stat.st_size / 1024.0 / 1024.0 / 1024.0)\n })\n return render_template('daten.html', data_list=data_list, file_list=file_list)", "def abv_vs_style():\n return render_template(\"ABV_vs_style.html\")", "def smartadata():\n\n\treturn render_template('smartadata.html')", "def api_html():\n\n return jsonify({'version': __version__})", "def graphs_kelly():\n return render_template(\"graphs-Kelly.html\")", "def main():\n pods = openshift_object.get_running_pods()\n me = openshift_object.get_self()\n routes = openshift_object.get_routes()\n nodes = openshift_object.get_nodes()\n pvc = openshift_object.get_pvcs()\n pv = openshift_object.get_pv()\n project = openshift_object.get_projects()\n return jsonify({\n \"pods\": pods,\n \"me\": me,\n \"routes\": routes, \n \"nodes\":nodes,\n \"pvcs\":pvc,\n \"pv\":pv,\n \"projects\":project})", "def showStylesJSON():\n styles = session.query(Style).order_by(asc(Style.name))\n return jsonify(styles=[r.serialize for r in styles])", "def index():\n return render_template('home.jinja2')", "def dashboard_render(self,servers):\n THIS_DIR = os.path.dirname(os.path.abspath(__file__))\n j2_env = Environment(loader=FileSystemLoader(THIS_DIR),\n trim_blocks=True)\n new_dashboard = (j2_env.get_template('templating_dashboard.json').render(\n list_templating=self.templating(servers)\n ))\n return (new_dashboard)", "def render(data):\n if data is None:\n return ''\n\n if 'rendered_result' not in data:\n if 'result' not in data:\n data['rendered_result'] = ''\n else:\n make_pretty = True\n data['rendered_result'] = SEP2Renderer.export(data['result'], make_pretty)\n\n return data['rendered_result']", "def render(*, devices, vms, template, racks):\n jinjaEnv = jinja2.Environment(loader=jinja2.FileSystemLoader(searchpath=\"/\"))\n template = jinjaEnv.get_template(template)\n\n rendered = template.render(devices=devices, vms=vms, racks=racks)\n # remove empty lines, makes for more readable diffs\n return re.sub(r\"\\n\\s*\\n\", \"\\n\", rendered)", "def rawData():\n return render_template(\"data.html\")", "def template(c, release=\"url-shortener\"):\n c.run(f\"helm template {release} {HELM_CHART_DIR} > ./generated-deployment.yml\")", "def 
get_data_mrk():\n return render_template(\"l_markers.html\")", "def home():\n return render_template(\"d3_graph.html\")", "def index(self) -> HTMLBody:\n\t\treturn render_template(\"index.jinja2\")", "def main():\r\n return render_template(\"UI.html\")", "def main():\n states = storage.all(\"State\")\n return render_template('8-cities_by_states.html', states=states)" ]
[ "0.59908485", "0.5553491", "0.54366165", "0.52986646", "0.5238119", "0.52351195", "0.5205839", "0.51638323", "0.5104337", "0.50358963", "0.50227344", "0.49910554", "0.498", "0.49713457", "0.49624127", "0.49354434", "0.48771748", "0.48728493", "0.48441127", "0.48303962", "0.48259273", "0.48183832", "0.48154515", "0.47950676", "0.47867328", "0.4784652", "0.47571158", "0.4756343", "0.47466907", "0.47363064" ]
0.6472083
0
Creates an in-memory tarfile that will be used as the docker context
def create_docker_context(self):
    self.tarfile = io.BytesIO()

    with tarfile.open(fileobj=self.tarfile, mode="w|") as tar:
        for f in self.files:
            tarinfo = tarfile.TarInfo(f['name'])
            tarinfo.size = len(f['content'])
            if 'mode' in f:
                tarinfo.mode = f['mode']
            tar.addfile(tarinfo, io.BytesIO(f['content'].encode('utf-8')))
    self.tarfile.seek(0)  # Reset from EOF
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _setup_input(self, g):\n tarbytes = io.BytesIO()\n with tempfile.NamedTemporaryFile() as f:\n g.serialize(f.name, format=\"turtle\")\n tar = tarfile.open(name=\"out.tar\", mode=\"w\", fileobj=tarbytes)\n tar.add(f.name, arcname=\"input.ttl\")\n tar.close()\n # seek to beginning so our file is not empty when docker sees it\n tarbytes.seek(0)\n return tarbytes", "def _outside_tar(self):\r\n outside_tar = self.unsafe_common_dir / \"unsafe_file.tar.gz\"\r\n with tarfile.open(outside_tar, \"w:gz\") as tar:\r\n tar.addfile(tarfile.TarInfo(str(self.content_dir / \"a_file\")))\r\n\r\n return outside_tar", "def _fifo_tar(self):\r\n fifop = self.unsafe_common_dir / \"fifo.file\"\r\n fifo_tar = self.unsafe_common_dir / \"fifo.tar.gz\"\r\n os.mkfifo(fifop)\r\n with tarfile.open(fifo_tar, \"w:gz\") as tar:\r\n tar.add(fifop)\r\n\r\n return fifo_tar", "def _outside_tar2(self):\r\n outside_tar = self.unsafe_common_dir / \"unsafe_file.tar.gz\"\r\n with tarfile.open(outside_tar, \"w:gz\") as tar:\r\n tar.addfile(tarfile.TarInfo(str(self.unsafe_common_dir / \"../a_file\")))\r\n\r\n return outside_tar", "def create(self, spec, force_cache=False, image_dir=\"~/.hyperkit\"):", "def create_tar(self):\n with tarfile.open(self.tgzfile, \"w:gz\") as tar_handle:\n for root, _, files in os.walk(self.dirname):\n for file in files:\n tar_handle.add(os.path.join(root, file))", "def create_tarball(scratch_dir, tarball_filename, cleanup=True):\n dirname, tmpdir = os.path.split(scratch_dir)\n with cd(dirname):\n with tarfile.open(tarball_filename, \"w:gz\") as tar:\n tar.add(scratch_dir, arcname=ZIP_DIRNAME)\n if cleanup:\n shutil.rmtree(scratch_dir)\n return os.path.join(dirname, tarball_filename)", "def create_artifact(current_revision):\n archive_path = '/tmp/{revision}.tar.gz'.format(revision=current_revision)\n local('tar -czf {archive_path} --exclude=.git *'.format(archive_path=archive_path))", "def tar_job(self):\n curr_dir = os.getcwd()\n\n os.chdir(self.temp_project_path)\n logging.log(level=logging.INFO, msg=\"Tarring up Filesystem and Environment\")\n tar_name = f\"{self.project_id}_fs\"\n tar_persistor = TarPersistor(base_file_name=tar_name,\n folder=\".\",\n paths_to_tar=os.listdir(),\n extract_path=False)\n _ = tar_persistor.persist()\n\n os.chdir(curr_dir)\n\n tar_path = os.path.join(self.temp_project_path, tar_name) + \".tar\"\n return tar_path", "def tar_file(self, name, contents, mtime=None):\n length = len(contents)\n tar_data = self.tar_file_header(name, length, mtime=mtime)\n tar_data += contents\n tar_data += self.tar_file_padding(length)\n return tar_data", "def create_tarball(fileobj, path, callback=None, compression_level=None):\n tar_cmd = [\"tar\", \"-zc\", \"--directory=%s\" % path, \".\"]\n env = os.environ.copy()\n if compression_level and 1 <= compression_level <= 9:\n env[\"GZIP\"] = \"-%d\" % compression_level\n tar_proc = make_subprocess(tar_cmd, stdout=True, stderr=True, env=env)\n\n try:\n while True:\n chunk = tar_proc.stdout.read(CHUNK_SIZE)\n if chunk == '':\n break\n\n if callback:\n callback(chunk)\n\n if fileobj:\n fileobj.write(chunk)\n except Exception:\n try_kill_process(tar_proc)\n raise\n\n finish_subprocess(tar_proc, tar_cmd)", "def do_pack():\n time_test = datetime.now().strftime(\"%Y%m%d%H%M%S\")\n file_name = \"versions/web_static_\" + time_test + \".tgz\"\n command1 = \"mkdir -p versions\"\n command2 = \"tar -czvf \" + file_name + \" web_static\"\n local(command1)\n com = local(command2)\n if com.return_code == 0:\n return file_name\n else:\n return None", "def 
tmpfile(tmpdir_factory):\n\n def make(filename):\n fn = tmpdir_factory.mktemp(\"data\").join(filename)\n return fn\n\n # fn = tmpdir_factory.mktemp(\"data\").join(filename)\n return make", "def prepare():\n sh('docker build --rm -t {image} {dir}', image=IMAGE, dir=os.path.dirname(__file__))", "def _expand_archive(self, name):\r\n target = path(self.temp_dir) / uuid.uuid4().hex\r\n os.mkdir(target)\r\n with tarfile.open(self.data_dir / name) as tar_file:\r\n tar_file.extractall(path=target)\r\n\r\n return target", "def create_tarfile(source_dir, filename=\"/tmp/contents.tar.gz\"):\n try:\n # Define the default signal handler for catching: Ctrl-C\n signal.signal(signal.SIGINT, signal.default_int_handler)\n with tarfile.open(filename, \"w:gz\") as tar:\n tar.add(source_dir, arcname=os.path.basename(source_dir))\n\n except (OSError, IOError) as e:\n # OSError: [Errno 13] Permission denied\n if e.errno == errno.EACCES:\n source_dir = os.getcwd() if source_dir == '.' else source_dir # Expand cwd\n warn_purge_exit(info_msg=\"Permission denied. Removing compressed data...\",\n filename=filename,\n exit_msg=(\"Permission denied. Make sure to have read permission \"\n \"for all the files and directories in the path: %s\")\n % (source_dir))\n # OSError: [Errno 28] No Space Left on Device (IOError on python2.7)\n elif e.errno == errno.ENOSPC:\n dir_path = os.path.dirname(filename)\n warn_purge_exit(info_msg=\"No space left. Removing compressed data...\",\n filename=filename,\n exit_msg=(\"No space left when compressing your data in: %s.\\n\"\n \"Make sure to have enough space before uploading your data.\")\n % (os.path.abspath(dir_path)))\n\n except KeyboardInterrupt: # Purge tarball on Ctrl-C\n warn_purge_exit(info_msg=\"Ctrl-C signal detected: Removing compressed data...\",\n filename=filename,\n exit_msg=\"Stopped the data upload gracefully.\")", "def make_tar(self, package, input_dir, build_dir, add_args=None):\n tar = self.options.tar_command\n\n # Generate the .tar.gz file\n filename = package + '.tar.gz'\n out_file = open(os.path.join(build_dir, filename), \"w\")\n args = [tar, '--format=gnu', '--exclude-vcs', '-C', build_dir]\n if self.config.get('tar', {}).get('ignore', []):\n for patt in self.config['tar']['ignore']:\n args += ['--exclude', patt]\n if add_args:\n args += add_args\n args += ['-c', input_dir]\n logging.debug(\"Creating %s\", filename)\n tar_proc = subprocess.Popen(args, stdout=subprocess.PIPE)\n gzip_proc = subprocess.Popen(['gzip', '-9'], stdin=tar_proc.stdout,\n stdout=out_file)\n\n if tar_proc.wait() != 0 or gzip_proc.wait() != 0:\n logging.error(\"tar/gzip failed, exiting\")\n sys.exit(1)\n out_file.close()\n logging.info('%s written', filename)\n return filename", "def do_pack():\n\n datenow = datetime.now()\n full_date = datenow.strftime(\"%Y%m%d%H%M%S\")\n\n try:\n if not os.path.isdir(\"versions\"):\n local(\"mkdir versions\")\n local_command = local(\"tar -cvzf versions/web_static_{}.tgz web_static\"\n .format(full_date))\n return local_command\n except Exception:\n return None", "def do_pack():\n d = datetime.now()\n local(\"mkdir -p versions\")\n file_name = 'versions/web_static_{}{}{}{}{}{}.tgz\\\n'.format(d.year, d.month, d.day, d.hour, d.minute, d.second)\n status = local(\"tar -cvzf\" + file_name + \" ./web_static/\", capture=True)\n if status.succeeded:\n return file_name\n return None", "def do_pack():\n time = datetime.utcnow().strftime('%Y%m%d%H%M%S')\n file_name = \"versions/web_static_{}.tgz\".format(time)\n try:\n local(\"mkdir -p ./versions\")\n 
local(\"tar --create --verbose -z --file={} ./web_static\"\n .format(file_name))\n return file_name\n except:\n return None", "def do_pack():\n\n now = datetime.now()\n # format the name of the file with the timestamps\n now_year = now.year\n now_month = now.month\n now_day = now.day\n now_hour = now.hour\n now_minute = now.minute\n now_second = now.second\n # apply the format\n file_name = 'versions/web_static_{}{}{}{}{}{}.tgz'.format(\n now_year, now_month, now_day, now_hour, now_minute, now_second\n )\n # All archives must be stored in the folder versions\n local('mkdir -p versions')\n # execute locally the compression of the folder\n command = local(\"tar -cvzf \" + file_name + \" ./web_static/\")\n # return the archive path if the archive has been correctly generated\n if command.succeeded:\n return file_name\n else:\n return None", "def post_package():\n package_file = BytesIO()\n with tarfile.open(mode='w', fileobj=package_file) as tar:\n # metadata\n meta_content = b'encoding: utf-8\\npost: post.md'\n file_info = tarfile.TarInfo('package.yml')\n file_info.size = len(meta_content)\n tar.addfile(file_info, BytesIO(meta_content))\n\n # post\n post_content = b'''---\ntitle: A title\ntopic: A topic\n---\n\n[summary]\nA summary\n\nA paragraph\n'''\n file_info = tarfile.TarInfo('post.md')\n file_info.size = len(post_content)\n tar.addfile(file_info, BytesIO(post_content))\n package_file.seek(0)\n\n return package_file", "def _taradd(func, tar_file, name):\n with tempfile.NamedTemporaryFile('wb', delete=False) as temp_file:\n func(temp_file)\n temp_file.close()\n tar_file.add(temp_file.name, arcname=name)\n if os.path.isfile(temp_file.name):\n os.remove(temp_file.name)", "def test_unable_to_dump_tar_tmp_file(self, mocker):\n tar_file = mocker.MagicMock(name='tmp.tar')\n tar_file.addfile.side_effect = tarfile.TarError('Fail')\n\n tar_context = mocker.patch.object(tarfile, 'open')\n tar_context.return_value = tar_context\n tar_context.__enter__.return_value = tar_file\n\n payload = dict(id=\"stub_id\", data={\"some\": \"data\"})\n resp = self.client.post(self.url, json=payload)\n\n assert resp.status_code == 500\n assert resp.get_json() == {\n 'status': 'Error',\n 'type': 'TarError',\n 'message': 'Error during TAR.GZ creation: Fail'\n }", "def testDownloadDockerFile(self):\n expected_dockerfile = (\n '# Pseudo Dockerfile\\n'\n f'# Generated by de.py ({de_version})\\n\\n'\n 'COPY file:201f8f1849e89d53be9f6aa76937f5e209d745abfd15a8552fcf2ba45ab267f9'\n ' in / \\n'\n 'CMD [\"/hello\"]')\n\n with tempfile.TemporaryDirectory() as tmp_dir:\n self.dl_object._output_directory = tmp_dir\n self.dl_object.DownloadPseudoDockerfile()\n with open(os.path.join(tmp_dir, 'Dockerfile'), encoding='utf-8') as f:\n self.assertEqual(expected_dockerfile, f.read())", "def do_pack():\n now = datetime.now().strftime(\"%Y%m%d%H%M%S\")\n local('mkdir -p versions')\n result = local('tar -czvf versions/web_static_{}.tgz web_static'\n .format(now))\n if result.failed:\n return None\n else:\n return result", "def pack():\n clean_local()\n build()\n copy_json()\n optimize()\n tarball()", "def create_temp_files(containers):\n for name in containers:\n run_cmd(f\"rm -rf /tmp/{name}.img\", True)\n for name in containers:\n run_cmd(f\"truncate -s 1G /tmp/{name}.img\", True)", "def make_temp_file():\n global TEST_DATA_PATH\n TEST_DATA_PATH = tempfile.mkstemp()", "def do_pack():\n try:\n if isdir('versions') is False:\n local(\"mkdir versions\")\n tgz_file = \"versions/web_static_{}.tgz\".format(\n 
time.strftime(\"%Y%m%d%H%M%S\"))\n local(\"tar -cvzf {} web_static\".format(tgz_file))\n return tgz_file\n except:\n return None" ]
[ "0.6867456", "0.6238185", "0.62085533", "0.6141972", "0.6079956", "0.6012043", "0.5987858", "0.59485483", "0.5887969", "0.5852579", "0.57223886", "0.57056576", "0.569733", "0.56587726", "0.5656461", "0.5641944", "0.5628549", "0.56112605", "0.55963767", "0.5593278", "0.5589891", "0.55782956", "0.55254", "0.5474365", "0.5471544", "0.5437888", "0.5429094", "0.5405738", "0.5381815", "0.537811" ]
0.8473896
0
runs docker build with the tarfile context
def build(self):
    docker = Client(version='auto')
    status = docker.build(
        fileobj=self.tarfile,
        custom_context=True,
        tag=self.tag,
        pull=True,
        nocache=True,
        rm=True,
    )
    for line in status:  # This effectively blocks on `docker build`
        try:
            current_app.logger.debug(line)
        except RuntimeError:  # Outside of application context
            print line
    if "successfully built" not in line.lower():
        raise BuildError("Failed to build {}: {}".format(self.tag, line))
    self.built = True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def docker_build(c):\n cli_tasks.docker_build.run(c)", "def prepare():\n sh('docker build --rm -t {image} {dir}', image=IMAGE, dir=os.path.dirname(__file__))", "def main():\n parser = argparse.ArgumentParser(\n epilog=main.__doc__, formatter_class=argparse.RawDescriptionHelpFormatter\n )\n parser.add_argument(\n \"-d\", \"--dry-run\", action=\"store_true\", default=0, help=\"Dry run mode.\"\n )\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n action=\"count\",\n default=0,\n help=\"Verbosity. Default is WARNING level.\",\n )\n\n subparsers = parser.add_subparsers(help=\"Sub commands\", dest=\"subparser\")\n subparsers.required = True\n\n build_parser = subparsers.add_parser(\n \"build\",\n description=\"Build an image from Dockerfile, caching image hierarchy\",\n help=\"Build an image from a Dockerfile\",\n )\n build_parser.add_argument(\n \"path\", metavar=\"PATH\", help=\"The build context directory\"\n )\n build_parser.add_argument(\n \"-f\",\n \"--file\",\n help=\"Name of the Dockerfile. If not provided, \"\n \"will use config.DOCKERFILE_PATH_PATTERN to compute. \",\n )\n build_parser.add_argument(\n \"-v\",\n \"--git-sha\",\n required=True,\n help=\"The version of code to build against, \" \"will pass as GIT_SHA variable\",\n )\n build_parser.add_argument(\n \"-n\", \"--name\", required=True, help=\"The name of the image to build\"\n )\n build_parser.add_argument(\n \"--build-arg\",\n metavar=\"ARG=VALUE\",\n nargs=\"*\",\n default=[],\n help=\"Set extra build-time variables. GIT_SHA, TIMESTAMP will be passed by default.\",\n )\n build_parser.add_argument(\n \"-r\",\n \"--raw\",\n action=\"store_true\",\n help=\"Whether to use raw docker build command to build, skipping caching logic\",\n )\n build_parser.add_argument(\n \"--registry\",\n default=config.DOCKER_REGISTRY,\n help=\"Docker registry use to determine the image identity, \"\n \"can be set via IMAGE_BUILDER_DOCKER_REGISTRY environment variable, \"\n 'or set DOCKER_REGISTRY in config.py. Default is \"%(default)s\"',\n )\n build_parser.add_argument(\n \"-t\",\n \"--tag-pattern\",\n default=config.GIT_SHA_TAG_PATTERN,\n help=\"Tag pattern, can only include one `{git_sha}` placeholder, \"\n 'such as \"{git_sha}-new\". If the tag exists, we won\\'t rebuild it. 
'\n 'Default is \"%(default)s\"',\n )\n build_parser.add_argument(\n \"-e\",\n \"--extra-tag\",\n nargs=\"*\",\n default=[],\n help=\"Extra tags to tag to the final images\",\n )\n build_parser.add_argument(\n \"--extra-name\",\n nargs=\"*\",\n default=[],\n help=\"Extra name and optionally with a tag in the 'name:tag' format\",\n )\n build_parser.add_argument(\n \"-o\", \"--output-hash\", help=\"The output filename of the files hash log.\"\n )\n build_parser.set_defaults(func=build)\n\n args = parser.parse_args()\n if args.dry_run:\n # DRY_RUN env will be read in image_builder.libs.process\n os.environ[\"DRY_RUN\"] = \"1\"\n\n if args.func == build:\n args.path = expand_path(args.path)\n if args.output_hash:\n args.output_hash = expand_path(args.output_hash)\n\n args.file = args.file or locate_dockerfile(args.name)\n args.file = expand_path(args.file)\n # set environ for main dockerfile for possibly retrieving later\n os.environ[\n config.DOCKERFILE_ENV_PATTERN.format(image_name=args.name)\n ] = args.file\n\n # change CWD to PATH\n os.chdir(args.path)\n\n if not args.registry:\n parser.error(\n \"--registry should be provied \"\n \"or specified by IMAGE_BUILDER_DOCKER_REGISTRY environment variable or set DOCKER_REGISTRY in config.py\"\n )\n if not all(\"=\" in kv for kv in args.build_arg):\n parser.error(\"--build_arg must be in ARG=VALUE format\")\n\n # set git_sha_tag\n try:\n args.git_sha_tag = args.tag_pattern.format(git_sha=args.git_sha)\n except KeyError:\n parser.error(\n 'Wrong --tag-pattern provided. Can only include one `{git_sha}` placeholder, such as \"{git_sha}-new\"'\n )\n\n # setup logging\n level = logging.WARNING - args.verbose * 10\n logging.basicConfig(\n level=level, format=\"%(asctime)s %(name)s %(levelname)s %(message)s\"\n )\n\n if args.output_hash:\n h = logging.FileHandler(args.output_hash)\n h.setLevel(logging.DEBUG)\n h.setFormatter(logging.Formatter(\"%(message)s\"))\n hash_logger.addHandler(h)\n\n # Suppress warning when we don't verify ssl\n import urllib3\n\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\n return args.func(args)", "def build_docker(c):\n tag = c.run('git describe', hide=True)\n docker_img = f'{docker_repo}:{tag.stdout.strip()}'\n c.run(f'docker build -t {docker_img} .')", "def create_docker_context(self):\n\n self.tarfile = io.BytesIO()\n\n with tarfile.open(fileobj=self.tarfile, mode=\"w|\") as tar:\n for f in self.files:\n tarinfo = tarfile.TarInfo(f['name'])\n tarinfo.size = len(f['content'])\n if 'mode' in f:\n tarinfo.mode = f['mode']\n tar.addfile(tarinfo, io.BytesIO(f['content'].encode('utf-8')))\n self.tarfile.seek(0) # Reset from EOF", "def build(parser):\n parser.add_argument(\n '-i', '--identity-file',\n help=(\n 'A SSH private key file which may be used to pull down '\n 'repositories when building.'\n ),\n )\n parser.add_argument(\n '-e', '--env',\n action='append',\n default=[],\n help=(\n 'Add environ variables to the build. These may be accessed in '\n 'the build scripts. Each variable should be of the format '\n 'KEY=VALUE. This may be used to pass in credentials required '\n 'to access private repositories. 
May be specified more than once.'\n ),\n )\n parser.add_argument(\n '-b', '--build-dir',\n default=os.getcwd(),\n help=(\n 'This folder should be accessible from the docker instance.'\n ),\n )\n parser.add_argument(\n '--archive',\n help=(\n 'Archive the build files into a local tarball.'\n ),\n )\n parser.add_argument(\n '--archive-only',\n action='store_true',\n default=False,\n help=(\n 'Skip tagging and building the runner image.'\n ),\n )\n parser.add_argument(\n '-t', '--tag',\n help=(\n 'Tag to apply to the built image. '\n 'This will default to the current date/time.'\n ),\n )\n parser.add_argument(\n '--no-cache',\n dest='use_cache',\n action='store_false',\n default=True,\n help=(\n 'Do not mount a cache volume when compiling the app.'\n ),\n )\n parser.add_argument(\n '--cache',\n metavar='CONTAINER:PATH',\n help=(\n 'An optional volume or location for the cache. The format is '\n '\"<volume_id>:<path>\" where the \"volume_id\" must be the '\n 'name or hash of an existing volume. The \"path\" is an absolute '\n 'path to the cache folder/volume within the build container.'\n '\\n\\n'\n 'By default a container will be created by mangling the name of '\n 'the app by appending \"__buildcache\" (e.g. \"myapp__buildcache\").'\n '\\n\\n'\n 'This option is ignored if --no-cache is specified.'\n '\\n\\n'\n 'The \"volume_id\" may be an absolute path on the host filesystem.'\n '\\n\\n'\n 'The \"path\" may be dropped, in which case it will default to '\n '/tmp/cache inside the build container.'\n '\\n\\n'\n 'Examples:'\n '\\n\\n'\n ' # custom volume with default path\\n'\n ' --cache my_cache'\n '\\n\\n'\n ' # custom path inside of volume\\n'\n ' --cache my_cache:/tmp/cache'\n '\\n\\n'\n ' # host filesystem\\n'\n ' --cache /tmp/cache'\n ),\n )\n parser.add_argument(\n '--rebuild-cache',\n action='store_true',\n default=False,\n help=(\n 'Delete any cached artifacts prior to building.'\n ),\n )\n parser.add_argument(\n '--skip-cleanup',\n action='store_true',\n default=False,\n help=(\n 'Skip removal of images and containers.'\n ),\n )\n parser.add_argument(\n 'app',\n help=(\n 'Path to an application folder with a meta.yml file'\n ),\n )", "def build(self, source, config, repo, tag):\n check_blacklist(repo)\n env = ' '.join(\"{}='{}'\".format(\n k, v.encode('unicode-escape').replace(\"'\", \"\\\\'\")) for k, v in config.viewitems())\n dockerfile = \"FROM {}\\nENV {}\".format(source, env)\n f = io.BytesIO(dockerfile.encode('utf-8'))\n target_repo = \"{}/{}:{}\".format(self.registry, repo, tag)\n logger.info(\"Building Docker image {}\".format(target_repo))\n with SimpleFlock(self.FLOCKFILE, timeout=1200):\n stream = self.client.build(fileobj=f, tag=target_repo, stream=True, rm=True)\n log_output(stream)", "def build_image(image, build_args):\n\n subprocess.check_call(['docker', 'build', '--no-cache', '--force-rm', '-t',\n image] + build_args)", "def build(context, cache=True, force_rm=False, hide=False):\n python_name = f\"{IMAGE_NAME}-{IMAGE_VER}\"\n docker_name = f\"{IMAGE_NAME}:{IMAGE_VER}\"\n\n print(f\"Building Python package {python_name}\")\n run_cmd(\n context=context,\n exec_cmd=\"poetry build\",\n pty=False,\n error_message=f\"Failed to build Python package {python_name}\",\n )\n\n print(f\"Building Docker image {docker_name}\")\n command = (\n f\"docker build --tag {docker_name} \"\n f\"--build-arg LMA_VERSION={IMAGE_VER} --build-arg WHEEL_DIR=dist \"\n f\"-f Dockerfile .\"\n )\n\n if not cache:\n command += \" --no-cache\"\n if force_rm:\n command += \" --force-rm\"\n\n run_cmd(\n 
context=context,\n exec_cmd=command,\n pty=False,\n hide=hide,\n error_message=f\"Failed to build Docker image {docker_name}\",\n )", "def _build(build_context,\n image_tag,\n image_name,\n nocache,\n credstore_env=None,\n registries=None):\n _logger.info('Starting build ...')\n\n # Build the image\n docker_builder = DockerBuilder(\n build_context=build_context,\n image_name=image_name,\n image_tag=image_tag,\n credstore_env=credstore_env,\n registries=registries,\n )\n docker_builder.login_private_registries()\n if docker_builder.check_image():\n # Image already built\n docker_builder.clean()\n return docker_builder\n if not docker_builder.build(nocache=nocache):\n docker_builder.clean()\n raise BuildException('The docker image could not be built.')\n return docker_builder", "def build_base():\n with lcd(env.local_path):\n put('./requirements.txt', '/srv/build/requirements.txt')\n\n with cd('/srv/build'):\n run('docker build -t {base_image_name} .'.format(\n base_image_name=env.base_image_name,\n ))", "def build(project_name, compose_file, container, container_dir_to_copy, env):\n compose_build(project_name, compose_file, container, container_dir_to_copy, env=env)", "def build_orc8r():\n subprocess.check_call('./build.py -a', shell=True, cwd=orc8_docker_path)", "def test_docker_build(rule_runner: RuleRunner) -> None:\n rule_runner.write_files(\n {\n \"src/BUILD\": \"docker_image(name='test-image', image_tags=['1.0'])\",\n \"src/Dockerfile\": \"FROM python:3.8\",\n }\n )\n target = rule_runner.get_target(Address(\"src\", target_name=\"test-image\"))\n result = run_docker(rule_runner, target)\n assert len(result.artifacts) == 1\n assert len(result.artifacts[0].extra_log_lines) == 2\n assert \"Built docker image: test-image:1.0\" == result.artifacts[0].extra_log_lines[0]\n assert \"Docker image ID:\" in result.artifacts[0].extra_log_lines[1]\n assert \"<unknown>\" not in result.artifacts[0].extra_log_lines[1]", "def build_docker(params) -> None:\n print(\"Building docker image...\")\n cmd = \"cd bg_changer && docker build --tag bg_changer . >/dev/null 2>&1\"\n if os.system(cmd) == 0:\n print(\" Success !\")\n else:\n print(\" Failure !\")", "def build(config, version_tag):\n config_dict = get_config(config)\n image_name = config_dict['IMAGE'].split(':')[0]\n image = '{}:{}'.format(image_name, version_tag)\n base_image = config_dict['BASE_IMAGE']\n\n cmd = 'docker build -t {image} --build-arg base_image={base_image} .'.format(image=image,\n base_image=base_image)\n with cd(env.project_dir):\n run(cmd)\n return image", "def build(obj):\n logger.info(ASCII_LOGO)\n logger.info(\"Started building SageMaker Docker image. 
It will take some minutes...\\n\")\n\n try:\n config_file_path = os.path.join('.sagify.json')\n if not os.path.isfile(config_file_path):\n raise ValueError()\n\n config = ConfigManager(config_file_path).get_config()\n api_build.build(\n source_dir=config.sagify_module_dir,\n requirements_dir=config.requirements_dir,\n docker_tag=obj['docker_tag'],\n image_name=config.image_name,\n python_version=config.python_version)\n\n logger.info(\"Docker image built successfully!\")\n except ValueError:\n logger.info(\"This is not a sagify directory: {}\".format(dir))\n sys.exit(-1)\n except subprocess.CalledProcessError as e:\n logger.debug(e.output)\n raise\n except Exception as e:\n logger.info(\"{}\".format(e))\n sys.exit(-1)", "def _build_container(\n self, target_image, odcs, repo_type, repo_list, terminate_event,\n scratch, record):\n self.logger.info(\"Building image: %s\" % target_image)\n cmd_list = [\"rhpkg\", \"--path=%s\" % self.distgit_dir]\n\n if self.runtime.user is not None:\n cmd_list.append(\"--user=%s\" % self.runtime.user)\n\n cmd_list += (\n \"container-build\",\n \"--nowait\",\n )\n\n if odcs:\n if odcs == 'signed':\n odcs = 'release' # convenience option for those used to the old types\n cmd_list.append('--signing-intent')\n cmd_list.append(odcs)\n else:\n if repo_type:\n repo_list = list(repo_list) # In case we get a tuple\n repo_list.append(self.metadata.cgit_url(\".oit/\" + repo_type + \".repo\"))\n\n if repo_list:\n # rhpkg supports --repo-url [URL [URL ...]]\n cmd_list.append(\"--repo-url\")\n cmd_list.extend(repo_list)\n\n if scratch:\n cmd_list.append(\"--scratch\")\n\n # Run the build with --nowait so that we can immediately get information about the brew task\n rc, out, err = exectools.cmd_gather(cmd_list)\n\n if rc != 0:\n # Probably no point in continuing.. can't contact brew?\n self.logger.info(\"Unable to create brew task: out={} ; err={}\".format(out, err))\n return False\n\n # Otherwise, we should have a brew task we can monitor listed in the stdout.\n out_lines = out.splitlines()\n\n # Look for a line like: \"Created task: 13949050\" . Extract the identifier.\n task_id = next((created_line.split(\":\")[1]).strip() for created_line in out_lines if\n created_line.startswith(\"Created task:\"))\n\n record[\"task_id\"] = task_id\n\n # Look for a line like: \"Task info: https://brewweb.engineering.redhat.com/brew/taskinfo?taskID=13948942\"\n task_url = next((info_line.split(\":\", 1)[1]).strip() for info_line in out_lines if\n info_line.startswith(\"Task info:\"))\n\n self.logger.info(\"Build running: {}\".format(task_url))\n\n record[\"task_url\"] = task_url\n\n # Now that we have the basics about the task, wait for it to complete\n error = watch_task(self.logger.info, task_id, terminate_event)\n\n # Looking for something like the following to conclude the image has already been built:\n # BuildError: Build for openshift-enterprise-base-v3.7.0-0.117.0.0 already exists, id 588961\n if error is not None and \"already exists\" in error:\n self.logger.info(\"Image already built against this dist-git commit (or version-release tag): {}\".format(target_image))\n error = None\n\n # Gather brew-logs\n logs_dir = \"%s/%s\" % (self.runtime.brew_logs_dir, self.metadata.name)\n logs_rc, _, logs_err = exectools.cmd_gather([\"brew\", \"download-logs\", \"-d\", logs_dir, task_id])\n\n if logs_rc != 0:\n self.logger.info(\"Error downloading build logs from brew for task %s: %s\" % (task_id, logs_err))\n\n if error is not None:\n # An error occurred. 
We don't have a viable build.\n self.logger.info(\"Error building image: {}, {}\".format(task_url, error))\n return False\n\n self.logger.info(\"Successfully built image: {} ; {}\".format(target_image, task_url))\n return True", "def build(ctx):\n if 'cicd' in run('hostname').stdout.strip():\n # Check if we are executing the task from an aws instance\n if requests.get('http://169.254.169.254/latest/meta-data/').status_code == 200:\n git_ref_source = os.environ.get('GIT_SOURCE_BRANCH')\n git_ref_target = os.environ.get('GIT_TARGET_BRANCH')\n run('git fetch --all')\n run('git checkout {}'.format(git_ref_target))\n\n \n tar_name = \"Frontend\"\n #'wordpress-{}-en_CA.tar.gz'.format(WORDPRESS_VERSION)\n #tar_file = open(tar_name, 'wb')\n #tar_file.write(wp_tar.content)\n #tar_file.close()\n\n #run('tar -xzf {}'.format(tar_name))\n \n # Download the postmedia source-code and patches/config\n #clone(git_ref_target, git_ref_source)\n\n # merge (if applicable) and create the release\n if git_ref_source:\n git_pr_id = os.getenv('GIT_PR_ID')\n github_util.put('repos/{}/{}/pulls/{}/merge'.format(GIT_ORG, GIT_REPO, git_pr_id), params={'merge_method': 'squash'})\n version = github_util.get_next_rc()\n github_util.set_release(target_commitish='master', tag=version, prerelease=True)\n build_type = 'release candidate'\n else:\n version = github_util.get_next_hf()\n github_util.set_release(git_ref_target, version)\n build_type = 'hotfix'\n\n # package and upload to S3\n author = os.environ.get('GIT_AUTHOR')\n notes = release_notes(version, author, git_ref_target, git_ref_source, build_type)\n tarball = package(notes, version)\n print(\"No upload to S3\")\n #upload(tarball, S3_BUCKET_STAGE)\n else:\n author = input('please enter your name for the release notes: ')\n\n valid_snapshot_name = False\n while not valid_snapshot_name:\n snapshot_name = input('please enter a name for your snapshot: ')\n snapshot_name = snapshot_name.lower()\n snapshot_name = re.sub('-', '_', snapshot_name)\n\n # domain sections cannot be longer than 63 characters, so snapshot\n # name cannot be longer than 26 (63 minus snapshot-20190128-1713-homesanddesign - 37)\n if (len(snapshot_name) <= 26):\n valid_snapshot_name = True\n else:\n print(\"{} is too long. 
Please enter a new snapshot name of 28 characters or less.\".format(snapshot_name))\n\n build_type = 'snapshot'\n \n version = '{}_{}_{}'.format(build_type, snapshot_name,\n datetime.datetime.now().strftime(\"%Y%m%d_%H%M\"))\n print(\"Building snapshot {}\".format(version))\n git_ref_target = 'master'\n git_ref_source = 'HEAD'\n notes = release_notes(version, author, git_ref_target, git_ref_source, build_type)\n os.chdir('/opt/')\n if os.path.exists(WORK_DIR):\n os.system('rm -rf {}'.format(WORK_DIR))\n os.mkdir(WORK_DIR)\n tarball = package(notes, version)\n print (\"No upload to S3\")\n #upload(tarball, S3_BUCKET_DEV)", "def build(\n self,\n context: \"Directory\",\n dockerfile: Optional[str] = None,\n build_args: Optional[Sequence[BuildArg]] = None,\n target: Optional[str] = None,\n secrets: Optional[Sequence[\"Secret\"]] = None,\n ) -> \"Container\":\n _args = [\n Arg(\"context\", context),\n Arg(\"dockerfile\", dockerfile, None),\n Arg(\"buildArgs\", build_args, None),\n Arg(\"target\", target, None),\n Arg(\"secrets\", secrets, None),\n ]\n _ctx = self._select(\"build\", _args)\n return Container(_ctx)", "def _Build(self, image):\n image = _ContainerImage(image)\n build_start = time.time()\n if not FLAGS.local_container_build:\n try:\n # Build the image remotely using an image building service.\n self.RemoteBuild(image)\n self.remote_build_times[image.name] = time.time() - build_start\n return\n except NotImplementedError:\n pass\n\n self.PrePush(image)\n # Build the image locally using docker.\n build_start = time.time()\n self.LocalBuildAndPush(image)\n self.local_build_times[image.name] = time.time() - build_start", "def cli(context):\n dev = f\"docker run -it -v {PWD}:/local {IMAGE_NAME}:{IMAGE_VER} /bin/bash\"\n print(f\"{dev}\")\n context.run(f\"{dev}\", pty=True)", "def build_image(self, df_path):\n cmd = \"{} build -f {} -t {} .\".format(\n self.binary, df_path, self.vars['image'])\n LOG.info(\"Running: {}\".format(cmd))\n res = subprocess.run(cmd, shell=True)\n if res.returncode != 0:\n sys.exit(2)\n return res", "def main():\n extensions = os.getenv('EXTENSIONS', DEFAULT_EXTENSIONS).split(',')\n extensions.sort()\n docker_contents = []\n contents = travis_contents()\n data = yaml.safe_load(contents)\n\n # set the version\n php_versions = data.get('php', [DEFAULT_VERSION])\n php_version = php_versions[0]\n docker_contents.append('FROM php:{0}'.format(php_version))\n\n # ensure all the php shit exists\n\n # LC_ALL=en_US.UTF-8\n docker_contents.append('ENV DEBIAN_FRONTEND=noninteractive LC_ALL=C DOCKER=1') # noqa\n docker_contents.append('RUN apt-get update')\n docker_contents.append('RUN apt-get -qq install -qq -y php5-cli php-pear')\n # for composer\n docker_contents.append('RUN apt-get -qq install -qq -y git-core')\n # for curl\n docker_contents.append('RUN apt-get -qq install -qq -y libcurl4-openssl-dev')\n # for intl\n docker_contents.append('RUN apt-get -qq install -qq -y libicu-dev')\n\n # installs user-specified packages\n packages = os.getenv('PACKAGES', '')\n if len(os.getenv('PACKAGES', '')) > 0:\n packages = packages.split(',')\n docker_contents.append('RUN apt-get -qq install -qq -y {0}'.format(\n ' '.join(packages)\n ))\n\n for extension in extensions:\n if extension in available_extensions:\n docker_contents.append('RUN docker-php-ext-install {0}'.format(\n extension\n ))\n else:\n docker_contents.append('RUN apt-get -qq install -qq -y php5-{0} && pecl install -o -f {0} && \\\\'.format(extension))\n docker_contents.append(' rm -rf /tmp/pear && \\\\')\n if 
extension in ZEND_EXTENSIONS:\n docker_contents.append(' echo \"zend_extension=/usr/local/lib/php/extensions/no-debug-non-zts-{0}/xdebug.so\" > /usr/local/etc/php/conf.d/{1}.ini'.format(\n phpextension_paths[php_version],\n extension\n ))\n else:\n docker_contents.append(' echo \"extension={0}.so\" > /usr/local/etc/php/conf.d/{0}.ini'.format(extension))\n\n # ensure we have all the proper php testing stuff\n docker_contents.append('RUN \\\\')\n docker_contents.append(' curl -sSL https://phar.phpunit.de/phpunit-old.phar > phpunit.phar && \\\\')\n docker_contents.append(' curl -sS https://getcomposer.org/installer | php && \\\\')\n docker_contents.append(' mv composer.phar /usr/local/bin/composer && \\\\')\n docker_contents.append(' mv phpunit.phar /usr/local/bin/phpunit && \\\\')\n docker_contents.append(' chmod +x /usr/local/bin/composer /usr/local/bin/phpunit && \\\\')\n docker_contents.append(' phpunit --version')\n\n # set the environment\n environments = data.get('env', {'matrix': 'CI=1'}).get('matrix', [])\n docker_env = environments[0]\n docker_contents.append('ENV {0}'.format(docker_env))\n\n docker_contents.append('ADD composer.json /app/composer.json')\n docker_contents.append('WORKDIR /app')\n docker_contents.append('RUN echo \"date.timezone = UTC\" > /usr/local/etc/php/conf.d/timezone.ini') # noqa\n\n for script in data.get('before_script', []):\n docker_contents.append('RUN {0}'.format(script))\n\n docker_contents.append('ADD . /app')\n\n # HACK\n docker_contents.append('ENV COVERALLS=1 DEFAULT=1 PHPCS=1')\n\n for script in data.get('script', []):\n docker_contents.append('RUN {0}'.format(script))\n\n with open('{0}/Dockerfile'.format(os.getcwd()), 'w') as f:\n for line in docker_contents:\n f.write(\"{0}\\n\\n\".format(line))", "def build(self, update=False, local=True, remote=True, write=False):\n log.debug(\"Building %s\" % self.name)\n dockerfile = write and not os.path.exists(os.path.join(self.get_directory(), \"Dockerfile\"))\n self._build(LocalPackaging(self.get_directory(), write), update, local, remote, dockerfile)", "def dockerfile() -> co.Exec:\n image = co.Image(dockerfile=\"./docker/Dockerfile.simple\")\n return co.Exec(\n f\"python -c '{pretty_table_script}'\", image=image, doc=co.util.magic_doc()\n )", "def _do_build(self) -> List[types.Action]:\n return [\n docker_command.DockerRun(\n command=[\"/entrypoint.sh\", self.tag],\n builder=builder.GO_BUILDER,\n run_config=docker_command.default_run_config(\n constants.STORAGE_OPERATOR_ROOT / \"entrypoint.sh\"\n ),\n mounts=[\n utils.bind_mount(\n target=Path(\"/storage-operator\"),\n source=constants.STORAGE_OPERATOR_ROOT,\n ),\n # This container (through operator-sdk) will call `docker\n # build`, so we need to expose our Docker socket.\n utils.bind_mount(\n target=Path(\"/var/run/docker.sock\"),\n source=Path(\"/var/run/docker.sock\"),\n ),\n ],\n )\n ]", "def _build(registry, image_name, git_sha):\n\n image = f\"{registry}/{image_name}\"\n\n # If this image is not in our registry, nothing we can do.\n if registry != config.DOCKER_REGISTRY:\n return \"\"\n\n # If image:{git_sha} already exists, then return.\n logger.info(\n f\"Pulling git_sha tag {image}:{git_sha} to check if it already exists\"\n )\n if (\n not args.dry_run\n and docker_silent.pull(f\"{image}:{git_sha}\").returncode == 0\n ):\n # better to find the hash-{hash} of this image, and return hash\n # but currently, it is not easy to find all tags of the same image digest through registry API.\n # so we return image digest instead.\n digest = 
get_image_digest(f\"{image}:{git_sha}\")\n logger.info(\n f\"git_sha tag {image}:{git_sha} already exists, digest: %s\", digest\n )\n if not digest:\n raise Exception(\"Failed to get digest for existing image\")\n _tag_to_extra_tags(args, image, git_sha)\n return digest\n\n # Enter build context directory if it is specified\n build_context = enter_build_context(image_name)\n\n # Parse .dockerignore in build context\n dockerignore_files_set = parse_dockerignore(build_context)\n\n # Check if the dockerfile exists\n dockerfile_path = locate_dockerfile(image_name)\n if not os.path.isfile(dockerfile_path):\n logger.error(\n \"%s not exists or is not a file, so %s cannot get build\",\n dockerfile_path,\n image_name,\n )\n raise Exception(\"Building cannot continue\")\n\n dockerfile = Dockerfile(dockerfile_path, build_arg=build_arg)\n\n hasher = sha256()\n\n # Build parents, and calc parents hash\n for parent_image in dockerfile.parent_images:\n (\n parent_image_registry,\n parent_image_name,\n parent_image_tag,\n ) = parse_docker_image_identity(parent_image)\n parent_digest = _build(\n parent_image_registry, parent_image_name, parent_image_tag\n )\n if parent_digest is None:\n raise Exception(f\"Failed to get parent_digest for {image}\")\n hasher.update(parent_digest.encode())\n hash_logger.info(\n \"parent: %s, digest: (%s, %s), hash: %s\",\n parent_image,\n parent_digest,\n parent_digest.encode(),\n hasher.hexdigest(),\n )\n\n # Calc current image files hash\n\n def update_file_hash(f):\n if not os.path.isfile(f):\n return\n if f in dockerignore_files_set:\n hash_logger.debug(\"ignore: %s\", f)\n return\n with open(f, \"rb\") as open_file:\n buf = open_file.read(config.READ_FILE_BLOCKSIZE)\n while len(buf) > 0:\n hasher.update(buf)\n buf = open_file.read(config.READ_FILE_BLOCKSIZE)\n hash_logger.info(\"update: %s, hash: %s\", f, hasher.hexdigest())\n\n srcs = [dockerfile_path] + dockerfile.copied_srcs + dockerfile.added_srcs\n # TODO: if the src is a url, download it and hash it (even crane didn't do that)\n for src in srcs:\n for f in sorted(glob.glob(src)):\n # We match every file in a directory recursively\n if os.path.isdir(f):\n for sub_f in sorted(glob.glob(f\"{f}/**\", recursive=True)):\n update_file_hash(sub_f)\n else:\n update_file_hash(f)\n\n files_hash = hasher.hexdigest()\n hash_logger.info(\"image: %s, hash: %s\", image, files_hash)\n\n hash_tag = config.FILES_HASH_TAG_PATTERN.format(files_hash=files_hash)\n # FIXME(harry): hack, remove this\n old_hash_image = f\"docker-registry.example.com:5000/{image_name}:{hash_tag}\"\n\n logger.info(\n f\"Pulling files_hash tag {image}:{hash_tag} to check if it already exists\"\n )\n # If image:hash-{hash} already exists,\n # then content didn't change, return.\n # We just need to tag it to latest code version.\n if (\n not args.dry_run\n and docker_silent.pull(f\"{image}:{hash_tag}\").returncode == 0\n ):\n logger.info(\n f\"files_hash tag {image}:{hash_tag} already exists, \"\n \"it means content didn't change, we can just tag the old image to new git_sha version tag\"\n )\n # FIXME(harry): hack, remove this\n elif not args.dry_run and docker_silent.pull(old_hash_image).returncode == 0:\n logger.info(f\"NOTE: files_hash tag {old_hash_image} already exists!\")\n # tag and push this hash image\n if docker.tag(old_hash_image, f\"{image}:{hash_tag}\").returncode != 0:\n logger.error(\"Failed to tag old hash image\")\n return\n if docker.push(f\"{image}:{hash_tag}\").returncode != 0:\n logger.error(\"Failed to push hash_tag image\")\n return\n # 
If image:hash-{hash} not exists, then build it from Dockerfile.\n else:\n logger.info(\n f\"files_hash tag {image}:{hash_tag} dosen't exists, \"\n \"it means content may changed, gonna build it from Dockerfile\"\n )\n if build_with_raw_command(args, image, dockerfile_path, hash_tag) != 0:\n logger.error(f\"Failed to build {image}:{hash_tag}\")\n return\n if docker.push(f\"{image}:{hash_tag}\").returncode != 0:\n logger.error(f\"Failed to push image\")\n return\n logger.info(f\"image files_hash tag {image}:{hash_tag} is pushed\")\n\n # tag and push this final image\n if docker.tag(f\"{image}:{hash_tag}\", f\"{image}:{git_sha}\").returncode != 0:\n logger.error(\"Failed to tag image\")\n return\n _tag_to_extra_tags(args, image, git_sha)\n if docker.push(f\"{image}:{git_sha}\").returncode != 0:\n logger.error(\"Failed to push image\")\n return\n digest = get_image_digest(f\"{image}:{git_sha}\")\n if not digest:\n logger.error(\"Failed to get digest for image\")\n return\n logger.info(f\"image {image}:{git_sha} is pushed, digest: {digest}\")\n\n return digest", "def docker_exec(cmdline):\n local('docker exec -ti {} {}'.format(project_name, cmdline))", "def build_runtime(self, docker_image_name, dockerfile):\n logger.debug('Building new docker image from Dockerfile')\n logger.debug('Docker image name: {}'.format(docker_image_name))\n\n entry_point = os.path.join(os.path.dirname(__file__), 'entry_point.py')\n create_handler_zip(k8s_config.FH_ZIP_LOCATION, entry_point, 'lithopsentry.py')\n\n if dockerfile:\n cmd = '{} build -t {} -f {} .'.format(k8s_config.DOCKER_PATH,\n docker_image_name,\n dockerfile)\n else:\n cmd = '{} build -t {} .'.format(k8s_config.DOCKER_PATH, docker_image_name)\n\n if logger.getEffectiveLevel() != logging.DEBUG:\n cmd = cmd + \" >{} 2>&1\".format(os.devnull)\n\n logger.info('Building runtime')\n res = os.system(cmd)\n if res != 0:\n raise Exception('There was an error building the runtime')\n\n self._delete_function_handler_zip()\n\n cmd = '{} push {}'.format(k8s_config.DOCKER_PATH, docker_image_name)\n if logger.getEffectiveLevel() != logging.DEBUG:\n cmd = cmd + \" >{} 2>&1\".format(os.devnull)\n res = os.system(cmd)\n if res != 0:\n raise Exception('There was an error pushing the runtime to the container registry')\n logger.debug('Building done!')" ]
[ "0.73519874", "0.7246165", "0.718203", "0.70525706", "0.6954098", "0.6796647", "0.66760755", "0.66742986", "0.6608287", "0.64849854", "0.6475837", "0.6449171", "0.6399748", "0.6398399", "0.63747525", "0.6352232", "0.6344589", "0.6330966", "0.63261646", "0.62892073", "0.625937", "0.6249296", "0.6207705", "0.6152961", "0.6146376", "0.61356676", "0.61319625", "0.61318046", "0.6113617", "0.6096116" ]
0.7421357
0
Pull the image and create a container with host_config
def setup(self):
    exists = [i for i in self.client.images() if self.image in i['RepoTags']]
    # Only pull the image if we don't have it
    if not exists or self.pull:
        self.client.pull(self.image)
        self.logger.debug("Pulled {}".format(self.image))
    self.container = self.client.create_container(
        image=self.image,
        host_config=self.host_config,
        name=self.name,
        command=self.command,
        environment=self.environment
    )
    self.logger.debug("Created container {}".format(self.container['Id']))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_container(client):\n client.images.build(path=os.path.join(os.path.abspath(\"\"), \"docker\"), tag=\"scrape_light\")", "def pull_image(image):\n\n subprocess.check_call(['docker', 'pull', image])", "def run(self, container_config: ContainerConfig) -> Container:", "def create(dockerfile):\n\n path = os.path.dirname(dockerfile)\n\n container_name = input('Enter container name: ')\n port = input('Enter port number to map TCP port 5000 in the container, to a port on the Docker host: ')\n\n try:\n image = CLIENT.images.build(path=path, dockerfile=dockerfile, tag=\"my_app_image\")\n # Run a container and map TCP port 5000 in the container to a given port on the Docker host.\n container = CLIENT.containers.run('my_app_image', detach=True, ports={'5000/tcp': port},\n name=container_name)\n click.secho(\"Container created with name: {}. App is running \"\n \"on http://0.0.0.0:{}/ on the host.\"\n .format(container_name, port), bg='blue', fg='white')\n except (docker.errors.APIError, TypeError, OSError) as err:\n print(err)", "def _create_docker_container(self):\n cwd = os.getcwd()\n\n # get a docker client\n docker_client = docker.from_env()\n docker_image = \"aca_build0:latest\"\n mount_pnt = docker.types.Mount(\"/mnt/alcor-control-agent\",\n f'''{cwd}/../..''',\n type='bind')\n\n mount_modules = docker.types.Mount(\"/lib/modules\",\n \"/lib/modules\",\n type='bind')\n\n # Create the container in privileged mode\n container = docker_client.containers.create(\n docker_image, '/bin/bash', tty=True,\n stdin_open=True, auto_remove=False, mounts=[mount_pnt, mount_modules],\n privileged=True, cap_add=[\"SYS_PTRACE\"],\n ports={str(aca_droplet.port_internal) + \"/tcp\": ('0.0.0.0', aca_droplet.port_external)},\n security_opt=[\"seccomp=unconfined\"], name=self.id)\n container.start()\n container.reload()\n\n # Increment the static external port number counter\n aca_droplet.port_external = aca_droplet.port_external + 1\n\n # Restart dependancy services\n container.exec_run(\"/etc/init.d/rpcbind restart\")\n container.exec_run(\"/etc/init.d/rsyslog restart\")\n container.exec_run(\"ip link set dev eth0 up mtu 9000\")\n\n # We may need to restart ovs\n # container.exec_run(\"/etc/init.d/openvswitch-switch restart\")\n\n # Create simlinks\n container.exec_run(\"ln -s /mnt/alcor-control-agent/mizar/build/bin /trn_bin\")\n container.exec_run(\"ln -s /mnt/alcor-control-agent/mizar/build/xdp /trn_xdp\")\n container.exec_run(\"ln -s /sys/fs/bpf /bpffs\")\n\n container.exec_run(\n \"ln -s /mnt/alcor-control-agent/build/ /aca_build\")\n\n # Run the transitd in the background\n container.exec_run(\"/trn_bin/transitd \",\n detach=True)\n\n # Enable debug and tracing for the kernel\n container.exec_run(\n \"mount -t debugfs debugfs /sys/kernel/debug\")\n container.exec_run(\n \"echo 1 > /sys/kernel/debug/tracing/tracing_on\")\n\n # Enable core dumps (just in case!!)\n container.exec_run(\"ulimit -u\")\n cmd = \"echo '/mnt/alcor-control-agent/mizar/core/core_{}_%e.%p' |\\\n tee /proc/sys/kernel/core_pattern \".format(self.ip)\n container.exec_run(cmd)\n\n self.container = container\n self.ip = self.container.attrs['NetworkSettings']['IPAddress']\n self.mac = self.container.attrs['NetworkSettings']['MacAddress']", "def build(self, source, config, repo, tag):\n check_blacklist(repo)\n env = ' '.join(\"{}='{}'\".format(\n k, v.encode('unicode-escape').replace(\"'\", \"\\\\'\")) for k, v in config.viewitems())\n dockerfile = \"FROM {}\\nENV {}\".format(source, env)\n f = 
io.BytesIO(dockerfile.encode('utf-8'))\n target_repo = \"{}/{}:{}\".format(self.registry, repo, tag)\n logger.info(\"Building Docker image {}\".format(target_repo))\n with SimpleFlock(self.FLOCKFILE, timeout=1200):\n stream = self.client.build(fileobj=f, tag=target_repo, stream=True, rm=True)\n log_output(stream)", "def create_image(name: str, distribution: str, apt_repo: str, release_track: str, release_label: str, flavour: str,\n organization: str, docker_registry: str, rosdistro_path: pathlib.Path, timestamp:str,\n publish: bool = False):\n\n # Read configuration files\n common_config = yaml.safe_load((rosdistro_path / 'config/recipes.yaml').open())['common']\n recipe = yaml.safe_load((rosdistro_path / 'config/images.yaml').open())['images']\n distro = recipe[name]['distro']\n build_type = recipe[name]['build_type']\n env = source_file(f'{os.environ[\"BUNDLE_ROOT\"]}/{distro}/setup.bash')\n today = timestamp\n extra_vars: List[Any] = []\n\n try:\n package = recipe[name]['package']\n provision_file = recipe[name]['provision_file']\n except KeyError:\n package = '/tailor-image'\n provision_file = f'{build_type}.yaml'\n\n env['ANSIBLE_CONFIG'] = find_package(package, 'ansible.cfg', env)\n template_path = f'/tailor-image/environment/image_recipes/{build_type}/{build_type}.json'\n provision_file_path = find_package(package, 'playbooks/' + provision_file, env)\n\n optional_vars = []\n optional_var_names = ['username', 'password', 'extra_arguments_ansible',\n 'ansible_command', 'description', 'disk_size', 'group']\n\n for var in optional_var_names:\n if var in recipe[name]:\n optional_vars.extend(['-var', f'{var}={recipe[name][var]}'])\n\n if build_type == 'docker':\n image_name = f'tailor-image-{name}-{distribution}-{release_label}'\n docker_registry_data = docker_registry.replace('https://', '').split('/')\n ecr_server = docker_registry_data[0]\n ecr_repository = docker_registry_data[1]\n extra_vars = [\n '-var', f'type={build_type}',\n '-var', f'bundle_flavour={flavour}',\n '-var', f'image_name={image_name}',\n '-var', f'ecr_server={ecr_server}',\n '-var', f'os_version={distribution}',\n '-var', f'ecr_repository={ecr_repository}',\n '-var', f'aws_access_key={os.environ[\"AWS_ACCESS_KEY_ID\"]}',\n '-var', f'aws_secret_key={os.environ[\"AWS_SECRET_ACCESS_KEY\"]}'\n ]\n\n if not publish:\n extra_vars += ['-except', 'publish']\n\n # Make sure we remove old containers before creting new ones\n run_command(['docker', 'rm', '-f', 'default'], check=False)\n\n elif build_type in ['bare_metal', 'lxd'] and publish:\n # Get information about base image\n base_image = recipe[name]['base_image'].replace('$distribution', distribution)\n\n # Get disk size to use\n disk_size = recipe[name].get('disk_size', 9) # In GB\n\n # Get base image\n base_image_local_path = '/tmp/' + base_image\n base_image_key = release_label + '/images/' + base_image\n click.echo(f'Downloading image from {base_image_key}')\n try:\n boto3.resource('s3').Bucket(apt_repo).download_file(base_image_key, base_image_local_path)\n except botocore.exceptions.ClientError:\n click.echo(f'Unable to download base image from {base_image_key}, creating a new one')\n run_command(['bash',\n '/tailor-image/environment/create_base_image.bash',\n f'{base_image_local_path}',\n f'{distribution}'])\n boto3.resource('s3').Bucket(apt_repo).upload_file(base_image_local_path, base_image_key)\n\n # Enable nbd kernel module, necesary for qemu's packer chroot builder\n run_command(['modprobe', 'nbd'])\n\n # Resize image\n run_command(['qemu-img', 'resize', 
base_image_local_path, '30G'])\n\n # Copy image\n tmp_image = base_image_local_path.replace('disk1', 'disk1-resized')\n run_command(['cp', base_image_local_path, tmp_image])\n\n # Resize partition inside qcow image\n run_command(['virt-resize', '--expand', '/dev/sda1', base_image_local_path, tmp_image])\n run_command(['mv', tmp_image, base_image_local_path])\n\n # Generate image name\n image_name = f'{organization}_{name}_{distribution}_{release_label}_{today}'\n\n extra_vars = [\n '-var', f'image_name={image_name}',\n '-var', f's3_bucket={apt_repo}',\n '-var', f'iso_image={base_image_local_path}',\n '-var', f'distribution={distribution}',\n '-var', f'disk_size={disk_size}'\n ]\n\n # Make sure to clean old image builds\n run_command(['rm', '-rf', '/tmp/images'])\n\n elif build_type == 'ami':\n image_name = f'{organization}_{name}_{distribution}_ami_{release_label}'\n # Get ami-id for base image\n source_ami_id = recipe[name]['source_ami'].get(distribution)\n\n if not source_ami_id:\n click.echo(f'You need to specify a bas AMI for the desired distribution {distribution}')\n sys.exit(1)\n\n # Increase fow how long we wait for image to be ready. Default is 30 minutes, sometime it might take longer\n env['AWS_MAX_ATTEMPTS'] = '90' # minutes\n env['AWS_POLL_DELAY_SECONDS'] = '60' # Poll for status every minute\n\n extra_vars = [\n '-var', f'build_date={today}',\n '-var', f'image_name={image_name}',\n '-var', f'name={name}',\n '-var', f'source_ami_id={source_ami_id}',\n '-var', f'distribution={distribution}',\n '-var', f'release_label={release_label}',\n '-var', f'aws_access_key={os.environ[\"AWS_ACCESS_KEY_ID\"]}',\n '-var', f'aws_secret_key={os.environ[\"AWS_SECRET_ACCESS_KEY\"]}'\n ]\n else:\n return 0\n\n extra_vars.extend(optional_vars)\n\n click.echo(f'Building {build_type} image with: {provision_file}', err=True)\n\n command = ['packer', 'build',\n '-var', f'playbook_file={provision_file_path}',\n '-var', f'organization={organization}',\n '-var', f'bundle_track={release_track}',\n '-var', f'bundle_version={release_label}'] + extra_vars + ['-timestamp-ui', template_path]\n\n run_command(command, env=env, cwd='/tmp')\n\n if build_type in ['bare_metal', 'lxd'] and publish:\n update_image_index(release_label, apt_repo, common_config, image_name)", "def build(config, version_tag):\n config_dict = get_config(config)\n image_name = config_dict['IMAGE'].split(':')[0]\n image = '{}:{}'.format(image_name, version_tag)\n base_image = config_dict['BASE_IMAGE']\n\n cmd = 'docker build -t {image} --build-arg base_image={base_image} .'.format(image=image,\n base_image=base_image)\n with cd(env.project_dir):\n run(cmd)\n return image", "def bdocker(ctx, host):\n ctx.obj = commands.CommandController(endpoint=host)", "def prepare():\n sh('docker build --rm -t {image} {dir}', image=IMAGE, dir=os.path.dirname(__file__))", "def Run(self, args):\n\n if not args.source and not args.no_source:\n raise c_exceptions.InvalidArgumentException(\n '--no-source', 'To omit source, use the --no-source flag.')\n\n if args.no_source:\n if args.tag:\n raise c_exceptions.RequiredArgumentException(\n 'SOURCE',\n 'Source is required to build container image.'\n )\n if args.config:\n raise c_exceptions.RequiredArgumentException(\n 'SOURCE',\n 'Source is required when specifying --config because it is a '\n 'relative path in the source directory.')\n\n do_build_and_push = args.image is None\n if not do_build_and_push and not args.config:\n args.no_source = True\n\n image = self._DetermineImageFromArgs(args)\n\n # Determine 
app_name\n if args.app_name:\n app_name = args.app_name\n else:\n app_name = self._ImageName(image)\n\n # Determine app_version\n app_version = None\n image_has_tag = '@' not in image and ':' in image\n if args.app_version:\n app_version = args.app_version\n elif image_has_tag:\n app_version = image.split(':')[-1] # Set version to tag\n elif args.source:\n if git.IsGithubRepository(\n args.source) and not git.HasPendingChanges(args.source):\n commit_sha = git.GetGitHeadRevision(args.source)\n if commit_sha:\n app_version = commit_sha\n\n # Validate expose\n if args.expose and args.expose < 0:\n raise c_exceptions.InvalidArgumentException('--expose',\n 'port number is invalid')\n\n # Determine gcs_staging_dir_bucket and gcs_staging_dir_object\n if args.gcs_staging_dir is None:\n gcs_staging_dir_bucket = staging_bucket_util.GetDefaultStagingBucket()\n gcs_staging_dir_object = 'deploy'\n else:\n try:\n gcs_staging_dir_ref = resources.REGISTRY.Parse(\n args.gcs_staging_dir, collection='storage.objects')\n gcs_staging_dir_object = gcs_staging_dir_ref.object\n except resources.WrongResourceCollectionException:\n gcs_staging_dir_ref = resources.REGISTRY.Parse(\n args.gcs_staging_dir, collection='storage.buckets')\n gcs_staging_dir_object = None\n gcs_staging_dir_bucket = gcs_staging_dir_ref.bucket\n\n gcs_client = storage_api.StorageClient()\n gcs_client.CreateBucketIfNotExists(gcs_staging_dir_bucket)\n\n # If we are using a default bucket check that it is owned by user project\n # (b/33046325)\n if (args.gcs_staging_dir is None\n and not staging_bucket_util.BucketIsInProject(\n gcs_client, gcs_staging_dir_bucket)):\n raise c_exceptions.RequiredArgumentException(\n '--gcs-staging-dir',\n 'A bucket with name {} already exists and is owned by '\n 'another project. 
Specify a bucket using '\n '--gcs-staging-dir.'.format(gcs_staging_dir_bucket))\n\n if gcs_staging_dir_object:\n gcs_config_staging_path = '{}/{}/config'.format(\n gcs_staging_dir_bucket, gcs_staging_dir_object)\n else:\n gcs_config_staging_path = gcs_staging_dir_bucket\n\n if not args.no_source:\n staged_source = self._StageSource(args.source, gcs_staging_dir_bucket,\n gcs_staging_dir_object)\n else:\n staged_source = None\n\n messages = cloudbuild_util.GetMessagesModule()\n build_config = build_util.CreateBuild(\n messages,\n build_timeout=properties.VALUES.builds.timeout.Get(),\n build_and_push=do_build_and_push,\n staged_source=staged_source,\n image=image,\n dockerfile_path='Dockerfile',\n app_name=app_name,\n app_version=app_version,\n config_path=args.config,\n namespace=args.namespace,\n expose_port=args.expose,\n gcs_config_staging_path=gcs_config_staging_path,\n cluster=args.cluster,\n location=args.location,\n build_tags=([] if not args.app_name else [args.app_name]))\n\n client = cloudbuild_util.GetClientInstance()\n self._SubmitBuild(\n client, messages, build_config, gcs_config_staging_path,\n args.config is None, args.async_)", "def cli(ctx, image_file):\n if not image_file:\n return\n for pull_image in image_file.readline():\n pull_image = pull_image.rstrip('\\n')\n if len(pull_image) == 0:\n continue\n docker.pull(pull_image)\n push_image = '%s/%s/%s' % (DEFAULT_REGISTRY,\n DEFAULR_NAMESPACE,\n pull_image.split('/')[-1])\n docker.tag(pull_image, push_image)\n docker.push(push_image)", "def main():\n\n # get AWS credentials\n aws_credentials = read_aws_credentials()\n access_key_id = aws_credentials['access_key_id']\n secret_access_key = aws_credentials['secret_access_key']\n aws_region = aws_credentials['region']\n\n # build Docker image\n docker_client = docker.from_env()\n image, build_log = docker_client.images.build(\n path='.', tag=LOCAL_REPOSITORY, rm=True)\n\n # get AWS ECR login token\n ecr_client = boto3.client(\n 'ecr', aws_access_key_id=access_key_id, \n aws_secret_access_key=secret_access_key, region_name=aws_region)\n\n ecr_credentials = (\n ecr_client\n .get_authorization_token()\n ['authorizationData'][0])\n\n ecr_username = 'AWS'\n\n ecr_password = (\n base64.b64decode(ecr_credentials['authorizationToken'])\n .replace(b'AWS:', b'')\n .decode('utf-8'))\n\n ecr_url = ecr_credentials['proxyEndpoint']\n\n # get Docker to login/authenticate with ECR\n docker_client.login(\n username=ecr_username, password=ecr_password, registry=ecr_url)\n\n # tag image for AWS ECR\n ecr_repo_name = '{}/{}'.format(\n ecr_url.replace('https://', ''), LOCAL_REPOSITORY)\n\n image.tag(ecr_repo_name, tag='latest')\n\n # push image to AWS ECR\n push_log = docker_client.images.push(ecr_repo_name, tag='latest')\n\n # force new deployment of ECS service\n ecs_client = boto3.client(\n 'ecs', aws_access_key_id=access_key_id,\n aws_secret_access_key=secret_access_key, region_name=aws_region)\n\n ecs_client.update_service(\n cluster=ECS_CLUSTER, service=ECS_SERVICE, forceNewDeployment=True)\n\n return None", "def _start_instance(self, resource_handler):\n log.debug('Starting container')\n cli = resource_handler.cli\n #host_config=cli.create_host_config(network_mode=self.network_mode)\n container = cli.create_container(\n image='{0.image}:{0.tag}'.format(self),\n command=self.command,\n #host_config=host_config,\n environment=self.env\n )\n\n cli.start(container.get('Id'))\n log.debug('Started container [%s]', container)\n return str(container)", "def build_container_image(self) -> None:\n 
print_version_of_tools()\n try:\n self.fs_watcher.start()\n runner = PluginsRunner(self,\n self.plugins_conf,\n self.plugin_files,\n self.keep_plugins_running,\n plugins_results=self.data.plugins_results)\n runner.run()\n finally:\n self.fs_watcher.finish()", "def create_node(self, **kwargs):\n default = \"ubuntu.precise\"\n template = {\"name\":\"ubuntu\", \"args\":[]}\n if 'image' not in kwargs:\n kwargs['image'] = default\n \n for image in self.list_images():\n if image.name == kwargs['image']:\n template = {\"name\":image.extra[\"template_name\"],\n \"args\":image.extra[\"template_args\"]\n }\n \n name = kwargs['name']\n container = {\n \"cgroups\": [],\n \"name\": name,\n \"conf\": [],\n \"template\": template\n }\n \n self.connection.request(action=\"/v1/containers\", method=\"POST\", data=json.dumps(container))\n self.connection.request(action=\"/v1/containers/%s/actions/start\" % name, method=\"POST\")\n return self.get_node(name)", "def _load(self, resource_handler):\n log.info('[%s] Loading Docker image origin=%r image=%r tag=%r',\n resource_handler.name, self.origin, self.image, self.tag)\n if self.origin == 'dockerhub':\n resource_handler.cli.pull(repository=self.image, tag=self.tag)\n elif self.origin == 'local':\n pass\n else:\n resource_handler.cli.import_image_from_url(\n url=self.origin,\n repository=self.image,\n tag=self.tag\n )", "def build_docker(c):\n tag = c.run('git describe', hide=True)\n docker_img = f'{docker_repo}:{tag.stdout.strip()}'\n c.run(f'docker build -t {docker_img} .')", "def _build_container(\n self, target_image, odcs, repo_type, repo_list, terminate_event,\n scratch, record):\n self.logger.info(\"Building image: %s\" % target_image)\n cmd_list = [\"rhpkg\", \"--path=%s\" % self.distgit_dir]\n\n if self.runtime.user is not None:\n cmd_list.append(\"--user=%s\" % self.runtime.user)\n\n cmd_list += (\n \"container-build\",\n \"--nowait\",\n )\n\n if odcs:\n if odcs == 'signed':\n odcs = 'release' # convenience option for those used to the old types\n cmd_list.append('--signing-intent')\n cmd_list.append(odcs)\n else:\n if repo_type:\n repo_list = list(repo_list) # In case we get a tuple\n repo_list.append(self.metadata.cgit_url(\".oit/\" + repo_type + \".repo\"))\n\n if repo_list:\n # rhpkg supports --repo-url [URL [URL ...]]\n cmd_list.append(\"--repo-url\")\n cmd_list.extend(repo_list)\n\n if scratch:\n cmd_list.append(\"--scratch\")\n\n # Run the build with --nowait so that we can immediately get information about the brew task\n rc, out, err = exectools.cmd_gather(cmd_list)\n\n if rc != 0:\n # Probably no point in continuing.. can't contact brew?\n self.logger.info(\"Unable to create brew task: out={} ; err={}\".format(out, err))\n return False\n\n # Otherwise, we should have a brew task we can monitor listed in the stdout.\n out_lines = out.splitlines()\n\n # Look for a line like: \"Created task: 13949050\" . 
Extract the identifier.\n task_id = next((created_line.split(\":\")[1]).strip() for created_line in out_lines if\n created_line.startswith(\"Created task:\"))\n\n record[\"task_id\"] = task_id\n\n # Look for a line like: \"Task info: https://brewweb.engineering.redhat.com/brew/taskinfo?taskID=13948942\"\n task_url = next((info_line.split(\":\", 1)[1]).strip() for info_line in out_lines if\n info_line.startswith(\"Task info:\"))\n\n self.logger.info(\"Build running: {}\".format(task_url))\n\n record[\"task_url\"] = task_url\n\n # Now that we have the basics about the task, wait for it to complete\n error = watch_task(self.logger.info, task_id, terminate_event)\n\n # Looking for something like the following to conclude the image has already been built:\n # BuildError: Build for openshift-enterprise-base-v3.7.0-0.117.0.0 already exists, id 588961\n if error is not None and \"already exists\" in error:\n self.logger.info(\"Image already built against this dist-git commit (or version-release tag): {}\".format(target_image))\n error = None\n\n # Gather brew-logs\n logs_dir = \"%s/%s\" % (self.runtime.brew_logs_dir, self.metadata.name)\n logs_rc, _, logs_err = exectools.cmd_gather([\"brew\", \"download-logs\", \"-d\", logs_dir, task_id])\n\n if logs_rc != 0:\n self.logger.info(\"Error downloading build logs from brew for task %s: %s\" % (task_id, logs_err))\n\n if error is not None:\n # An error occurred. We don't have a viable build.\n self.logger.info(\"Error building image: {}, {}\".format(task_url, error))\n return False\n\n self.logger.info(\"Successfully built image: {} ; {}\".format(target_image, task_url))\n return True", "def build_test_version_of_container_image(\n image_type: ImageType,\n image_builder_cls: Type[ContainerisedAgentBuilder],\n architecture: CpuArch,\n result_image_name: str,\n ready_image_oci_tarball: pl.Path = None,\n install_additional_test_libs: bool = True,\n):\n\n image_builder = image_builder_cls()\n\n registry_container_name = \"agent_image_e2e_test_registry\"\n\n delete_container(container_name=registry_container_name)\n\n # Create temporary local registry to push production image there.\n subprocess.run(\n [\n \"docker\",\n \"run\",\n \"-d\",\n \"--rm\",\n \"-p=5000:5000\",\n f\"--name={registry_container_name}\",\n \"registry:2\",\n ],\n check=True,\n )\n try:\n all_image_tags = image_builder.generate_final_registry_tags(\n image_type=image_type,\n registry=\"localhost:5000\",\n name_prefix=\"user\",\n tags=[\"prod\"],\n )\n\n # Publish image to the local registry\n image_builder.publish(\n image_type=image_type,\n tags=all_image_tags,\n existing_oci_layout_tarball=ready_image_oci_tarball,\n no_verify_tls=True,\n )\n\n prod_image_tag = all_image_tags[0]\n if install_additional_test_libs:\n\n # Build agent image requirements, because it also includes requirements (like coverage) for testing.\n requirement_libs_dir = image_builder.build_requirement_libs(\n architecture=architecture,\n )\n\n # Build testable image.\n buildx_build(\n dockerfile_path=_PARENT_DIR / \"Dockerfile\",\n context_path=_PARENT_DIR,\n architectures=[architecture],\n build_contexts={\n \"prod_image\": f\"docker-image://{prod_image_tag}\",\n \"requirement_libs\": str(requirement_libs_dir),\n },\n output=DockerImageBuildOutput(\n name=result_image_name,\n ),\n )\n else:\n subprocess.run(\n [\n \"docker\",\n \"pull\",\n prod_image_tag,\n ],\n check=True,\n )\n\n subprocess.run(\n [\n \"docker\",\n \"tag\",\n prod_image_tag,\n result_image_name,\n ],\n check=True,\n )\n finally:\n 
delete_container(container_name=registry_container_name)\n\n return result_image_name", "def dockerize_test(ctx, binary, skip_cleanup=False):\n import docker\n\n client = docker.from_env()\n temp_folder = tempfile.mkdtemp(prefix=\"ddtest-\")\n\n ctx.run(\"cp %s %s/test.bin\" % (binary, temp_folder))\n\n with open(\"%s/Dockerfile\" % temp_folder, 'w') as stream:\n stream.write(\n \"\"\"FROM debian:stretch-slim\nENV DOCKER_DD_AGENT=yes\nWORKDIR /\nADD https://github.com/docker/compose/releases/download/1.16.1/docker-compose-Linux-x86_64 /bin/docker-compose\nRUN echo \"1804b0ce6596efe707b9cab05d74b161833ed503f0535a937dd5d17bea8fc50a /bin/docker-compose\" > sum && \\\n sha256sum -c sum && \\\n chmod +x /bin/docker-compose\nCMD /test.bin\nCOPY test.bin /test.bin\n\"\"\"\n )\n # Handle optional testdata folder\n if os.path.isdir(\"./testdata\"):\n ctx.run(\"cp -R testdata %s\" % temp_folder)\n stream.write(\"COPY testdata /testdata\")\n\n test_image, _ = client.images.build(path=temp_folder, rm=True)\n\n scratch_volume = client.volumes.create()\n\n test_container = client.containers.run(\n test_image.id,\n detach=True,\n pid_mode=\"host\", # For origin detection\n environment=[\"SCRATCH_VOLUME_NAME=\" + scratch_volume.name, \"SCRATCH_VOLUME_PATH=/tmp/scratch\",],\n volumes={\n '/var/run/docker.sock': {'bind': '/var/run/docker.sock', 'mode': 'ro'},\n '/proc': {'bind': '/host/proc', 'mode': 'ro'},\n '/sys/fs/cgroup': {'bind': '/host/sys/fs/cgroup', 'mode': 'ro'},\n scratch_volume.name: {'bind': '/tmp/scratch', 'mode': 'rw'},\n },\n )\n\n exit_code = test_container.wait()['StatusCode']\n\n print(test_container.logs(stdout=True, stderr=False, stream=False))\n\n sys.stderr.write(test_container.logs(stdout=False, stderr=True, stream=False).decode(sys.stderr.encoding))\n\n if not skip_cleanup:\n shutil.rmtree(temp_folder)\n test_container.remove(v=True, force=True)\n scratch_volume.remove(force=True)\n client.images.remove(test_image.id)\n\n if exit_code != 0:\n raise Exit(code=exit_code)", "def deploy_component(profile, image, instance_name, docker_config, should_wait=False,\n logins=[]):\n ports = docker_config.get(\"ports\", None)\n hcp = doc.add_host_config_params_ports(ports=ports)\n volumes = docker_config.get(\"volumes\", None)\n hcp = doc.add_host_config_params_volumes(volumes=volumes, host_config_params=hcp)\n # Thankfully passing in an IP will return back an IP\n dh = profile.docker_host.split(\":\")[0]\n _, _, dhips = socket.gethostbyname_ex(dh)\n\n if dhips:\n hcp = doc.add_host_config_params_dns(dhips[0], hcp)\n else:\n raise DockerConstructionError(\"Could not resolve the docker hostname:{0}\".format(dh))\n\n envs = build_envs(profile, docker_config, instance_name)\n client = get_docker_client(profile, logins=logins)\n\n config = doc.create_container_config(client, image, envs, hcp)\n\n return _run_container(client, config, name=instance_name, wait=should_wait)", "def do_run(cs, args):\n opts = {}\n opts['name'] = args.name\n opts['image'] = args.image\n opts['memory'] = args.memory\n opts['cpu'] = args.cpu\n opts['environment'] = zun_utils.format_args(args.environment)\n opts['workdir'] = args.workdir\n opts['auto_remove'] = args.auto_remove\n opts['labels'] = zun_utils.format_args(args.label)\n opts['image_pull_policy'] = args.image_pull_policy\n opts['image_driver'] = args.image_driver\n opts['hints'] = zun_utils.format_args(args.hint)\n opts['nets'] = zun_utils.parse_nets(args.net)\n opts['mounts'] = zun_utils.parse_mounts(args.mount)\n opts['runtime'] = args.runtime\n 
opts['hostname'] = args.hostname\n opts['disk'] = args.disk\n opts['availability_zone'] = args.availability_zone\n opts['command'] = args.command\n opts['registry'] = args.registry\n opts['host'] = args.host\n if args.entrypoint:\n opts['entrypoint'] = zun_utils.parse_entrypoint(args.entrypoint)\n if args.healthcheck:\n opts['healthcheck'] = zun_utils.parse_health(args.healthcheck)\n\n if args.auto_heal:\n opts['auto_heal'] = args.auto_heal\n if args.security_group:\n opts['security_groups'] = args.security_group\n if args.expose_port:\n opts['exposed_ports'] = zun_utils.parse_exposed_ports(args.expose_port)\n if args.restart:\n opts['restart_policy'] = zun_utils.check_restart_policy(args.restart)\n if args.interactive:\n opts['interactive'] = True\n if args.privileged:\n opts['privileged'] = True\n opts = zun_utils.remove_null_parms(**opts)\n container = cs.containers.run(**opts)\n _show_container(container)\n container_uuid = getattr(container, 'uuid', None)\n if args.interactive:\n ready_for_attach = False\n while True:\n container = cs.containers.get(container_uuid)\n if zun_utils.check_container_status(container, 'Running'):\n ready_for_attach = True\n break\n if zun_utils.check_container_status(container, 'Error'):\n raise exceptions.ContainerStateError(container_uuid)\n print(\"Waiting for container start\")\n time.sleep(1)\n if ready_for_attach is True:\n response = cs.containers.attach(container_uuid)\n websocketclient.do_attach(cs, response, container_uuid, \"~\", 0.5)\n else:\n raise exceptions.InvalidWebSocketLink(container_uuid)", "def _build(build_context,\n image_tag,\n image_name,\n nocache,\n credstore_env=None,\n registries=None):\n _logger.info('Starting build ...')\n\n # Build the image\n docker_builder = DockerBuilder(\n build_context=build_context,\n image_name=image_name,\n image_tag=image_tag,\n credstore_env=credstore_env,\n registries=registries,\n )\n docker_builder.login_private_registries()\n if docker_builder.check_image():\n # Image already built\n docker_builder.clean()\n return docker_builder\n if not docker_builder.build(nocache=nocache):\n docker_builder.clean()\n raise BuildException('The docker image could not be built.')\n return docker_builder", "def _ensure_image(testkit_path, branch_name, artifacts_path):\n # Construct Docker image name from branch name\n image_name = \"runner:%s\" % branch_name\n image_path = os.path.join(testkit_path, \"runner_image\")\n docker.build_and_tag(image_name, image_path, log_path=artifacts_path)\n\n return image_name", "def _put_container_on_host(self, container_name):\n with hide(\"output\", \"warnings\", \"running\"):\n # first see if container is already present on host\n host_result = self._execute_root(\n \"docker images -q {cn}\".format(cn=container_name))\n if len(host_result.stdout) > 0:\n print(\"Found %s on host\" % container_name)\n return True\n # now try to pull from Docker Hub\n hub_result = self._execute_root(\"docker pull {cn}\".format(cn=container_name),\n warn_only=True)\n if hub_result.return_code == 0:\n print(\"Found %s in Docker hub\" % container_name)\n return True\n\n # assume container_name refers to a local container and\n # copy it to host\n local_result = local(\n \"docker images -q {cn}\".format(cn=container_name))\n\n if len(local_result.stdout) > 0:\n saved_fname = container_name.replace(\"/\", \"_\")\n subprocess.call(\"docker save -o /tmp/{fn}.tar {cn}\".format(\n fn=saved_fname,\n cn=container_name))\n tar_loc = \"/tmp/{fn}.tar\".format(fn=saved_fname)\n self._execute_put(tar_loc, 
tar_loc)\n self._execute_root(\"docker load -i {loc}\".format(loc=tar_loc))\n # self._execute_root(\"docker tag {image_id} {cn}\".format(\n # image_id=image_id, cn=cn))\n # now check to make sure we can access it\n host_result = self._execute_root(\n \"docker images -q {cn}\".format(cn=container_name))\n if len(host_result.stdout) > 0:\n print(\"Successfuly copied %s to host\" % container_name)\n return True\n else:\n warn(\n \"Problem copying container %s to host\" %\n container_name)\n return False\n\n # out of options\n warn(\"Could not find %s, please try with a valid \"\n \"container docker image\")\n return False", "def docker_operation(dut, docker_name, operation):\n command = 'docker {} {}'.format(operation, docker_name)\n return st.config(dut, command)", "def test_create_container(self):\n pass", "def render_dockerfile(self):\n logger.info(\"Rendering Dockerfile...\")\n\n if self._params.get('redhat'):\n self._inject_redhat_defaults()\n\n self.image['pkg_manager'] = self._params.get('package_manager', 'yum')\n self.image.process_defaults()\n\n template_file = os.path.join(os.path.dirname(__file__),\n '..',\n 'templates',\n 'template.jinja')\n loader = FileSystemLoader(os.path.dirname(template_file))\n env = Environment(loader=loader, trim_blocks=True, lstrip_blocks=True)\n env.globals['helper'] = TemplateHelper()\n env.globals['addhelp'] = self._params.get('addhelp')\n\n template = env.get_template(os.path.basename(template_file))\n\n dockerfile = os.path.join(self.target,\n 'image',\n 'Dockerfile')\n if not os.path.exists(os.path.dirname(dockerfile)):\n os.makedirs(os.path.dirname(dockerfile))\n\n with open(dockerfile, 'wb') as f:\n f.write(template.render(\n self.image).encode('utf-8'))\n logger.debug(\"Dockerfile rendered\")\n\n if self.image.get('help', {}).get('template', \"\"):\n help_template_path = self.image['help']['template']\n elif self._params.get('help_template'):\n help_template_path = self._params['help_template']\n else:\n help_template_path = os.path.join(os.path.dirname(__file__),\n '..',\n 'templates',\n 'help.jinja')\n\n help_dirname, help_basename = os.path.split(help_template_path)\n loader = FileSystemLoader(help_dirname)\n env = Environment(loader=loader, trim_blocks=True, lstrip_blocks=True)\n env.globals['helper'] = TemplateHelper()\n help_template = env.get_template(help_basename)\n\n helpfile = os.path.join(self.target, 'image', 'help.md')\n with open(helpfile, 'wb') as f:\n f.write(help_template.render(\n self.image).encode('utf-8'))\n logger.debug(\"help.md rendered\")", "def _Build(self, image):\n image = _ContainerImage(image)\n build_start = time.time()\n if not FLAGS.local_container_build:\n try:\n # Build the image remotely using an image building service.\n self.RemoteBuild(image)\n self.remote_build_times[image.name] = time.time() - build_start\n return\n except NotImplementedError:\n pass\n\n self.PrePush(image)\n # Build the image locally using docker.\n build_start = time.time()\n self.LocalBuildAndPush(image)\n self.local_build_times[image.name] = time.time() - build_start" ]
[ "0.73722464", "0.6459526", "0.641473", "0.6365599", "0.6323354", "0.62596554", "0.6234621", "0.6196366", "0.61554444", "0.6126631", "0.611109", "0.60908455", "0.60905975", "0.607649", "0.60459405", "0.6043136", "0.60285693", "0.60256535", "0.6020434", "0.5990217", "0.59659857", "0.5961286", "0.594661", "0.592377", "0.59017026", "0.59013253", "0.5898091", "0.585934", "0.5858884", "0.5850641" ]
0.70310724
1
Starts the container, and optionally executes a callback that is passed the container's info
def start(self, callback=None):
    self.logger.debug('Starting container {}'.format(self.image))
    response = self.client.start(container=self.container['Id'])
    if response:
        self.logger.warning(response)
    self.logger.debug('Checking if {} service is ready'.format(self.name))
    timed(lambda: self.running, time_out=30, exit_on=True)
    timed(lambda: self.ready, time_out=30, exit_on=True)
    self.logger.debug('Service {} is ready'.format(self.name))
    if callable(callback):
        callback(container=self.container)
    self.logger.debug('Startup of {} complete'.format(self.name))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def start(self, container: Container):", "def start(self):\n print \"start: starting container on {}\".format(self.daemon.host_name)\n\n if self.details is None:\n return None\n else:\n if not self.details['State']['Running'] is True:\n result = self.daemon.connection.start(self.config['release_name'],\n port_bindings=self.config['s_ports'],\n binds=self.config['binds'],\n links=self.config['links'])\n return result\n else:\n return None", "def launch_container(self, service_info):\n user, instance = _get_user_and_instance(\n self.girder_client, service_info['instanceId'])\n tale = self.girder_client.get(f\"/tale/{service_info['taleId']}\")\n\n self.job_manager.updateProgress(\n message='Starting container', total=LAUNCH_CONTAINER_STEP_TOTAL,\n current=1, forceFlush=True)\n\n print(\"Launching container for a Tale...\")\n if 'imageInfo' not in tale:\n\n # Wait for image to be built\n tic = time.time()\n timeout = 180.0\n time_interval = 5\n\n while time.time() - tic < timeout:\n tale = self.girder_client.get('/tale/{taleId}'.format(**instance))\n if 'imageInfo' in tale and 'digest' in tale['imageInfo']:\n break\n msg = f\"Waiting for image build to complete. ({time_interval}s)\"\n logging.info(msg)\n print(msg)\n time.sleep(5)\n\n container_config = _get_container_config(self.girder_client, tale)\n service, attrs = _launch_container(service_info, container_config)\n print(\n f\"Started a container using volume: {service_info['volumeName']} \"\n f\"on node: {service_info['nodeId']}\"\n )\n\n # wait until task is started\n tic = time.time()\n timeout = 300.0\n started = False\n\n print(\"Waiting for the environment to be accessible...\")\n while time.time() - tic < timeout:\n try:\n status = service.tasks()[0]['Status']\n\n if status['State'] in {\"failed\", \"rejected\"}:\n raise ValueError(\"Failed to start environment: %s\" % status['Err'])\n elif status['State'] == \"running\":\n started = True\n break\n\n except IndexError:\n started = False\n\n time.sleep(0.2)\n\n if not started:\n raise ValueError(\"Tale did not start before timeout exceeded\")\n\n print(\"Environment is up and running.\")\n self.job_manager.updateProgress(\n message='Container started', total=LAUNCH_CONTAINER_STEP_TOTAL,\n current=LAUNCH_CONTAINER_STEP_TOTAL, forceFlush=True)\n\n service_info.update(attrs)\n service_info['name'] = service.name\n return service_info", "def run(self, container_config: ContainerConfig) -> Container:", "def _start_instance(self, resource_handler):\n log.debug('Starting container')\n cli = resource_handler.cli\n #host_config=cli.create_host_config(network_mode=self.network_mode)\n container = cli.create_container(\n image='{0.image}:{0.tag}'.format(self),\n command=self.command,\n #host_config=host_config,\n environment=self.env\n )\n\n cli.start(container.get('Id'))\n log.debug('Started container [%s]', container)\n return str(container)", "async def async_turn_on(self):\n if self._container:\n try:\n self._container.start()\n except Exception as ex:\n _LOGGER.info(\"Cannot start container ({})\".format(ex))", "def _execute_container(self):\n pass", "def ensure_container():\n return exec_fn(_init_container)", "def start_container(self):\n logger = logging.getLogger(self.dkr_name)\n logger.info(\"Starting up container\")\n\n try:\n svc = self.docker_client().containers.get(self.dkr_name)\n except Exception:\n svc = self.docker_client().containers.run(image=self.dkr_image,\n name=self.dkr_name,\n command=self.dkr_command,\n ports=self.dkr_ports,\n links=self.dkr_links,\n 
environment=self.dkr_env,\n detach=True,\n remove=self.dkr_remove)\n\n while svc.status != \"running\":\n svc.reload()\n sleep(1)\n\n self.dkr_container = svc", "def start_container(client, work_package, load_saved):\n package_path = os.path.join(PATH, \"work_packages\")\n\n client.containers.run(image=\"scrape_light\",\n environment=[\"PACKAGE=\"+work_package, \"LOAD_FILE=\" + load_saved,\n \"[email protected]\", \"PASSWORD=LA#kYs1#o:`Z\"],\n detach=True, tty=True, stdin_open=True,\n sysctls={\"net.ipv4.conf.all.rp_filter\": 2},\n privileged=True,\n devices=[\"/dev/net/tun\"],\n name=\"scrape_\" + str(work_package),\n cap_add=[\"NET_ADMIN\", \"SYS_MODULE\"],\n volumes={package_path: {\"bind\": \"/work_packages\"}})", "async def start(self, collection, docker):\n if not self.options:\n logger.debug(\"Watcher not configured\")\n return\n\n bind = {'bind': '/var/run/docker.sock', 'ro': False}\n volumes = {'/var/run/docker.sock': bind}\n ports = {}\n env = {'AWS_ACCESS_KEY_ID': self.options['AWS_ACCESS_KEY_ID'] or \"\",\n 'AWS_SECRET_ACCESS_KEY':\n self.options['AWS_SECRET_ACCESS_KEY'] or \"\"}\n\n logger.debug(\"Launching Watcher...\")\n await docker.run_containers(collection, self.info.name,\n \"python ./watch.py\", env=env,\n volumes=volumes, ports=ports,\n pid_mode=\"host\")", "def docker_container():\n if SETUP_SPLASH:\n dm = DockerManager()\n dm.start_container()\n\n try:\n requests.post('{}/_gc'.format(SPLASH_URL))\n except requests.exceptions.RequestException:\n pass\n\n yield", "def start(self, *args):\n return _SALOMERuntime.SalomeContainer_start(self, *args)", "def start(self):\n if self._callable:\n self._is_running = True\n self._run_client()", "def editor_cloud9_start():\n _require_editor_cloud9_dirs()\n docker_vars = _editor_cloud9_docker_vars()\n\n # start the container\n run('docker start `cat /home/%s/docker/ids/editor_cloud9_container`' %\n env.user)", "def docker_run(self) -> bool:\n containers = self.client.containers.list(filters={\"name\": self.cname})\n if containers:\n self.container = containers[0]\n return False\n\n info(\"Starting container {}...\".format(self.cname), nl=False)\n self.container = self.client.containers.run(\n image=self.image,\n detach=True,\n auto_remove=False,\n environment=self.env,\n hostname=self.dist,\n init=True,\n name=self.cname,\n remove=False,\n stdin_open=sys.stdin.isatty(),\n tty=True,\n volumes=self.volumes,\n entrypoint=\"bash\",\n command=[],\n )\n info(\"Done!\")\n\n return True", "def runContainer(purpose, externalPort, osVersion, containerName, debug=True, **kwargs):\n\n # Optional debug that prints a dict of options\n if debug == True:\n runSpecialOpts = dict(purpose=purpose, externalPort=externalPort, osVersion=osVersion, containerName=containerName)\n print(runSpecialOpts)\n\n # Determines what we do based on purpose\n if purpose == \"ssh\":\n if osVersion == \"centos7\":\n sshContainer = dockerClient.containers.run('centos7/systemd', privileged=False, volumes={'/sys/fs/cgroup': {'bind':'/sys/fs/cgroup', 'mode':'ro'}}, tmpfs={'/tmp':'exec', '/run':''}, detach=True, name=containerName, ports={'22/tcp':externalPort})\n elif osVersion == \"centos8\":\n sshContainer = dockerClient.containers.run('centos8/systemd', privileged=False, volumes={'/sys/fs/cgroup': {'bind':'/sys/fs/cgroup', 'mode':'ro'}}, tmpfs={'/tmp':'exec', '/run':''}, detach=True, name=containerName, ports={'22/tcp':externalPort})\n\n containersList = dockerClient.containers.list(filters={'name': containerName})\n\n if len(containersList) == 1:\n creationStatus = 
True\n else:\n creationStatus = False\n\n return creationStatus", "def run_container(self,\n name: str,\n command: Optional[str] = None,\n env: Optional[StrDict] = None,\n volumes: Optional[Dict[str, StrDict]] = None,\n ports: Optional[Dict[Any, Any]] = None,\n dns: Optional[List[str]] = None,\n pid_mode: Optional[str] = None,\n entrypoint: Optional[str] = None):\n if volumes is None:\n volumes = {}\n if dns is None:\n dns = []\n\n expose = []\n port_bindings = {}\n for port in ports.keys():\n if isinstance(port, tuple):\n proto = port[1] if len(port) == 2 else \"tcp\"\n key = \"%d/%s\" % (port[0], proto)\n else:\n key = port\n port_bindings[key] = ports[port]\n expose.append(port)\n\n result = self._client.create_container(\n name, command=command, environment=env,\n volumes=[volume['bind'] for volume in volumes.values()],\n ports=expose,\n entrypoint=entrypoint)\n\n container = result[\"Id\"]\n result = self._client.start(container, binds=volumes,\n port_bindings=port_bindings, dns=dns,\n pid_mode=pid_mode)\n response = self._client.inspect_container(container)\n return response", "def run(self):\n self.loop.spawn_callback(self.main)\n self.loop.start()\n if self.exc_info:\n six.reraise(*self.exc_info)", "def cmd_CONTAINER(self, line):\r\n config = ContainerOptions(self.terminal)\r\n\r\n try:\r\n config.parseOptions(line)\r\n cmd = config.subCommand\r\n opts = config.subOptions if hasattr(config, 'subOptions') else {}\r\n except usage.UsageError as errortext:\r\n self.terminal.write(\"BUG in usage: {0}\".format(errortext))\r\n else:\r\n if cmd == 'start':\r\n if (opts['name']):\r\n data = {}\r\n if opts.get('group'):\r\n data['group'] = opts['group']\r\n if opts.get('groupIp'):\r\n data['groupIp'] = opts['groupIp']\r\n if opts.get('size'):\r\n data['size'] = opts['size']\r\n if opts.get('bandwidth'):\r\n data['bandwidth'] = opts['bandwidth']\r\n if opts.get('memory'):\r\n data['memory'] = opts['memory']\r\n if opts.get('specialopts'):\r\n data['specialFeatures'] = opts['specialopts']\r\n self.callToUser('createContainer', 'robot', opts['name'],\r\n data)\r\n\r\n elif config['stop']:\r\n self.callToUser('destroyContainer', 'robot', config['stop'])\r\n elif config['services']:\r\n self.callToRosProxy('services', config['services'])\r\n elif config['topics']:\r\n self.callToRosProxy('topics', config['topics'])\r\n elif config['list']:\r\n self.callToUserAndDisplay('list_containers', 'console')\r\n elif config['username']:\r\n self.callToUserAndDisplay('list_containers_by_user', 'admin',\r\n config['username'])", "def test_start(self):\n\n message = {\"method\": \"start\",\n \"params\": {\"elem\": self.container_to_run}}\n response = yield self._get_response(message)\n\n self.assertIsInstance(response, dict)\n self.assertEqual(response[\"method\"], \"start\")\n self.assertIsInstance(response[\"result\"], list)\n\n container_name = \"/\" + self.container_to_run\n\n containers = {i[0]: i[1] for i in response[\"result\"]}\n self.assertIn(container_name, containers.keys(),\n \"Container not found\")\n\n find_up_status = containers[container_name].lower().find(\"up\")\n\n self.assertEqual(find_up_status, 0, \"Container is not running\")", "def start(params) -> None:\n check_root()\n start_microservice(params)\n load_kernel_module(params)\n start_streamer(params)", "def start(name, call=None):\n datacenter_id = get_datacenter_id()\n conn = get_conn()\n node = get_node(conn, name)\n\n conn.start_server(datacenter_id=datacenter_id, server_id=node[\"id\"])\n\n return True", "def 
_exec_command_in_container(client, container, command):\n exec_id = client.exec_create(container, command)\n output = client.exec_start(exec_id).decode('utf-8')\n logger.info(output)\n return output", "def start( *args, **kwargs ):", "def start(self):\n try:\n print('Starting container: {}'.format(self.cfg['name']))\n start = self.docker_client.run(**self.env)\n except docker_errors.APIError as error:\n print(error)\n print('Container {} is already running'.format(self.cfg['name']))\n return self.cfg['name']\n\n return start", "def start_microservice(params) -> None:\n print(\"Starting microservice...\")\n cmd = \"docker run --rm --publish \" + params['microservice_port'] + \":\" + \\\n params['microservice_port'] + \" --detach --name bg_changer \" + \\\n \"--mount type=bind,source=\" + params['config_file'] + \\\n \",destination=/fakewebcam.ini bg_changer >/dev/null 2>&1\"\n if os.system(cmd) == 0:\n print(\" Success !\")\n else:\n print(\" Failure !\")", "def control_container(client, cont_name, action):\n try:\n container = client.containers.get(cont_name)\n print(\"{}ing container {}\".format(action, cont_name))\n if action == \"start\":\n container.start(wait=True)\n elif action == \"stop\":\n container.stop(wait=True)\n else:\n raise NameError(\"Uknown action specified: {}\".format(action))\n\n except pylxd.exceptions.LXDAPIException as err:\n print(\"error when trying to {} container {}\".format(action, cont_name))\n raise err", "def start(self):\n if self.session.status == Session.SessionStatus.RUNNING:\n return\n self._make_fifo()\n client = docker.from_env()\n\n volumes = {}\n if self.session.notebook:\n volumes.update(get_accessible_datasets_mount_dict(self.session.notebook))\n volumes.update({self.fifo_path: {\"bind\": f\"/{self.fifo_name}\"}})\n\n self.container = client.containers.run(\n \"m.docker-registry.ir/python:3.8-slim-buster\",\n [\"sh\", \"-c\", f\"python -i -u <>/{self.fifo_name}\"],\n stdin_open=True,\n detach=True,\n volumes=volumes\n )\n self.session.container_id = str(self.container.id)\n self.session.status = Session.SessionStatus.RUNNING\n self.session.run_counter = 1\n self.session.save()", "def docker_start(args, container_id, options=None): # type: (EnvironmentConfig, str, t.Optional[t.List[str]]) -> t.Tuple[t.Optional[str], t.Optional[str]]\n if not options:\n options = []\n\n for _iteration in range(1, 3):\n try:\n return docker_command(args, ['start'] + options + [container_id], capture=True)\n except SubprocessError as ex:\n display.error(ex.message)\n display.warning('Failed to start docker container \"%s\". Waiting a few seconds before trying again.' % container_id)\n time.sleep(3)\n\n raise ApplicationError('Failed to run docker container \"%s\".' % container_id)" ]
[ "0.71134686", "0.64065075", "0.6205256", "0.62037045", "0.61491776", "0.61437225", "0.6138378", "0.59322", "0.5895821", "0.58885634", "0.58424735", "0.5781214", "0.5735789", "0.57127005", "0.5699765", "0.5662697", "0.5651051", "0.56244475", "0.55596524", "0.55415326", "0.5539536", "0.55114067", "0.54982877", "0.54792297", "0.54791313", "0.5473533", "0.5442059", "0.5417381", "0.5403245", "0.53804356" ]
0.7372831
0
Run the provisioner of this class for a set of services
def provision(self, services, requirements=None):
    if hasattr(self, 'service_provisioner'):
        provisioner = self.service_provisioner(services=services,
                                               container=self,
                                               requirements=requirements)
        provisioner()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def provision(self, services, requirements=None):\n try:\n super(SolrDockerRunner, self).provision(services=services, requirements=requirements)\n except UnknownServiceError as error:\n self.logger.warning('Skipping unknown service: {}'.format(error))\n pass", "def perform_setup(self, services):\n pass", "def run_providers(self, argv):\n\n for name, provider in self.providermanager:\n provider = provider(self)\n self.produce_output(provider.title,\n provider.location,\n provider.run(argv))", "async def schedule_services_run(self):\n self._running_svcs = [svc.run() for svc in self.services]\n await self.add_task_fn(self._running_svcs)", "def run(self):\n if (self._args.as_json):\n self._log.debug(\"Getting services as json\")\n all_services = service.get_all_services(as_json=True)\n else:\n self._log.debug(\"Getting services as json schema\")\n all_services = service.get_all_services(as_json_schema=True)\n\n if (self._args.pretty):\n self._log.debug(\"Pretty-print formatting output\")\n import json\n all_services = json.dumps(json.loads(all_services), indent=4,\n separators=(',', ': '))\n\n if (self._args.file is not None):\n self._log.debug(\"Writing output\")\n try:\n outfile = open(self._args.file, 'w')\n outfile.write(all_services)\n outfile.close()\n except IOError as e:\n self._log.critical(\"Error while trying to write output: {}\".format(e.strerror))\n return 1\n else:\n print all_services\n\n return 0", "def provision(self, arguments):\n instance_name = arguments['<instance>']\n instance_name = self.activate(instance_name)\n\n vmrun = VMrun(self.vmx, self.user, self.password)\n\n if not vmrun.installedTools():\n puts_err(colored.red(\"Tools not installed\"))\n return\n\n provisioned = 0\n for i, provision in enumerate(self.get('provision', [])):\n\n if provision.get('type') == 'file':\n source = provision.get('source')\n destination = provision.get('destination')\n if utils.provision_file(vmrun, source, destination) is None:\n puts_err(colored.red(\"Not Provisioned\"))\n return\n provisioned += 1\n\n elif provision.get('type') == 'shell':\n inline = provision.get('inline')\n path = provision.get('path')\n args = provision.get('args')\n if not isinstance(args, list):\n args = [args]\n if utils.provision_shell(vmrun, inline, path, args) is None:\n puts_err(colored.red(\"Not Provisioned\"))\n return\n provisioned += 1\n\n else:\n puts_err(colored.red(\"Not Provisioned ({}\".format(i)))\n return\n else:\n puts_err(colored.green(\"Provisioned {} entries\".format(provisioned)))\n return\n\n puts_err(colored.red(\"Not Provisioned ({}\".format(i)))", "def run_services():\n for service in (\"minvd\", \"httpd\", \"ntpd\"):\n sudo(\"service %s start\" % service)\n sudo(\"chkconfig %s on\" % service)", "async def init_services(self) -> List[Service]:\n services = []\n schemas = defaultdict(dict)\n svc_classes = Service.get_plugins()\n\n schemas = Schema(self.schema_dir)\n if schemas:\n poller_schema = schemas.get_arrow_schema('sqPoller')\n poller_schema_version = SchemaForTable('sqPoller', schemas).version\n\n db_access = self._get_db_access(self.cfg)\n\n # Read the available services and iterate over them, discarding\n # the ones we do not need to instantiate\n svc_desc_files = Path(self.service_directory).glob('*.yml')\n\n for filename in svc_desc_files:\n with open(filename, 'r') as f:\n svc_def = yaml.safe_load(f.read())\n\n if not svc_def:\n logger.warning(f'Skip empty service file: {filename}')\n continue\n\n service = svc_def.get('service')\n if service in BLACKLIST_SERVICES:\n 
continue\n\n if all(service not in x for x in [self.svcs_list]):\n logger.warning(\n f\"Ignoring unspecified service {svc_def.get('service')}\"\n )\n continue\n\n if 'service' not in svc_def or 'apply' not in svc_def:\n logger.error(\n 'Ignoring invalid service file definition.'\n f\"'service' and 'apply' keywords: {filename}\"\n )\n continue\n\n period = svc_def.get('period', self.default_interval)\n for nos, cmds_desc in svc_def['apply'].items():\n\n # Check if the the current nos copies from another\n if isinstance(cmds_desc, dict) and 'copy' in cmds_desc:\n newval = svc_def['apply'].get(cmds_desc['copy'], None)\n if not newval:\n logger.error(\n f\"No device type {cmds_desc['copy']} to copy from,\"\n f\"for {nos} for service {svc_def['service']}\"\n )\n return\n cmds_desc = newval\n\n # Update the command description adding the\n # specification for the output parsing\n if isinstance(cmds_desc, list):\n for subele in cmds_desc:\n self._parse_nos_version(filename, svc_def, nos, subele)\n else:\n self._parse_nos_version(filename, svc_def, nos, cmds_desc)\n\n try:\n schema = SchemaForTable(svc_def['service'], schema=schemas)\n except Exception: # pylint: disable=broad-except\n logger.error(f\"No matching schema for {svc_def['service']}\")\n continue\n\n if schema.type == 'derivedRecord':\n # These are not real services and so ignore them\n continue\n\n # Valid service definition, add it to list\n # if the service has not a dedicated class, we will use the\n # default implementation\n class_to_use = svc_classes.get(svc_def['service'], Service)\n service = class_to_use(\n svc_def['service'],\n svc_def['apply'],\n period,\n svc_def.get('type', 'state'),\n svc_def.get('keys', []),\n svc_def.get('ignore-fields', []),\n schema,\n self.output_queue,\n db_access,\n self.run_mode\n )\n service.poller_schema = poller_schema\n service.poller_schema_version = poller_schema_version\n logger.info(f'Service {service.name} added')\n services.append(service)\n\n # Once done set the service list and return its content\n self._services = services\n return self._services", "def start_services(self):\n logger.info(\"Starting services: %s\", self.services)\n for service in self.services:\n with hide(*fab_quiet):\n sudo('service %s start' % service)", "def main():\n\n # import aws_ecs_services.arguments as arguments\n from .arguments import get_cli_arguments\n\n # args = arguments.get_cli_arguments()\n args = get_cli_arguments()\n\n by_service_dns = False\n by_service_name = False\n by_task_name = False\n list_clusters = False\n only_cluster_instances = False\n only_ec2_instances = False\n list_running_services = False\n list_running_tasks = False\n list_services = False\n list_projects = False\n use_config = False\n\n debug = args.debug\n if debug:\n logger.setLevel(logging.DEBUG)\n logger.debug(\"Show DEBUG information.\")\n stream_handler = logging.StreamHandler(sys.stdout)\n formatter = logging.Formatter(f\"%(lineno)s: {logging.BASIC_FORMAT}\")\n stream_handler.setFormatter(formatter)\n logger.addHandler(stream_handler)\n logger.propagate = False\n else:\n logger.setLevel(logging.INFO)\n\n # If a configuration file and a project are given,the configruation file is used.\n # Otherwise the cli ooptions are considerd.\n project = args.project\n # Variable replacement in config file uses '{service}'.\n service = args.service\n config = args.config\n if (\n os.path.exists(config)\n and project\n or args.subcommand\n in (\"list-configured-projects\", \"list-configured-services\")\n ):\n logger.info(f\"Loading 
config from: '{config}'.\")\n if not os.path.exists(config):\n logger.error(f\"No config file: '{config}'.\")\n return 1\n use_config = True\n\n if use_config:\n data = None\n try:\n with open(config, \"r\") as config_file:\n data = json.load(config_file)\n except (ValueError) as e:\n logger.error(\n f\"Check the JSON sytanx in the config file '{config}': '{str(e)}'\"\n )\n return 1\n logger.debug(f\"Data: {data}\")\n if not data or not isinstance(data, dict):\n logger.error(f\"Could not load configuration: '{data}'.\")\n return 1\n\n if use_config:\n region = data.get(\"region\", args.region)\n else:\n region = args.region\n\n if use_config:\n projects = data.get(\"projects\", {})\n if args.subcommand not in (\"list-configured-projects\"):\n if project not in projects:\n logger.error(\n f\"Missing configuration for project: '{project}'. Choose from {list(projects.keys())}.\"\n )\n return 1\n project_config = projects.get(project, None)\n if not project_config:\n logger.error(\n f\"Missing configuration for project: '{project}'. Choose from {list(projects.keys())}.\"\n )\n return 1\n region = project_config.get(\"region\", region)\n cluster_name = project_config.get(\"cluster\", \"\")\n # Variable replacement in config file uses '{cluster}'.\n cluster = cluster_name\n cluster_ = cluster\n\n # Get service-specific configuration.\n services = project_config.get(\"services\", {})\n service_config = None\n if services:\n service_config = services.get(service, None)\n logger.debug(f\"Service config: {service_config}\")\n if service_config:\n cluster_ = service_config.get(\"cluster\", cluster_name)\n\n cluster_name = replace_config(cluster_, \"cluster\", locals())\n else:\n cluster_name = args.cluster\n\n logger.info(f\"Working in: {region}\")\n\n session = boto3.session.Session()\n ecs_client = session.client(\"ecs\", region)\n ec2_client = session.client(\"ec2\", region)\n ssm_client = session.client(\"ssm\", region)\n\n if args.subcommand == \"by-service-dns\":\n by_service_dns = True\n if use_config:\n service_dns = project_config.get(\"dns\", \"\")\n service_dns_ = service_dns\n if service_config:\n service_dns_ = service_config.get(\"dns\", service_dns)\n service_dns = replace_config(service_dns_, \"service_dns\", locals())\n else:\n service_dns = args.dns\n if not service_dns:\n logger.error(f\"DNS name missing.\")\n return 1\n\n output_info = args.output\n elif args.subcommand == \"by-service-name\":\n by_service_name = True\n if use_config:\n service_name = project_config.get(\"name\", \"\")\n service_name_ = service_name\n if service_config:\n service_name_ = service_config.get(\"name\", service_name)\n service_name = replace_config(\n service_name_, \"service_name\", locals()\n )\n service_name = service_name if service_name else service\n else:\n service_name = args.name\n elif args.subcommand == \"by-task-name\":\n by_task_name = True\n if use_config:\n task_name = project_config.get(\"name\", \"\")\n task_name_ = task_name\n if service_config:\n task_name_ = service_config.get(\"name\", task_name)\n task_name = replace_config(task_name_, \"task_name\", locals())\n task_name = task_name if task_name else service\n else:\n task_name = args.name\n elif args.subcommand == \"list-ec2-instances\":\n only_ec2_instances = True\n elif args.subcommand == \"list-clusters\":\n list_clusters = True\n elif args.subcommand == \"list-instances\":\n only_cluster_instances = True\n elif args.subcommand == \"list-services\":\n list_running_services = True\n service_name = None\n elif args.subcommand 
== \"list-tasks\":\n list_running_tasks = True\n task_name = None\n elif args.subcommand == \"list-configured-services\":\n list_services = True\n service_name = None\n elif args.subcommand == \"list-configured-projects\":\n list_projects = True\n service_name = None\n\n if list_projects:\n if not use_config:\n logger.error(\"Only available when using a configuration file.\")\n return 1\n if not projects:\n logger.error(\n \"Could not load projects from configuration file: '{config}'.\"\n )\n return 1\n print(f\"Found in {config}.\")\n print(*list(projects.keys()), sep=\"\\n\")\n return\n\n # No 'cluster' necessary for 'list-clusters'.\n if not list_clusters and not only_ec2_instances and not cluster_name:\n logger.error(f\"Cluster name missing.\")\n return 1\n\n if list_services:\n if not use_config:\n logger.error(\"Only available when using a configuration file.\")\n return 1\n if not services:\n logger.error(\n \"Could not load services from configuration file: '{config}'.\"\n )\n return 1\n print(f\"Found in {config}.\")\n print(*services, sep=\"\\n\")\n return\n elif only_ec2_instances:\n instances = get_instances_form_ec2(client=ec2_client)\n print(json.dumps(instances))\n return\n elif list_clusters:\n clusters = get_clusters(client=ecs_client)\n print(\"\\n\".join(clusters))\n return\n elif only_cluster_instances:\n logger.info(f\"Checking cluster: {cluster_name}\")\n instance_ids = get_instance_ids_from_cluster(\n cluster=cluster_name, client=ecs_client\n )\n print(\" \".join(instance_ids))\n return\n elif by_service_name or list_running_services:\n logger.info(f\"Checking cluster: {cluster_name}\")\n instance_ids = get_instance_ids_from_cluster(\n cluster=cluster_name, client=ecs_client\n )\n instance_id = get_instance_id_by_service_name(\n instance_ids=instance_ids,\n service=service_name,\n list_services=list_running_services,\n client=ssm_client,\n region=region,\n )\n\n return\n elif by_task_name or list_running_tasks:\n logger.info(f\"Checking cluster: {cluster_name}\")\n instance_ids = get_tasks_information(\n task=task_name,\n list_tasks=list_running_tasks,\n cluster=cluster_name,\n client=ecs_client,\n )\n print(instance_ids)\n\n return\n elif by_service_dns:\n logger.info(f\"Checking cluster: {cluster_name}\")\n service_ip = get_host_ip(host_name=service_dns)\n logger.info(f\"IP of {service_dns} is {service_ip}\")\n logger.debug(f\"Output: {output_info}.\")\n if output_info == \"service\":\n print(service_ip)\n return\n else:\n logger.debug(f\"Get instance IDs for cluster:' {cluster_name}'.\")\n instance_ids = get_instance_ids_from_cluster(\n cluster=cluster_name, client=ecs_client\n )\n logger.debug(instance_ids)\n logger.debug(\"Get instance details.\")\n (\n instance_private_ip,\n instance_private_dns,\n instance_id,\n ) = get_instance_info_by_service_dns(\n instance_ids=instance_ids,\n service_ip=service_ip,\n client=ec2_client,\n )\n if output_info == \"ip\":\n print(instance_private_ip)\n return\n elif output_info == \"id\":\n print(instance_id)\n return\n elif output_info == \"all\":\n print(instance_private_ip, instance_id, instance_private_dns)\n return\n logger.error(f\"Not the expected result - nothing accomplished.\")\n return 1", "def _add_services(self):\n # Services and relations which are present merely to satisfy\n # required_interfaces and workload status are not inspected.\n # Fix me. 
Inspect those too.\n this_service = {'name': 'neutron-openvswitch'}\n other_services = [\n {'name': 'nova-compute'},\n {'name': 'nova-cloud-controller'},\n {'name': 'rabbitmq-server'},\n {'name': 'keystone'},\n {'name': 'glance'},\n {'name': 'neutron-api'},\n self.get_percona_service_entry(),\n ]\n if self._get_openstack_release() >= self.bionic_train:\n other_services.append({'name': 'placement'})\n super(NeutronOVSBasicDeployment, self)._add_services(this_service,\n other_services)", "def provision(self, urns, client_cert, credentials, best_effort, end_time,\n geni_users):\n se_manifest, se_slivers, last_slice = SERMv3ManifestFormatter(), [], \"\"\n slivers=[]\n\n for urn in urns:\n if self._verify_users:\n logger.debug(\"provision: authenticate the user...\")\n client_urn, client_uuid, client_email =\\\n self.auth(client_cert, credentials, urn, (\"renewsliver\",))\n logger.info(\"Client urn=%s, uuid=%s, email=%s\" % (\n client_urn, client_uuid, client_email,))\n\n logger.info(\"urn=%s, best_effort=%s, end_time=%s, geni_users=%s\" % (\n urn, best_effort, end_time, geni_users,))\n\n links_db, nodes, links = self.SESlices.get_link_db(urn)\n sliceVlansPairs = self.SESlices.get_slice_vlan_pairs(urn)\n self.SESlices._create_manifest_from_req_n_and_l(se_manifest, nodes,links, sliceVlansPairs)\n reservation_ports = self.SESlices._allocate_ports_in_slice(nodes)[\"ports\"]\n\n if end_time != None:\n alarm_time = end_time\n SESchedulerService.get_scheduler().add_job( se_job_unprovision,\n \"date\",\n run_date=alarm_time,\n args=[datetime.now(),\n links_db,\n urn])\n\n \n # Retrieve allocation status and modify after the method's operation\n links_db, _, _ = self.__update_fetch_allocation_status_slivers(urn, \"geni_provisioned\")\n\n if end_time != None:\n alarm_time = end_time\n SESchedulerService.get_scheduler().add_job( se_job_unprovision,\n \"date\",\n run_date=alarm_time,\n args=[datetime.now(),\n links_db,\n urn])\n\n for sliver in links_db[\"geni_sliver_urn\"]:\n slivers.append( \n { \n \"geni_sliver_urn\": sliver,\n \"geni_expires\": end_time,\n \"geni_allocation_status\": links_db[\"geni_allocation_status\"],\n \"geni_operational_status\": links_db[\"geni_operational_status\"],\n }\n )\n\n logger.info(\"provision successfully completed: %s\", urn)\n\n return str(se_manifest), slivers", "def services(self, services):\n\n self._services = services", "def services(self, services):\n\n self._services = services", "def _add_services(self):\n this_service = {'name': 'keystone'}\n other_services = [\n {'name': 'percona-cluster', 'constraints': {'mem': '3072M'}},\n {'name': 'rabbitmq-server'}, # satisfy wrkload stat\n {'name': 'cinder'},\n ]\n super(KeystoneBasicDeployment, self)._add_services(this_service,\n other_services)", "def tempest_ceph_services_tun(self):\n self.helper_ceph_services('tun')", "def add_services(self):\n # first get the names\n names = str(self.client.console_execute('services -c name {0}\\n'.format(self.ip))[b'data'])\n while not 'name' in names:\n sleep(10)\n names = self.client.console_read()\n names = names.split('\\n')\n for row in names:\n if self.ip in row:\n row = strip_whitespaces(row)\n self.services.append({'name': row.split(' ')[1]})\n\n # get the ports by service name\n ports = str(self.client.console_execute('services -c port {0}\\n'.format(self.ip))[b'data'])\n while not 'port' in ports:\n sleep(10)\n ports = self.client.console_read()\n ports = ports.split('\\n')\n for row in ports:\n for service in self.services:\n if service['name'] in row:\n row = 
strip_whitespaces(row)\n service['port'] = row.split(' ')[1]\n\n # get some information by service name (only useful if a report shall be generated)\n info = str(self.client.console_execute('services -c info {0}\\n'.format(self.ip))[b'data'])\n while not 'info' in info:\n sleep(10)\n info = self.client.console_read()\n info = info.split('\\n')\n for row in info:\n for service in self.services:\n if service['name'] in row:\n row = strip_whitespaces(row)\n service['info'] = row.split(' ')[1]", "def generate_virtual_services(offering, error_tr):\n \n # Temporary variables used for calculating provisioning features of virtual provisioning services.\n accuracy = {}\n latency = {}\n power = {}\n\n # Return variable- specification of the virtual provisioning services.\n offer = {}\n\n # Initial provisioning services are real provisioning services - mapping 1 to 1.\n for iterator, key in enumerate(offering):\n offer[iterator] = {}\n offer[iterator]['accuracy'] = offering[key]['accuracy']\n offer[iterator]['latency'] = offering[key]['latency']\n offer[iterator]['power_consumption'] = offering[key]['power_consumption']\n offer[iterator]['elements'] = [key]\n accuracy[key] = offering[key]['accuracy']\n latency[key] = offering[key]['latency']\n power[key] = offering[key]['power_consumption']\n \n # Combine services with similar accuracy - based on accuracy threshold error_tr\n final_virtual_services = []\n for key1 in accuracy:\n combination_input = []\n combination_input.append(key1)\n for key2 in accuracy:\n if key1 != key2 and abs(accuracy[key1] - accuracy[key2]) < error_tr:\n combination_input.append(key2)\n\n if len(combination_input) > 1:\n final_virtual_services.append(combine(combination_input))\n\n final_virtual_services = list(itertools.chain.from_iterable(final_virtual_services))\n final_virtual_services = [list(t) for t in set(map(tuple, final_virtual_services))]\n\n # Calculate provisioning features for the added virtual services. 
\n iterator += 1\n if all(isinstance(i, list) for i in final_virtual_services):\n for combo in final_virtual_services: \n offer[iterator] = {}\n offer[iterator]['accuracy'] = numpy.mean([accuracy[i] for i in combo])/math.sqrt(len(combo))\n offer[iterator]['latency'] = numpy.max([latency[i] for i in combo])\n offer[iterator]['power_consumption'] = numpy.sum([power[i] for i in combo])\n offer[iterator]['elements'] = combo \n iterator += 1\n elif len(final_virtual_services) > 1:\n offer[iterator] = {}\n offer[iterator]['accuracy'] = numpy.mean([accuracy[i] for i in final_virtual_services])/math.sqrt(len(final_virtual_services))\n offer[iterator]['latency'] = numpy.max([latency[i] for i in final_virtual_services])\n offer[iterator]['power_consumption'] = numpy.sum([power[i] for i in final_virtual_services])\n offer[iterator]['elements'] = list(final_virtual_services)\n iterator += 1\n\n return offer", "def services(**kwargs):\n pass", "def cli(ctx, verbose, services):\n\n if verbose:\n logging.basicConfig(level=logging.DEBUG)\n click.echo('Verbose mode is ON')\n else:\n logging.basicConfig(level=logging.WARNING)\n\n if verbose:\n click.echo(\"Using services:\")\n click.echo(pformat(services))\n\n # Runner does not instantiate ctx properly\n if not ctx.obj:\n ctx.obj = {}\n\n service_mgr = EndpointManager(ep_descs=services)\n\n ctx.obj['services'] = service_mgr", "def __run_instances(self, number=1, policies={}):\n try:\n self.euca = Euca2ool('k:n:t:g:d:f:z:',\n ['key=', 'kernel=', 'ramdisk=', 'instance-count=', 'instance-type=',\n 'group=', 'user-data=', 'user-data-file=', 'addressing=', 'availability-zone='])\n except Exception, ex:\n sys.exit(1)\n\n instance_type = policies.get('instance_type') or 'm1.small'\n image_id = policies.get('image_id') or self.__get_image_id()[0]\n min_count = number\n max_count = number\n keyname = 'mykey'\n \n kernel_id = None\n ramdisk_id = None\n group_names = []\n user_data = None\n addressing_type = None\n zone = None\n user_data = None\n \n if image_id:\n euca_conn = self.__make_connection()\n try:\n reservation = euca_conn.run_instances(image_id = image_id,\n min_count = min_count,\n max_count = max_count,\n key_name = keyname,\n security_groups = group_names,\n user_data = user_data,\n addressing_type = addressing_type,\n instance_type = instance_type,\n placement = zone,\n kernel_id = kernel_id,\n ramdisk_id = ramdisk_id)\n except Exception, ex:\n self.euca.display_error_and_exit('error:%s' % ex)\n return reservation\n return False", "def __init__(self, services=None, verbose=False, responses=None):\n if not services:\n from moneywagon import ALL_SERVICES\n services = ALL_SERVICES\n\n self.services = []\n for ServiceClass in services:\n self.services.append(\n ServiceClass(verbose=verbose, responses=responses)\n )\n\n self.verbose = verbose\n self._successful_service = None # gets filled in after success\n self._failed_services = []", "async def start_all(self):\n try:\n for service in self.services:\n try:\n await service.start()\n await service.healthcheck()\n except Exception as e:\n log.exception(\"Exception while starting %s service\", service)\n raise ServiceStartupException from e\n self.started_services.append(service)\n except ServiceStartupException:\n log.error(\"Stopping services on startup failure\")\n await self.stop_all()\n raise", "def _add_services(self):\n this_service = {'name': '{{ metadata.package }}'}\n other_services = [\n {'name': 'mysql',\n 'location': 'cs:percona-cluster',\n 'constraints': {'mem': '3072M'}},\n {'name': 
'rabbitmq-server'},\n {'name': 'keystone'},\n {'name': 'manila'}\n ]\n super(ManilaPluginCharmDeployment, self)._add_services(\n this_service, other_services)", "def startService(self):\n super(SpawnerService, self).startService()\n for spawn in self.pendingSpawns:\n self.spawn(*spawn)\n self.pendingSpawns = []", "def run(self):\n for tool in self.tools:\n tool.run()\n return", "def run(self):\n self._list_servers()", "def start_all(provision_vm='False'):\n subprocess.check_call(['./run.py'], shell=True, cwd=orc8_docker_path)\n subprocess.check_call('docker-compose up -d', shell=True, cwd=feg_docker_integ_test_path)\n subprocess.check_call(\n 'fab start_magma:provision_vm=%s' % provision_vm,\n shell=True, cwd=agw_path,\n )", "def test_01_service_offerings(self):\n # Validate the following\n # 1. Create a project.\n # 2. List service offerings for the project. All SO available in the\n # domain can be used for project resource creation.\n\n # Create project as a domain admin\n project = Project.create(\n self.apiclient,\n self.services[\"project\"],\n account=self.account.name,\n domainid=self.account.domainid\n )\n # Cleanup created project at end of test\n self.cleanup.append(project)\n self.debug(\"Created project with domain admin with ID: %s\" %\n project.id)\n\n self.debug(\n \"Deploying VM instance for project: %s & service offering: %s\" % (\n project.id,\n self.service_offering.id\n ))\n virtual_machine = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=self.template.id,\n serviceofferingid=self.service_offering.id,\n projectid=project.id\n )\n # Verify VM state\n self.assertEqual(\n virtual_machine.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n\n return", "def hostsplit_service(self):\n self.which_owner()\n self.which_security()\n\n for service, value in self.service_discovery.items():\n self.details[\"services\"][service] = self.which_service(service, **value)" ]
[ "0.6875356", "0.6774575", "0.6473266", "0.6383183", "0.6231173", "0.6137074", "0.60712975", "0.6065705", "0.59697276", "0.5928502", "0.58171874", "0.5816484", "0.58089393", "0.58089393", "0.57664543", "0.5763337", "0.57344735", "0.56776404", "0.565801", "0.5621586", "0.56179136", "0.56176925", "0.56140625", "0.5609898", "0.5608665", "0.55939597", "0.55763936", "0.5564522", "0.55642045", "0.5563541" ]
0.7373676
0
Override default provisioning behaviour to skip services that are unknown.
def provision(self, services, requirements=None):
    try:
        super(SolrDockerRunner, self).provision(services=services, requirements=requirements)
    except UnknownServiceError as error:
        self.logger.warning('Skipping unknown service: {}'.format(error))
        pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_no_adapter_opts_ignore_service_type(self):\n self.oslo_config_dict['heat'] = None\n self.assert_service_disabled(\n 'orchestration',\n \"Not in the list of requested service_types.\",\n # 'orchestration' absent from this list\n service_types=['compute'],\n )", "def test_unknown_service(self):\n raise NotImplementedError # FIXME", "def test_no_overprovision(self):\n command_line = (\n self._MENU + [self._POOLNAME] + self._DEVICES + [\"--no-overprovision\"]\n )\n TEST_RUNNER(command_line)", "def unrecognised_service(service_name):\n print('Service {} not (yet) supported.'.format(service_name))\n pass", "def test_no_such_conf_section_ignore_service_type(self):\n del self.oslo_config_dict['heat']\n self.assert_service_disabled(\n 'orchestration',\n \"Not in the list of requested service_types.\",\n # 'orchestration' absent from this list\n service_types=['compute'],\n )", "def test_no_endpoint_ignore_service_type(self):\n self.assert_service_disabled(\n 'monitoring',\n \"Not in the list of requested service_types.\",\n # 'monitoring' absent from this list\n service_types={'compute', 'orchestration', 'bogus'},\n )", "def _add_services(self):\n # Services and relations which are present merely to satisfy\n # required_interfaces and workload status are not inspected.\n # Fix me. Inspect those too.\n this_service = {'name': 'neutron-openvswitch'}\n other_services = [\n {'name': 'nova-compute'},\n {'name': 'nova-cloud-controller'},\n {'name': 'rabbitmq-server'},\n {'name': 'keystone'},\n {'name': 'glance'},\n {'name': 'neutron-api'},\n self.get_percona_service_entry(),\n ]\n if self._get_openstack_release() >= self.bionic_train:\n other_services.append({'name': 'placement'})\n super(NeutronOVSBasicDeployment, self)._add_services(this_service,\n other_services)", "def test_dontStartPrivilegedService(self):\n ports = self._privilegedStartService(self.highPortNumber)\n self.assertEqual(ports, [])", "def _add_services(self):\n this_service = {'name': 'swift-proxy'}\n other_services = [\n {'name': 'percona-cluster'},\n {'name': 'keystone'},\n {'name': 'glance'},\n {'name': 'swift-storage'}\n ]\n super(SwiftProxyBasicDeployment, self)._add_services(this_service,\n other_services)", "def provision(self, services, requirements=None):\n if hasattr(self, 'service_provisioner'):\n provisioner = self.service_provisioner(services=services,\n container=self,\n requirements=requirements)\n provisioner()", "def _add_services(self):\n this_service = {'name': 'keystone'}\n other_services = [\n {'name': 'percona-cluster', 'constraints': {'mem': '3072M'}},\n {'name': 'rabbitmq-server'}, # satisfy wrkload stat\n {'name': 'cinder'},\n ]\n super(KeystoneBasicDeployment, self)._add_services(this_service,\n other_services)", "def deploy_common_services():\n put('./minion/*', '/etc/systemd/system', use_sudo=True)\n sudo('source /etc/environment')\n sudo('/opt/bin/substitute_private_ipv4.sh /etc/systemd/system/flannel.service')\n sudo('/opt/bin/substitute_private_ipv4.sh /etc/systemd/system/kubelet.service')\n\n sudo('systemctl enable /etc/systemd/system/flannel.service')\n sudo('systemctl enable /etc/systemd/system/docker.service')\n sudo('systemctl enable /etc/systemd/system/kube-proxy.service')\n sudo('systemctl enable /etc/systemd/system/kubelet.service')\n\n sudo('systemctl daemon-reload')\n\n sudo('systemctl start flannel')\n sudo('systemctl start docker')\n sudo('systemctl start kube-proxy')\n sudo('systemctl start kubelet')", "def preflight_check(self):\n\n if not self.vm.suite in self.suites:\n raise 
VMBuilderUserError('Invalid suite. Valid suites are: %s' % ' '.join(self.suites))\n \n modname = 'VMBuilder.plugins.ubuntu.%s' % (self.vm.suite, )\n mod = __import__(modname, fromlist=[self.vm.suite])\n self.suite = getattr(mod, self.vm.suite.capitalize())(self.vm)\n\n if self.vm.arch not in self.valid_archs[self.host_arch] or \\\n not self.suite.check_arch_validity(self.vm.arch):\n raise VMBuilderUserError('%s is not a valid architecture. Valid architectures are: %s' % (self.vm.arch, \n ' '.join(self.valid_archs[self.host_arch])))\n\n if not self.vm.components:\n self.vm.components = ['main', 'restricted', 'universe']\n else:\n if type(self.vm.components) is str:\n self.vm.components = self.vm.components.split(',')\n\n if self.vm.hypervisor.name == 'Xen':\n logging.info('Xen kernel default: linux-image-%s %s', self.suite.xen_kernel_flavour, self.xen_kernel_version())\n\n self.vm.virtio_net = self.use_virtio_net()\n\n if self.vm.lang:\n try:\n run_cmd('locale-gen', '%s' % self.vm.lang)\n except VMBuilderException, e:\n msg = \"locale-gen does not recognize your locale '%s'\" % self.vm.lang\n raise VMBuilderUserError(msg)\n\n if hasattr(self.vm, \"ec2\") and self.vm.ec2:\n self.get_ec2_kernel()\n self.get_ec2_ramdisk()\n\n if not self.vm.addpkg:\n self.vm.addpkg = []\n\n self.vm.addpkg += ['ec2-init',\n 'openssh-server',\n 'ec2-modules',\n 'standard^',\n 'ec2-ami-tools',\n 'update-motd']\n\n if self.vm.ec2_landscape:\n logging.info('Installing landscape support')\n self.vm.addpkg += ['landscape-client']\n\n if not hasattr(self.vm, \"ppa\") or not self.vm.ppa:\n self.vm.ppa = []\n\n self.vm.ppa += ['ubuntu-on-ec2/ppa']", "def test_no_adapter_opts(self):\n self.oslo_config_dict['heat'] = None\n self.assert_service_disabled(\n 'orchestration',\n \"Encountered an exception attempting to process config for \"\n \"project 'heat' (service type 'orchestration'): no such option\",\n )", "def test_unknown_resource_under_service(self):\n raise NotImplementedError # FIXME", "def ignore_services(self) -> List[str]:\n return self.data.get(\"ignore_services\", [])", "def perform_setup(self, services):\n pass", "def no_service_msg(self, *args, **kwargs):\n return \"All either skipped or failed.\"", "def check_services(self):\n for service in self.services:\n try:\n self.cloud.search_services(service)[0]\n except Exception: # pylint: disable=broad-except\n self.is_skipped = True\n break", "def _add_services(self):\n this_service = {'name': '{{ metadata.package }}'}\n other_services = [\n {'name': 'mysql',\n 'location': 'cs:percona-cluster',\n 'constraints': {'mem': '3072M'}},\n {'name': 'rabbitmq-server'},\n {'name': 'keystone'},\n {'name': 'manila'}\n ]\n super(ManilaPluginCharmDeployment, self)._add_services(\n this_service, other_services)", "def _remove_default_service_configs(cls, volume_config):\n for key in VolumeService.SUPPORTED_SERVICES:\n if key in volume_config and hasattr(volume_config[key], 'from_default') and volume_config[key].from_default:\n del volume_config[key]", "def scan_provisioned_products(search_pp_name, client: boto3.client) -> dict:\n logger.info('Making sure Control Tower is not already executing')\n paginator = client.get_paginator(\"scan_provisioned_products\")\n for page in paginator.paginate(\n AccessLevelFilter={\n 'Key': 'Account',\n 'Value': 'self'\n }\n ):\n for x in page['ProvisionedProducts']:\n if x['Type'] == 'CONTROL_TOWER_ACCOUNT':\n\n # Since Control Tower has a serial method of deploying account this statement will check to see if\n # there's and existing 
In-Progress deployment and will return provision the product name / status\n if x['Status'] == 'UNDER_CHANGE' and x['Name'] != search_pp_name:\n logger.info(f\"Found In-Progress Control Tower Deployment ({x['Name']})\")\n return {\"ProvisionedProductName\": x['Name'], \"Status\": x['Status']}\n\n # If existing provision product found return\n elif x['Name'] == search_pp_name:\n logger.info(f\"Found {x}\")\n\n # Removing Create time since it doesn't serializable JSON well\n del x['CreatedTime']\n return x", "def setUpClass(cls):\n super(BaseCharmOperationTest, cls).setUpClass()\n cls.services = ['NotImplemented'] # This must be overridden\n cls.nrpe_checks = ['NotImplemented'] # This must be overridden", "def generate_virtual_services(offering, error_tr):\n \n # Temporary variables used for calculating provisioning features of virtual provisioning services.\n accuracy = {}\n latency = {}\n power = {}\n\n # Return variable- specification of the virtual provisioning services.\n offer = {}\n\n # Initial provisioning services are real provisioning services - mapping 1 to 1.\n for iterator, key in enumerate(offering):\n offer[iterator] = {}\n offer[iterator]['accuracy'] = offering[key]['accuracy']\n offer[iterator]['latency'] = offering[key]['latency']\n offer[iterator]['power_consumption'] = offering[key]['power_consumption']\n offer[iterator]['elements'] = [key]\n accuracy[key] = offering[key]['accuracy']\n latency[key] = offering[key]['latency']\n power[key] = offering[key]['power_consumption']\n \n # Combine services with similar accuracy - based on accuracy threshold error_tr\n final_virtual_services = []\n for key1 in accuracy:\n combination_input = []\n combination_input.append(key1)\n for key2 in accuracy:\n if key1 != key2 and abs(accuracy[key1] - accuracy[key2]) < error_tr:\n combination_input.append(key2)\n\n if len(combination_input) > 1:\n final_virtual_services.append(combine(combination_input))\n\n final_virtual_services = list(itertools.chain.from_iterable(final_virtual_services))\n final_virtual_services = [list(t) for t in set(map(tuple, final_virtual_services))]\n\n # Calculate provisioning features for the added virtual services. 
\n iterator += 1\n if all(isinstance(i, list) for i in final_virtual_services):\n for combo in final_virtual_services: \n offer[iterator] = {}\n offer[iterator]['accuracy'] = numpy.mean([accuracy[i] for i in combo])/math.sqrt(len(combo))\n offer[iterator]['latency'] = numpy.max([latency[i] for i in combo])\n offer[iterator]['power_consumption'] = numpy.sum([power[i] for i in combo])\n offer[iterator]['elements'] = combo \n iterator += 1\n elif len(final_virtual_services) > 1:\n offer[iterator] = {}\n offer[iterator]['accuracy'] = numpy.mean([accuracy[i] for i in final_virtual_services])/math.sqrt(len(final_virtual_services))\n offer[iterator]['latency'] = numpy.max([latency[i] for i in final_virtual_services])\n offer[iterator]['power_consumption'] = numpy.sum([power[i] for i in final_virtual_services])\n offer[iterator]['elements'] = list(final_virtual_services)\n iterator += 1\n\n return offer", "def aggregator_unavailable_apiservice(self, metric, scraper_config):\n for sample in metric.samples:\n sample[self.SAMPLE_LABELS][\"apiservice_name\"] = sample[self.SAMPLE_LABELS].pop(\"name\")\n self.submit_metric('.aggregator_unavailable_apiservice', metric, scraper_config, monotonic_count=False)", "def test_dos_list_service_huge_junk(self):\n # create a huge list of domain\n attack_string = \"1\" * 3500\n params = {\"junk\": attack_string}\n resp = self.client.list_services(param=params)\n self.assertTrue(resp.status_code < 503)", "def _should_profile_production_default():\n return False", "def _configure_services(self):\n keystone_config = {\n 'admin-password': 'openstack',\n 'admin-token': 'ubuntutesting',\n }\n manila_config = {\n 'default-share-backend': 'generic',\n }\n manila_generic_config = {\n 'driver-handles-share-servers': False,\n }\n configs = {\n 'keystone': keystone_config,\n 'manila': manila_config,\n 'manila-generic': manila_generic_config,\n }\n super(ManilaPluginCharmDeployment, self)._configure_services(configs)", "def ignore_missing_vnet_service_endpoint(self) -> Optional[bool]:\n return pulumi.get(self, \"ignore_missing_vnet_service_endpoint\")", "def ignore_missing_service_endpoint(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"ignore_missing_service_endpoint\")" ]
[ "0.61637455", "0.6001985", "0.59038186", "0.59035575", "0.5794396", "0.5754696", "0.5706223", "0.5533597", "0.54840475", "0.54076886", "0.5395336", "0.5342867", "0.5318276", "0.5301154", "0.52795595", "0.5248005", "0.5178702", "0.5173064", "0.5153804", "0.51238096", "0.5121864", "0.51075256", "0.5101915", "0.5071131", "0.505505", "0.5048358", "0.50293064", "0.5014715", "0.4976023", "0.49628538" ]
0.6283957
0
Class factory for the docker runner. Returns a specific class for a specific type of service.
def docker_runner_factory(image):
    mapping = {
        'gunicorn': GunicornDockerRunner,
        'redis': RedisDockerRunner,
        'consul': ConsulDockerRunner,
        'postgres': PostgresDockerRunner,
        'registrator': RegistratorDockerRunner,
        'solr': SolrDockerRunner
    }
    for key in mapping:
        if key in image:
            return mapping[key]
    return DockerRunner
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def instantiateServiceType(self):\n if self.codebase:\n module = self.codebase.instantiate(self.service_module_name)\n\n if self.service_class_name not in module.__dict__:\n raise Exception(\n \"Provided module %s at %s has no class %s. Options are:\\n%s\"\n % (\n self.service_module_name,\n module.__file__,\n self.service_class_name,\n \"\\n\".join([\" \" + x for x in sorted(module.__dict__)]),\n )\n )\n\n service_type = module.__dict__[self.service_class_name]\n else:\n\n def _getobject(modname, attribute):\n mod = __import__(modname, fromlist=[attribute])\n return mod.__dict__[attribute]\n\n service_type = _getobject(self.service_module_name, self.service_class_name)\n\n return service_type", "def factory(container, name, factory):", "def instantiate_runners(self):\n for _, a in self.wf['action'].items():\n if 'docker://' in a['uses']:\n a['runner'] = DockerRunner(\n a, self.workspace, self.env,\n self.quiet, self.debug, self.dry_run)\n continue\n\n if 'shub://' in a['uses']:\n a['runner'] = SingularityRunner(\n a, self.workspace, self.env,\n self.quiet, self.debug, self.dry_run)\n continue\n\n if './' in a['uses']:\n if os.path.exists(os.path.join(a['uses'], 'Dockerfile')):\n a['runner'] = DockerRunner(\n a, self.workspace, self.env,\n self.quiet, self.debug, self.dry_run)\n elif os.path.exists(os.path.join(a['uses'],\n 'singularity.def')):\n a['runner'] = SingularityRunner(\n a, self.workspace, self.env,\n self.quiet, self.debug, self.dry_run)\n else:\n a['runner'] = HostRunner(\n a, self.workspace, self.env,\n self.quiet, self.debug, self.dry_run)\n continue\n\n dockerfile_path = os.path.join(a['repo_dir'], a['action_dir'],\n 'Dockerfile')\n singularityfile_path = os.path.join(a['repo_dir'], a['action_dir'],\n 'singularity.def')\n\n if os.path.exists(dockerfile_path):\n a['runner'] = DockerRunner(\n a, self.workspace, self.env,\n self.quiet, self.debug, self.dry_run)\n elif os.path.exists(singularityfile_path):\n a['runner'] = SingularityRunner(\n a, self.workspace, self.env,\n self.quiet, self.debug, self.dry_run)\n else:\n a['runner'] = HostRunner(\n a, self.workspace, self.env,\n self.quiet, self.debug, self.dry_run)", "def named_factory(klass):\n\n class _factory(Location):\n zope.interface.implements(INamedUtilBase)\n def __init__(self):\n self.title = klass.title\n self.label = klass.label\n self.description = klass.description\n def __call__(self, *a, **kw):\n # returns an instantiated factory with a context\n factory = klass(*a, **kw)\n factory.__name__ = self.__name__\n return factory\n # create/return instance of the factory that instantiates the \n # classes below.\n return _factory()", "def get_factory(self, class_name):\n if class_name in self._class_name_class_dict:\n return self._class_name_class_dict[class_name]()\n else:\n raise ModuleNotFoundError(\"Module should be in {}\".format(self.factory_names))", "def get_class(self, name: str) -> Type:\n if logger.isEnabledFor(logging.DEBUG):\n logger.debug(f'new instance of {name}')\n name = self.default_name if name is None else name\n if logger.isEnabledFor(logging.DEBUG):\n logger.debug(f'creating instance of {name}')\n class_name, params = self._class_name_params(name)\n return self._find_class(class_name)", "def container_factory(self, name):", "def container_factory(self, name):", "def container_factory(self, name):", "def container_factory(self, name):", "def container_factory(self, name):", "def get_factory(self):\n\n return Factory(type(self), self.kwargs)", "def build_runner(config):\n if not isinstance(config, 
dict) or 'runner_type' not in config:\n raise ValueError('`runner_type` is missing from configuration!')\n\n runner_type = config['runner_type']\n if runner_type not in _RUNNERS:\n raise ValueError(f'Invalid runner type: `{runner_type}`!\\n'\n f'Types allowed: {list(_RUNNERS)}.')\n return _RUNNERS[runner_type](config)", "def classFactory(iface): # pylint: disable=invalid-name\n #\n from .strahler import Strahler\n return Strahler(iface)", "def factory(**pattern):\n\n class_name = pattern.get('class')\n del (pattern['class'])\n\n # pprint(inspect.stack()[1][0].f_globals)\n _cls = inspect.stack()[1][0].f_globals[class_name]\n\n # _cls = globals()[class_name]\n return _cls(**pattern)", "def classFactory(iface):\n #\n from .snail import Snail\n\n return Snail(iface)", "def classFactory(iface): # pylint: disable=invalid-name\n #\n from .lizard_downloader import LizardDownloader\n return LizardDownloader(iface)", "def test_runner_class(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"test_runner_class\")", "def test_runner_class(self) -> str:\n return pulumi.get(self, \"test_runner_class\")", "def classFactory(iface): # pylint: disable=invalid-name\n #\n from .osm_tools import OSMtools\n return OSMtools(iface)", "def classFactory(iface): # pylint: disable=invalid-name\n #\n from .nuclear_energy_plant_radiation_module import energy_plant_radiation_class\n return energy_plant_radiation_class(iface)", "def __new__(cls, conf):\n # Call is already for a subclass, so pass it through\n RunnerClass = cls\n return super(Runner, cls).__new__(RunnerClass)", "def service_instance(self):\n return self.service_class(self)", "def classFactory(iface): # pylint: disable=invalid-name\n #\n from .quick_digitize import QuickDigitize\n return QuickDigitize(iface)", "def get_class(name):\n try:\n cls, constructor = registry[name]\n except KeyError:\n raise UnregisteredClassError(\"'%s' is not a registered \"\n \"JSONAlizable class name\" % name, name)\n if constructor is not None:\n return constructor\n return cls", "def get_factory(type):\n if not multisearch.utils.is_safe_backend_name(type):\n raise ImportError(\"Backend type %r not known\" % type)\n factory = _factories.get(type, None)\n if factory is None:\n module_name = \"multisearch.backends.%s_backend\" % type\n m = __import__(module_name, fromlist=['SearchClient'], level=0)\n factory = m.SearchClient\n _factories[type] = factory\n return factory", "def get_class(self, name):\n return self.host.get_class(name)", "def build(self, factory, *factory_args, **factory_kw):\n return self._instantiate(\"\", factory, factory_args, factory_kw)", "def construct_class_by_name(name, *args, **kwargs):\n parts = name.split('.')\n module_name, class_name = '.'.join(parts[:-1]), parts[-1]\n module = importlib.import_module(module_name)\n return getattr(module, class_name)(*args, **kwargs)", "def get_factory():" ]
[ "0.5989068", "0.590695", "0.58675516", "0.5845438", "0.57969826", "0.57343787", "0.5721487", "0.5721487", "0.5721487", "0.5721487", "0.5721487", "0.56905794", "0.56888777", "0.56245327", "0.5598864", "0.5569812", "0.5477469", "0.5466489", "0.5464835", "0.5444313", "0.5389793", "0.53855", "0.5330174", "0.53169805", "0.53071356", "0.53052956", "0.52946097", "0.5292632", "0.52818686", "0.527341" ]
0.7235535
0
Parse Amazon tracking numbers.
def parse_amazon(email):
    tracking_numbers = []

    soup = BeautifulSoup(email[EMAIL_ATTR_BODY], 'html.parser')

    # see if it's an shipped order email
    order_number_match = re.search('Your AmazonSmile order #(.*?) has shipped', email[EMAIL_ATTR_SUBJECT])
    if not order_number_match:
        order_number_match = re.search('Your Amazon.com order #(.*?) has shipped', email[EMAIL_ATTR_SUBJECT])
        if not order_number_match:
            return tracking_numbers

    order_number = order_number_match.group(1)

    # find the link that has 'track package' text
    linkElements = soup.find_all('a')
    for linkElement in linkElements:
        if not re.search(r'track package', linkElement.text, re.IGNORECASE):
            continue

        # if found we no get url and check for duplicates
        link = linkElement.get('href')

        # make sure we dont have dupes
        order_numbers = list(map(lambda x: x['tracking_number'], tracking_numbers))
        if order_number not in order_numbers:
            tracking_numbers.append({
                'link': link,
                'tracking_number': order_number
            })

    return tracking_numbers
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_amazon_de(email):\n tracking_numbers = []\n \n soup = BeautifulSoup(email[EMAIL_ATTR_BODY], 'html.parser')\n\n # see if it's an shipped order email\n order_number_match = re.search('Order: #(.*?)\\n', email[EMAIL_ATTR_BODY])\n if not order_number_match:\n order_number_match = re.search('Your Amazon.de order of (.*?) has been dispatched!', email[EMAIL_ATTR_SUBJECT])\n if not order_number_match:\n return tracking_numbers\n\n order_number = order_number_match.group(1)\n\n # find the link that has 'track your package' text\n linkElements = soup.find_all('a')\n for linkElement in linkElements:\n if not re.search(r'track your package', linkElement.text, re.IGNORECASE):\n continue\n \n # if found we no get url and check for duplicates\n link = linkElement.get('href')\n\n # make sure we dont have dupes\n order_numbers = list(map(lambda x: x['tracking_number'], tracking_numbers))\n if order_number not in order_numbers:\n tracking_numbers.append({\n 'link': link,\n 'tracking_number': order_number\n })\n\n return tracking_numbers", "def parse_thrift_books(email):\n tracking_numbers = []\n\n soup = BeautifulSoup(email[EMAIL_ATTR_BODY], 'html.parser')\n elements = soup.find_all('a')\n\n for element in elements:\n link = element.get('href')\n\n if not link:\n continue\n\n if 'spmailtechno' not in link:\n continue\n\n try:\n if re.search(track_copy_pattern, element.text):\n match = re.search(order_number_pattern, email[EMAIL_ATTR_BODY])\n if match:\n tracking_numbers.append({\n 'link': link,\n 'tracking_number': match.group(1)\n })\n except:\n pass\n\n return tracking_numbers", "def parse_track(line, user_id):\n track_data = []\n # Remove curly braces and split on commas\n tokens = re.split(\"[\\{\\}]|, \", line)\n\n for attr in ATTRIBUTES:\n for tok in tokens:\n tok = tok[2:] # strip (u') from attribute names \n if tok.startswith(attr):\n attr_data = re.split(\"\\': \", tok)\n val = \"0.0\"\n if attr_data[1] != \"None\":\n val = attr_data[1]\n track_data.append(val) \n return track_data", "def _parse_numbers(self, numberstr: str):\n numbers = []\n currentnumber = \"\"\n\n for c in numberstr:\n if c.isdigit() or c == '-' or c == '.':\n currentnumber += c\n elif len(currentnumber) > 0:\n numbers.append(float(currentnumber))\n currentnumber = \"\"\n if len(currentnumber) > 0:\n numbers.append(float(currentnumber))\n\n return np.array(numbers)", "def __parse_traffic(str):\n return float(str.strip().split(\",\")[0].replace('.',''))", "def parse_test_id(response):\n\n\n start = response.find('>test')\n start += 6\n end = response.find('log<') - 1\n test_number = int(response[start:end])\n return test_number", "def anno_parser(annos_str):\n annos = []\n for anno_str in annos_str:\n anno = list(map(int, anno_str.strip().split(',')))\n annos.append(anno)\n return annos", "def ExtractNumbers(s):\n\n t = s.strip('[]\\n')\n comma_space = r', '\n re_comma_space = re.compile(comma_space)\n z = re_comma_space.split(t)\n #print z\n return z", "def splitTrackingNums(_pack):\n multi = [ i.strip() for i in _pack[1].split(';') ]\n splits_ = [ [_pack[0], m] for m in multi ]\n return splits_", "def parse(self, string):\n parse = re.match(\"^((?:[0-9]{1,3}\\.){3}[0-9]{1,3})\\s\\(((?:\\d)*\\.(?:\\d)*|(?:\\d)*)\\sms\\)$\", string)\n parse_result = parse.groups()\n return parse_result[0], parse_result[1]", "def parseLog(self, log):\n return 0", "def _get_as_num(self, msg):\n country_m = re.search(\"country:(.*)\", msg, flags=re.IGNORECASE)\n try:\n country_code = country_m.group(1).strip()\n except AttributeError:\n 
country_code = None\n\n originAS_m = re.search(\"OriginAS:(.*)\", msg, flags=re.IGNORECASE)\n try:\n originAS = originAS_m.group(1).strip()\n except AttributeError:\n originAS_m = re.search(\"Origin:(.*)\", msg, flags=re.IGNORECASE)\n try:\n originAS = originAS_m.group(1).strip()\n except AttributeError:\n originAS = None\n\n return country_code, originAS", "def parse_precision(p):\n min = max = 0\n for c in p:\n if c in '@0':\n min += 1\n max += 1\n elif c == '#':\n max += 1\n elif c == ',':\n continue\n else:\n break\n return min, max", "def _parseNumber(self, str):\r\n\t\tif (str.count(\".\") == 0):\r\n\t\t\treturn int(str)\r\n\t\tif (str.count(\".\") == 1):\r\n\t\t\treturn float(str)\r\n\t\treturn str", "def parse_line(line):\n return [int(v) for v in line.strip().split()]", "async def parse(self, raw: str) -> dict:", "def getHNID(self, source):\n urlStart = source.find('score_') + 6\n urlEnd = source.find('\"', urlStart)\n try: \n return int(source[urlStart:urlEnd])\n except ValueError:\n return -1", "def parsing_no_hits_data(global_avg_trimmed_length):\n\n #No Hit Counter\n no_hit_counter = 0\n\n #Totally trimmed counter\n totally_trimmed_counter = 0\n\n #No hits results\n average_trimmed_no_hit_length=[]\n\n #Opening and Parsing blast_no_hits_report.txt\n no_hit_results = open('blast_no_hits_report.txt', 'r')\n for line in no_hit_results:\n data = line.split(\"\\t\")\n \n if line.startswith('SeqID'):\n continue\n else:\n average_trimmed_no_hit_length.append(float(data[4]))\n global_avg_trimmed_length.append(float(data[4]))\n \n no_hit_counter +=1\n \n if float(data[4]) == 0:\n totally_trimmed_counter +=1\n continue\n no_hit_results.close\n\n\n return {'no_hit_counter':no_hit_counter, 'totally_trimmed_counter':totally_trimmed_counter, \n 'average_trimmed_no_hit_length':average_trimmed_no_hit_length, 'global_avg_trimmed_length':global_avg_trimmed_length}", "def _parse_field(self, buf):\n\n delim = buf.find(b'=')\n if delim == -1:\n raise FIXParserError('Incorrect format: missing \"=\"')\n\n tag_id = 0\n try:\n tag_id = int(buf[:delim])\n except ValueError as err:\n raise FIXParserError(f'Incorrect format: ID:{str(buf[:delim])}') \\\n from err\n\n return (tag_id, buf[delim+1:])", "def parse_res_id(response):\n pass", "def parse(s):\n return s", "def parse_number():\n nonlocal idx\n num = \"\"\n def parse_digits():\n nonlocal idx\n num = \"\"\n while idx < len(source) and is_num_char(source[idx]):\n num += source[idx]\n idx += 1\n return num\n # Parse initial numbers\n oidx = idx\n num += parse_digits()\n if idx < len(source) and source[idx] == '.': # if we find a dot\n # Parse out the second part of the number string\n idx += 1\n num += (\".\" + parse_digits())\n if idx < len(source) and not terminal(source[idx]): # the number didn't terminate... 
this is an identifier\n idx = oidx\n return parse_symbol()\n idx -= 1 # Backtrack, bc last character is *invalid* and loop assumes we stop on a valid token character\n return num", "def parse_number(txt):\n return int(txt)", "def extract_values(line):\n return line.split('[Sentry Benchmark]')[-1].split('\\\\n')[0].split(',')", "def read4num(self,s):\n s = self.getKeyword(s)\n return np.float_(s.split(','))", "def number(full_address):\n warning_message = \"\"\"\\n\n This parser should be used with the knowledge that this\n function is open to four significant vulnerabilities:\n 1) `number()` will parse the first numeric characters it\n an address string contains (read from left to right).\n If the address string has:\n a) no building number\n b) numeric characters unrelated to addressable\n information at the start of the address string\n 2) Address numbers separated by `&` or `,` will not be parsed\n 3) Building names that include numeric characters are\n incorrectly parsed as building numbers\\n\n \"\"\"\n warnings.warn(warning_message)\n return capture_address_element(NUMBER_PATTERN, full_address)", "def parse_record(record_wrapper_elem) -> (int, int, int):\n records = record_wrapper_elem.contents[2].split('-')\n return int(records[0]), int(records[1]), int(records[2])", "def parse_spans(span_string):\n spans = []\n for span in span_string.split(';'):\n start, end = span.split(' ')\n spans.append((int(start), int(end)))\n return spans", "def parse_spans(span_string):\n spans = []\n for span in span_string.split(';'):\n start, end = span.split(' ')\n spans.append((int(start), int(end)))\n return spans", "def parse(self):" ]
[ "0.6796474", "0.59640026", "0.59547", "0.55090153", "0.54678917", "0.5286884", "0.5229485", "0.52005845", "0.5179443", "0.5159709", "0.5023201", "0.4963213", "0.49453238", "0.49155068", "0.48985288", "0.48899448", "0.48797825", "0.48751968", "0.48676506", "0.4864119", "0.4863788", "0.48556617", "0.48453233", "0.48229697", "0.480965", "0.48046646", "0.4799236", "0.47807583", "0.47807583", "0.47791183" ]
0.7204298
0
Initialising of delib64.dll (WIN10) a 64bit driver is expected. Number is the digit when more than one Interface of a Kind is configured..!
def __init__(self, Interface="USB", Number=DRIVER_NUM):
    self.bib = CDLL("delib64")  # this will NOT fail...
    self.interface = Interface
    self.number = 0  #Number
    self.handle = 0
    self.version = 0
    if self.interface == "USB":
        self.createModule(self.RO_USB)
    elif self.interface == "ETH":
        self.createModule(self.RO_ETH)
    elif self.interface == "ETH_LC":
        self.createModule(self.RO_ETH_LC)
    else:
        print("Valid interfaces are USB or ETH and ETH/LC. Default ist USB.")
        sys.exit(1)
    #self.debugModule()
    #print("init object! Handle is: ", self.handle)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testAbiCompatibility64(self):\n if self._dut.GetCpuAbiList(64):\n self._TestAbiCompatibility(64)\n else:\n logging.info(\"Skip the test as the device doesn't support 64-bit \"\n \"ABI.\")", "def is_64_windows():\n return struct.calcsize('P') * 8 == 64", "def is64bit(self):\n return platform.machine().endswith('64')", "def is_64bit(self):\n pass", "def test_universal64_executable(self):\n self.build(debug_info=\"dsym\")\n self.do_test()", "def test_universal64_executable(self):\n self.build(debug_info=\"dsym\")\n self.do_test()", "def is_64bit():\n is64bit = sys.maxsize > 2 ** 32\n if sys.platform == \"cli\":\n is64bit = sys.executable.endswith(\"ipy64.exe\")\n return is64bit", "def osarch_is_64_bit():\n return osarch_match(\"64-bit\")", "def disk_lib_init():\n global is_64bits\n global use_sidecar_create\n\n # Define arg types for disk lib apis.\n if sys.hexversion >= PYTHON64_VERSION:\n is_64bits = True\n load_disk_lib(DISK_LIB64)\n lib.DiskLib_OpenWithInfo.argtypes = [c_char_p, c_int32,\n POINTER(c_uint32),\n POINTER(c_uint64),\n POINTER(c_uint64)]\n lib.DiskLib_Close.argtypes = [c_uint64]\n\n lib.DiskLib_SidecarOpen.argtypes = [c_uint64, c_char_p, c_int32,\n POINTER(c_uint64)]\n lib.DiskLib_SidecarClose.argtypes = [c_uint64, c_char_p,\n POINTER(c_uint64)]\n lib.DiskLib_DBGet.argtypes = [c_uint64, c_char_p, POINTER(c_char_p)]\n lib.DiskLib_DBSet.argtypes = [c_uint64, c_char_p, c_char_p]\n lib.DiskLib_GetSize.argtypes = [c_uint64, c_uint32,\n c_uint32, POINTER(disk_info)]\n # Check if this library supports create API\n try:\n lib.DiskLib_SidecarCreate.argtypes = [c_uint64, c_char_p, c_uint64,\n c_int32, POINTER(c_uint64)]\n lib.DiskLib_SidecarCreate.restype = int\n use_sidecar_create = True\n except:\n logging.debug(\n \"ESX version doesn't support create API, using open instead.\")\n pass\n else:\n load_disk_lib(DISK_LIB)\n lib.DiskLib_OpenWithInfo.argtypes = [c_char_p, c_int32,\n POINTER(c_uint32),\n POINTER(c_uint32),\n POINTER(c_uint32)]\n lib.DiskLib_Close.argtypes = [c_uint32]\n lib.DiskLib_SidecarOpen.argtypes = [c_uint32, c_char_p, c_int32,\n POINTER(c_uint32)]\n lib.DiskLib_SidecarClose.argtypes = [c_uint32, c_char_p,\n POINTER(c_uint32)]\n lib.DiskLib_DBGet.argtypes = [c_uint32, c_char_p, POINTER(c_char_p)]\n lib.DiskLib_DBSet.argtypes = [c_uint32, c_char_p, c_char_p]\n lib.DiskLib_GetSize.argtypes = [c_uint32, c_uint32,\n c_uint32, POINTER(disk_info)]\n\n # Check if this library supports create API\n try:\n lib.DiskLib_SidecarCreate.argtypes = [c_uint32, c_char_p, c_uint64,\n c_int32, POINTER(c_uint32)]\n lib.DiskLib_SidecarCreate.restype = int\n use_sidecar_create = True\n except:\n logging.debug(\n \"ESX version doesn't support create API, using open instead.\")\n\n lib.DiskLib_SidecarMakeFileName.argtypes = [c_char_p, c_char_p]\n\n # Define result types for disk lib apis.\n lib.DiskLib_OpenWithInfo.restype = int\n lib.DiskLib_Close.restype = int\n lib.DiskLib_SidecarOpen.restype = int\n lib.DiskLib_SidecarClose.restype = int\n lib.DiskLib_SidecarMakeFileName.restype = c_char_p\n lib.DiskLib_DBGet.restype = c_uint32\n lib.DiskLib_DBSet.restype = c_uint32\n lib.DiskLib_GetSize.restype = c_uint32\n\n return", "def is_64bit(self):\n return self.machine == 'x86_64'", "def getnumanz64(self):\n numanz_ = ctypes.c_int64()\n res = __library__.MSK_XX_getnumanz64(self.__nativep,ctypes.byref(numanz_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n numanz_ = numanz_.value\n _numanz_return_value = numanz_\n return (_numanz_return_value)", "def 
is64Bit(program: ghidra.program.model.listing.Program) -> bool:\n ...", "def test_universal64_dsym(self):\n self.build(debug_info=\"dsym\", dictionary={'FAT64_DSYM': '1'})\n self.do_test()", "def iperf_version(self):\n # TODO: Is there a better way to get the const char than allocating 30?\n VersionType = c_char * 30\n return VersionType.in_dll(self.lib, \"version\").value.decode('utf-8')", "def test_universal64_dsym(self):\n self.build(debug_info=\"dsym\", dictionary={\"FAT64_DSYM\": \"1\"})\n self.do_test()", "def bitness():\n # see https://docs.python.org/2/library/platform.html#platform.architecture\n return '64-bit' if sys.maxsize > 2**32 else '32-bit'", "def driver(self):\n driver = c_int()\n ckresult(_dll.FMOD_System_GetDriver(self._ptr, byref(driver)))\n return driver.value", "def getnumanz64(self): # 3\n res,resargs = self.__obj.getnumanz64()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _numanz_return_value = resargs\n return _numanz_return_value", "def _init_nt():\n g = {}\n g['EXE'] = \".exe\"\n g['SO'] = \".pyd\"\n g['SOABI'] = g['SO'].rsplit('.')[0] # xxx?\n\n global _config_vars\n _config_vars = g", "def setosvariablesx64():\n\tKPROCESS = ''\n\tFLINK = ''\n\tUPID = ''\n\tTOKEN = ''\n\tversion = sys.getwindowsversion()\n\tif((version.major == 5) and (version.minor == 2)):\n\t\t# the target machine's OS is Windows Server 2003\n\t\tprint \"[*] OS version: Windows Server 2003\"\n\t\tKPROCESS = '\\x68'\n\t\tTOKEN\t= '\\x60\\x01' #0x160\n\t\tUPID\t = '\\xd8\\x00'\n\t\tFLINK = '\\xe0\\x00'\n\telif((version.major == 6) and (version.minor == 1) and ('1' in version.service_pack)):\n\t\t# the target machine's OS is Windows 7x64 SP1\n\t\t#tbd\n\t\tprint \"[*] OS version: Windows 7x64 SP1\"\n\t\tKPROCESS = '\\x70'\n\t\tTOKEN\t= '\\x08\\x02' #0x208\n\t\tUPID\t = '\\x80\\x01' #180\n\t\tFLINK = '\\x88\\x01' #188\n\telse:\n\t\tprint \"[-] No matching OS version, exiting...\"\n\t\tsys.exit(-1)\n\t\t\n\treturn (KPROCESS,FLINK,UPID,TOKEN)", "def init_lib(self, data_path: str, encode: int, license_code: str) -> int:\n return self.get_func(\"DC_Init\", [c_char_p, c_int, c_int, c_int, c_char_p], c_int)(\n data_path,\n encode,\n self.FEATURE_COUNT,\n license_code\n )", "def _ecgInitAlgLib(self,libname='TgEcgAlg64.dll', power_frequency=60):\n\n if sys.maxsize > (2**32)/2-1: #running 64 bit\n print \"loading Neurosky tg_ecg library, 64 bit\"\n libname = 'TgEcgAlg64.dll'\n else:\n print \"loading Neurosky tg_ecg library, 32 bit\"\n #libname = 'TgEcgAlg.dll'\n libname = 'tg_ecg.so'\n print \"loading analysis library: \", libname\n E = cdll.LoadLibrary(libname)\n \n E.tg_ecg_do_hrv_sdnn(0)\n E.tg_ecg_do_relaxation_level(0)\n E.tg_ecg_do_respiratory_rate(0)\n E.tg_ecg_do_rri_precise(0)\n E.tg_ecg_set_power_line_freq(power_frequency)\n E.tg_ecg_get_raw_smoothed.restype = c_double\n E.tg_ecg_init() # init the library with selected options\n return E", "def __init__(self, *args):\n _snap.TFltUInt64Kd_swiginit(self, _snap.new_TFltUInt64Kd(*args))", "def test_ds18b20_get_kind(self):\n assert_equal(self.test_ds18b20.get_kind(), 'mpds18b20')", "def __init__(self, *args):\n _snap.TUInt64IntKd_swiginit(self, _snap.new_TUInt64IntKd(*args))", "def get_lib_extension():\r\n if sys.platform == 'win32':\r\n return 'pyd'\r\n else:\r\n return 'so'", "def test_dq_1_conftest(dq_1):\n assert dq_1._dll.head.data == 9", "def __init__(self, *args):\n _snap.TUInt64IntPr_swiginit(self, _snap.new_TUInt64IntPr(*args))", "def __init__(self, *args):\n _snap.TIntUInt64Kd_swiginit(self, 
_snap.new_TIntUInt64Kd(*args))", "def architecture(executable=None, bits='', linkage=''): ###\n # Use the sizeof(pointer) as default number of bits if nothing\n # else is given as default.\n if not bits:\n import struct\n try:\n size = struct.calcsize('P')\n except ValueError: ###\n # Older installations can only query longs\n size = struct.calcsize('l')\n bits = str(size*8) + 'bit'\n\n return bits, linkage" ]
[ "0.6060997", "0.5781516", "0.5779918", "0.56639785", "0.5572633", "0.5572633", "0.5465029", "0.546459", "0.5433739", "0.54001945", "0.5360808", "0.53498805", "0.52878857", "0.52732784", "0.5250136", "0.5163056", "0.5147184", "0.51412493", "0.5082714", "0.5015851", "0.49879456", "0.49677673", "0.49505574", "0.49351248", "0.4927725", "0.49269998", "0.49269482", "0.49057978", "0.4897451", "0.48785123" ]
0.5937619
1
This command is used to read the timeout status. Arguments ([Debug=1])
def analogDaTimeoutStatus(self, Debug=0):
    self.bib.DapiSpecialCommand.argtypes = \
        [c_ulong, c_ulong, c_ulong, c_ulong, c_ulong]
    self.bib.DapiSpecialCommand.restype = c_ulong
    timeout_status = self.bib.DapiSpecialCommand(self.handle, \
        self.DAPI_SPECIAL_CMD_TIMEOUT, \
        self.DAPI_SPECIAL_TIMEOUT_GET_STATUS, 0, 0)  # ...get timeout status
    if Debug == 1:
        print("Timeout no/yes/done:",timeout_status)
    else:
        return(timeout_status)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def assert_timeout(self) -> None:", "def _timeout(signum, frame):\n # Raise TimeoutException with system default timeout message\n raise TimeoutException()", "def analogDaTimeoutOff(self, Debug=0):\n self.bib.DapiSpecialCommand.argtypes = \\\n [c_ulong, c_ulong, c_ulong, c_ulong, c_ulong]\n self.bib.DapiSpecialCommand.restype = None\n self.bib.DapiSpecialCommand(self.handle, self.DAPI_SPECIAL_CMD_TIMEOUT, \\\n self.DAPI_SPECIAL_TIMEOUT_DEACTIVATE, 0, 0) # ...switch off TO \n timeout_status = self.analogDaTimeoutStatus() # check it...\n if Debug == 1 and timeout_status == 0:\n print(\"Timeout off:\",timeout_status)\n else:\n return(timeout_status)", "def pytest_timeout_cancel_timer(item):", "def get_timeout(self) -> int:", "def test_timeout(self) -> Optional[pulumi.Input['DurationArgs']]:\n return pulumi.get(self, \"test_timeout\")", "def test_timeout(self) -> Optional[pulumi.Input['DurationArgs']]:\n return pulumi.get(self, \"test_timeout\")", "def analogDaTimeoutOn(self, Sekunden, Hundertmillisekunden, Debug=0):\n self.bib.DapiSpecialCommand.argtypes = \\\n [c_ulong, c_ulong, c_ulong, c_ulong, c_ulong]\n self.bib.DapiSpecialCommand.restype = None # void\n self.bib.DapiSpecialCommand(self.handle, \\\n self.DAPI_SPECIAL_CMD_TIMEOUT, self.DAPI_SPECIAL_TIMEOUT_SET_VALUE_SEC, \\\n Sekunden, Hundertmillisekunden) # set and...\n self.bib.DapiSpecialCommand(self.handle, self.DAPI_SPECIAL_CMD_TIMEOUT, \\\n self.DAPI_SPECIAL_TIMEOUT_ACTIVATE, 0, 0) # ...activate\n timeout_status = self.analogDaTimeoutStatus()\n if Debug == 1 and timeout_status == 1:\n print(\"Timeout aktiv mit\",Sekunden,\"s,\",Hundertmillisekunden,\"ms.\")\n return(timeout_status)", "def test_timeout(self, mocker, mock_timedelta):\n\n tid = 289466\n site = \"mysite\"\n\n exception_response = self.generate_task_dictionary(\n tid, state=\"started\", completed=None\n )\n\n responses = [{\"json\": exception_response}]\n url = (\n \"https://cloudapi.acquia.com/v1/\"\n \"sites/prod:{site}/tasks/{tid}.json\".format(tid=tid, site=site)\n )\n\n mocker.register_uri(\"GET\", url, responses)\n\n with self.assertRaises(exceptions.AcquiaCloudTimeoutError):\n self.client.site(site).task(tid).wait(0)", "def handler(*args, **kwargs):\n raise TimeoutException(\"Test aborted due to timeout. 
Test was \" +\n \"expected to finish in less than {} second(s).\".format(time_limit))", "def pytest_timeout_set_timer(item, settings):", "def handler(signum, frame):\n\tglobal stop_flag\n\tstop_flag=True\n\tprint(\"Query timeout!!!!!!!!!!!\")\n\traise Exception(\"Timeout\")", "def on_timeout(self):\n pass", "def test_polling_plugin_timeout(self):\n pass", "def check_timeout(self, msg):\n if msg.clock.secs > self.timeout and not self.is_cancelled:\n rospy.loginfo(\"Test timed out, cancelling job\")\n self.utils.set_tag(name=self.test_name + \"_Status\", value=\"Failed\")\n self.utils.set_tag(name=self.test_name + \"_Timed_Out\", value=str(self.timeout))\n self.utils.cancel_job()", "def timeout(self):\n self._status_update(\"Pyloton: Timeout\")\n time.sleep(3)", "def test_RPC_TIMEOUT(self):\n self.assertIsInstance(constants.RPC_TIMEOUT, int,\n \"constants.RPC_TIMEOUT must be an integer.\")", "def pytest_timeout_set_timer(item, settings):\n tle.lib.set(int(settings.timeout), str(item).encode(\"utf-8\"))\n return True", "def settimeout(self, value: int) -> None:\n ...", "async def timeout(self, failed: bool = False) -> None:\n raise NotImplementedError()", "def SendTimeout(self) -> int:", "def SendTimeout(self) -> int:", "def test_timeout_pending(self):\n deadline = Deadline(MS)\n timeout = deadline.timeout()\n self.assertGreater(timeout, 0)\n self.assertLess(timeout, MS)", "def get_test_timeout(self):\n return None", "def StepTimeout(self):\n return recipe_api.StepTimeout", "def assert_timeout(self) -> None:\n if self._cancelled:\n raise asyncio.TimeoutError from None", "def test_timeout(self):\n # Attempt connection with short timeout\n with self.assertRaises(requests.exceptions.ReadTimeout):\n a = api.InvenTreeAPI(SERVER, username=USERNAME, password=PASSWORD, timeout=0.001) # noqa: F841", "def test_timeout(self) -> 'outputs.DurationResponse':\n return pulumi.get(self, \"test_timeout\")", "def test_timeout(self) -> 'outputs.DurationResponse':\n return pulumi.get(self, \"test_timeout\")", "def _handle_timeout(self, frame=None, **_):\n\n raise TimeOut.TimeOutError(self, frame)" ]
[ "0.7077678", "0.6608943", "0.65566635", "0.64567804", "0.6438783", "0.63034415", "0.63034415", "0.62909245", "0.6274172", "0.62536234", "0.6199818", "0.61930203", "0.61180824", "0.6080237", "0.6065386", "0.6058702", "0.6042492", "0.60340345", "0.6030219", "0.602094", "0.60089785", "0.60089785", "0.6008097", "0.60025424", "0.5992076", "0.59815407", "0.5950854", "0.5948193", "0.5948193", "0.5933087" ]
0.7469342
0
This command loads the default configuration of a D/A converter. The D/A converter channel is immediately set to an output voltage of 0V. Arguments (Startchannel, [Stopchannel>=Startchannel], [Debug=1])
def analogDaSetZero(self, Startchannel, Stopchannel=None, Debug=0):
    self.bib.DapiSpecialCommand.argtypes = \
        [c_ulong, c_ulong, c_ulong, c_ulong, c_ulong]
    self.bib.DapiSpecialCommand.restype = None
    if Stopchannel == None or Stopchannel == Startchannel:  #Stopchannel =
        for Startchannel in range(Startchannel, Startchannel+1):
            self.bib.DapiSpecialCommand(self.handle, self.DAPI_SPECIAL_CMD_DA, \
                self.DAPI_SPECIAL_DA_PAR_DA_LOAD_DEFAULT, Startchannel, 0)
            print("D/A Channel",Startchannel,"set to zero volts.")
    else:
        for Startchannel in range(Startchannel, Stopchannel+1):
            self.bib.DapiSpecialCommand(self.handle, self.DAPI_SPECIAL_CMD_DA, \
                self.DAPI_SPECIAL_DA_PAR_DA_LOAD_DEFAULT, Startchannel, 0)
            print("D/A Channel",Startchannel,"set to zero volts.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def default_channel(self) -> int:\r\n ...", "def configure_debug_options(self, initial_debug_options, ide_data):\r\n debug_options = copy.deepcopy(initial_debug_options)\r\n if \"cmsis-dap\" in debug_options[\"tool\"]:\r\n debug_options[\"server\"][\"arguments\"].extend( [\r\n \"-c\", \"adapter speed %s\" % (initial_debug_options.get(\"speed\") or \"20000\"),\r\n \"-c\", \"transport select swd\"\r\n ]\r\n )\r\n return debug_options", "async def defchannel(self, ctx, channel: str):\n self.data_check(ctx)\n server = ctx.message.server\n\n self.riceCog2[server.id][\"defchannel\"] = channel\n dataIO.save_json(self.warning_settings,\n self.riceCog2)\n await self.bot.say(\"Log channel is now: **{}**\".format(channel))", "def setup(self, channel: int, direction: int, pull_up_down=None, initial=None):\n self._check_mode()\n # ueberpruefe, ob die Richtung (direction) gueltig ist\n # wenn dies der Fall ist, erstelle aus der Zahl einen lesbaren Text\n if direction == GPIO.OUT:\n direction_text = \"OUT\"\n elif direction == GPIO.IN:\n direction_text = \"IN\"\n else:\n raise ValueError(\"An invalid direction was passed to setup()\")\n self.channels[channel] = direction\n print(f\"setup channel {channel} auf {direction_text} mit pull_up_down {pull_up_down} und initial {initial}\")", "def setUp(self):\n self.frequency = -2017.96\n self.tunneling = Wigner(\n frequency=(self.frequency, \"cm^-1\"),\n )", "def test_switch_channels(self):\n\t\t# not available yet, experimental\n\t\tpass", "async def startchannel(self, ctx, vc: discord.VoiceChannel):\n await self.config.guild(ctx.guild).pstart.set(vc.id)\n await self.config.guild(ctx.guild).pcat.set(vc.category_id)\n await ctx.send(\n _(\n \"Private starting channel set. Users can join this channel to use all features of private rooms.\\nI recommend not allowing members to speak in this channel.\"\n )\n )", "def config (self):\n import wikicode\n class Config (wikicode.extension):\n def run (self):\n self.send_page (\"Generic DC Setup\")\n wikicode.run_extension (Config)", "def test_default(self):\n control_channel = ControlChannel(123)\n\n self.assertEqual(control_channel.index, 123)\n self.assertEqual(control_channel.name, \"u123\")", "def debug_option(args, run):\n run.debug = True", "def setup():\n global channels\n #Get all necessary values\n dmx = int(request.args.get('dmx', default=\"-1\"))\n typus = request.args.get('type')\n custom_name = request.args.get('name', default=\"\")\n force = bool(request.args.get('force', default=0))\n #See if the type of light already exists\n if typus in all_lights:\n num = len(all_lights[typus])\n attr = all_lights[typus]\n else:\n return \"Light not in Database\"\n\n if not force:\n for i in range(dmx, (dmx+num)):\n if not channels[i] == '':\n return \"Channels already in use, force with parameter force=1\"\n break\n\n if custom_name == \"\":\n custom_name = typus + str(dmx)\n #Setup the name channels\n for i in range(num):\n channels[dmx + i] = (str(custom_name) + \" | \" + attr[i])\n print str(dmx+i) + str(channels[dmx+i])\n return json_back()", "def test_default(self):\n drive_channel = DriveChannel(123)\n\n self.assertEqual(drive_channel.index, 123)\n self.assertEqual(drive_channel.name, \"d123\")", "def set_channel(self, c, channel):\n try:\n self.binding.set_switcher_channel(channel)\n except Exception, e:\n self.handle_wavemeter_error(e)\n return False\n\n return True", "def test_default(self):\n measure_channel = MeasureChannel(123)\n\n self.assertEqual(measure_channel.index, 123)\n 
self.assertEqual(measure_channel.name, \"m123\")", "def set_specific_channel(channel_number):\n global interface\n\n print(\"Set channel to {} on interface {}\".format(channel_number, interface))\n system(f\"iwconfig {interface} channel {channel_number}\")", "def PYDSO010SETCHAN(self):\n ctx = self.item_start() # always first line of test\n\n chan = ctx.item.chan\n if not (0 < chan < 5):\n self.logger.error(\"Invalid channel number: {} (1-4 accepted)\".format(chan))\n self.item_end(ResultAPI.RECORD_RESULT_INTERNAL_ERROR)\n return\n\n self.shared_lock(self.DSO).acquire()\n\n # reset the scope to a known state\n self.dso.write('*RST')\n if chan != 1: # after reset, chan 1 is already on\n self.dso.write(':CHANnel1:DISPlay OFF') # turn off channel 1\n self.dso.write(':CHANnel{}:DISPlay ON'.format(chan)) # turn off channel 1\n\n self.dso.write(':CHANnel{}:SCALe 100mV'.format(chan))\n\n vpp = self.dso.query(':MEASure:VPP? CHANnel{}'.format(chan))\n value = float(vpp)\n _result, _bullet = ctx.record.measurement(\"VPP{}\".format(chan), value, ResultAPI.UNIT_VOLTS)\n\n self.log_bullet(\"Switched to channel {}\".format(chan))\n self.log_bullet(_bullet)\n time.sleep(0.1) # give it some time to sit here, else its too fast\n self.shared_lock(self.DSO).release()\n self.item_end() # always last line of test", "def cbDConfigPort( BoardNum, PortNum, Direction ):\n CHK( cbw.cbDConfigPort( BoardNum, PortNum, Direction ) )", "def display(self, channel1 = False, channel2 = False, channel3 = False, channel4 = False):\t\t\n\t\tself.scope.write(\":CHANnel1:DISPlay %s\"%bool2ONOFF(channel1))\n\t\tself.scope.write(\":CHANnel2:DISPlay %s\"%bool2ONOFF(channel2))\n\t\tself.scope.write(\":CHANnel3:DISPlay %s\"%bool2ONOFF(channel3))\n\t\tself.scope.write(\":CHANnel4:DISPlay %s\"%bool2ONOFF(channel4))", "def set_parameters(self, args):\n self.args = args\n\n if args.testing:\n self.delay_close()\n\n if args.source == \"simulation\":\n log.info(\"Create simulated spectra device\")\n self.dev = simulation.SimulatedSpectraDevice()\n\n elif args.source == \"sled\":\n log.info(\"Create single sled cobra\")\n self.dev = simulation.SimulatedCobraSLED()\n\n elif args.source == \"cobra\":\n log.info(\"Create DALSA cobra device\")\n #self.dev = devices.DalsaCobraDevice()\n self.dev = DALSA.Cobra()\n\n elif args.source == \"opto\":\n log.info(\"Create OPTO sensor cobra device\")\n self.dev = DALSA.OPTOCobra()\n\n elif args.source == \"basler\":\n log.info(\"Create DALSA basler device\")\n #self.dev = devices.DalsaBaslerDevice()\n self.dev = DALSA.BaslerSprint4K()\n\n self.dev.setup_pipe()\n self.setup_pipe_timer()", "async def default(self, ctx: Context, *, guild: int = None, channel: int = None):\n\n if not guild:\n guild = ctx.guild\n else:\n guild = self.bot.get_guild(guild)\n if not guild:\n return await ctx.message.add_reaction(\"⚠\")\n\n if guild.id not in self.active_guilds:\n return await ctx.message.add_reaction(\"⚠\")\n\n if not channel:\n channel = ctx.channel\n else:\n channel = self.bot.get_channel(channel)\n if not channel:\n return await ctx.message.add_reaction(\"⚠\")\n\n config = self.get_guild_config(guild)\n config[\"default_modlog\"] = str(channel.id)\n\n self.config.hmset(f\"guilds:{guild.id}\", config)\n self._config_cache[guild.id] = config\n\n await ctx.message.add_reaction(\"✅\")", "def set():\n #Get values\n dmx = int(request.args.get('dmx'))\n value = int(request.args.get('value', default=\"-1\"))\n color = request.args.get('color', default=\"#000000\").strip(\"#\")\n #Check if in usable range\n 
#Dismantle colors\n r = int(color[0:2], 16)\n g = int(color[2:4], 16)\n b = int(color[4:7], 16)\n c,m,y,k = rgb_to_cmyk(r, g, b)\n fixture = channels[dmx].split(\" | \")[0]\n print fixture\n \"\"\"\n max_count = len(all_lights[fixture])\n print max_count\n \"\"\"\n prev_fix = fixture\n if r+g+b != 0:\n count = -1\n while(True):\n if count >= 512:\n break\n count += 1\n if channels[count] == \"\":\n continue\n fixture = channels[count].split(\" | \")[0]\n if fixture != prev_fix:\n continue\n name = channels[count].split(\" | \")[1]\n print name\n if name == \"R\":\n adresses[count] = r\n if name == \"G\":\n adresses[count] = g\n if name == \"B\":\n adresses[count] = b\n if name == \"C\":\n adresses[count] = c\n if name == \"M\":\n adresses[count] = m\n if name == \"Y\":\n adresses[count] = y\n if name == \"K\":\n adresses[count] = k\n else:\n if not 0 <= value <= 255:\n return \"Invalid Value\"\n adresses[dmx] = value\n dmxsender.send(adresses)\n #Return Debug information\n return json_back()", "def start_or_continue(S,cfg,bands,confluence_fp='default'):\n if confluence_fp=='default':\n return start_confluence_log_file(S,cfg,bands)\n continue_config(S,cfg,bands,confluence_fp)", "def test_channel_definition(self):\n TopoObj('topo', data, channels=channels)", "def change_channel():\n global interface\n\n print(\"Change channels for interface {}\".format(interface))\n channel_number = 1\n\n while True:\n system(f\"iwconfig {interface} channel {channel_number}\")\n channel_number = channel_number % 14 + 1\n sleep(0.5)", "def start(self):\n self.report(f'INFO: started KKR Band Structure workflow version {self._wf_version}')\n wf_dict = self.inputs.wf_parameters.get_dict()\n # Count energy points only once\n if 'NPT2' in wf_dict.keys():\n npt2 = wf_dict.pop('NPT2', None)\n wf_dict['nepts'] = npt2\n # add missing default values\n for key, val in self._wf_default.items():\n if ((key not in wf_dict.keys()) and (key.swapcase() not in wf_dict.keys()) and (val is not None)):\n\n self.report(f'INFO: Using default wf parameter {key}: {val}')\n wf_dict[key] = val\n\n options_dict = self.inputs.options.get_dict()\n if options_dict == {}:\n self.report('INFO: Using default wf Options')\n options_dict = self._options_default\n self.ctx.append_text = options_dict.get('append_text', self._options_default['append_text'])\n self.ctx.prepend_text = options_dict.get('prepend_text', self._options_default['prepend_text'])\n self.ctx.additional_retrieve_list = options_dict.get(\n 'additional_retrieve_list', self._options_default['additional_retrieve_list']\n )\n self.ctx.withmpi = options_dict.get('withmpi', self._options_default['withmpi'])\n self.ctx.resources = options_dict.get('resources', self._options_default['resources'])\n self.ctx.max_wallclock_seconds = options_dict.get(\n 'max_wallclock_seconds', self._options_default['max_wallclock_seconds']\n )\n self.ctx.queue = options_dict.get('queue_name', self._options_default['queue_name'])\n self.ctx.custom_scheduler_commands = options_dict.get('custom_scheduler_commands', '')\n self.ctx.BS_params_dict = wf_dict\n self.ctx.BS_kkrparams = None # is set in set_params_BS\n self.ctx.BS_kpoints = None\n self.ctx.description_wf = self.inputs.get('description', self._wf_description)\n self.ctx.label_wf = self.inputs.get('label', self._wf_label)\n self.report(\n 'INFO: use the following parameter:\\n'\n 'withmpi: {}\\n'\n 'Resources: {}\\n'\n 'Walltime (s): {}\\n'\n 'queue name: {}\\n'\n 'scheduler command: {}\\n'\n 'description_wf: {}\\n'\n 'label_wf: {}\\n'\n 
'BS_params: {}\\n'.format(\n self.ctx.withmpi, self.ctx.resources, self.ctx.max_wallclock_seconds, self.ctx.queue,\n self.ctx.custom_scheduler_commands, self.ctx.description_wf, self.ctx.label_wf, self.ctx.BS_params_dict\n )\n )\n\n self.ctx.successful = True\n self.ctx.errors = []", "def setup(self, channels):\n self.channels = channels[:]", "def setup_channels():\n\n # Setup channel encoders\n for c in channels:\n channels[c].setup()\n print()", "def test_single_scan_report_full_scan_for_channels_with_enumerated_params(\n self):\n scan_settings = self.wifi_generate_scanner_scan_settings(\n self.run_extended_test, \"channels\",\n wutils.WifiEnums.REPORT_EVENT_FULL_SCAN_RESULT)\n self.log.debug(\"Full Scan settings:{}\\n{}\".format(\n len(scan_settings), scan_settings))\n self.wifi_scanner_single_scan_full(scan_settings[0])", "def set_defaults(self):\n if not self.HAS_DS9: # pragma: no cover\n return\n self.run('frame delete all')\n self.run('wcs degrees')\n if self.disp_parameters['tile']:\n self.run('tile yes')\n else:\n self.run('tile no')\n self.cs = str(self.disp_parameters['lock_image']).lower()\n self.lock()", "def chanDefaults(self) -> Dict[str, Any]:\n chanH = {}\n chanH[\"gain_stage1\"] = 1\n chanH[\"gain_stage2\"] = 1\n chanH[\"hchopper\"] = 0 # this depends on sample frequency\n chanH[\"echopper\"] = 0\n # channel output information (sensor_type, channel_type, ts_lsb, pos_x1, pos_x2, pos_y1, pos_y2, pos_z1, pos_z2, sensor_sernum)\n chanH[\"ats_data_file\"] = \"\"\n chanH[\"num_samples\"] = 0\n chanH[\"sensor_type\"] = \"\"\n chanH[\"channel_type\"] = \"\"\n chanH[\"ts_lsb\"] = 1\n # the lsb/scaling is not applied. data is raw voltage which needs to be scaled\n # an lsb is constructed from the scaling in the XTR/XTRX file to take the data to mV\n chanH[\"scaling_applied\"] = False # check this\n chanH[\"pos_x1\"] = 0\n chanH[\"pos_x2\"] = 0\n chanH[\"pos_y1\"] = 0\n chanH[\"pos_y2\"] = 0\n chanH[\"pos_z1\"] = 0\n chanH[\"pos_z2\"] = 0\n chanH[\"sensor_sernum\"] = 0\n return chanH" ]
[ "0.5998552", "0.54007995", "0.51367766", "0.50856996", "0.50539327", "0.49470997", "0.48775223", "0.48275897", "0.48265904", "0.48177746", "0.48160458", "0.48083013", "0.47990912", "0.47555563", "0.47544914", "0.4741228", "0.4726859", "0.47231343", "0.4722098", "0.4722084", "0.47213402", "0.47161314", "0.47056982", "0.47016415", "0.46983346", "0.4682042", "0.4680221", "0.46698323", "0.4661592", "0.46590644" ]
0.6567898
0
Initializes an Equipo (team) with a dictionary of attributes. The list of living Pokemon is stored in a tuple together with their moves.
def __init__(self, diccionario):
    self.numero = diccionario['numero']
    self.nombre = diccionario['equipo_nombre']
    self.pokmov = lectores.pokemon_y_movimiento_a_tuplas(diccionario)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self,jogadores):\r\n #Inicia alguns verificadores usados posteriormente na cena\r\n self.jogadores = jogadores\r\n self.teclas = [pygame.K_RETURN] + jogadores[2][4:7] + jogadores[3][4:7]\r\n self.tela = pygame.display.get_surface()\r\n self.area = self.tela.get_rect()\r\n self.width = self.tela.get_width()\r\n self.height = self.tela.get_height()\r\n self.deslocax = 0.\r\n self.parou = False\r\n self.primeiraiteracao = True\r\n\r\n #Carrega fundo da tela de versus\r\n self.fundo = Sprite(os.path.join('.','Imagens', \"vsback.png\"),(0,0),False,(self.width,self.height))\r\n\r\n #Carrega personagem player 1\r\n self.player1 = Sprite(os.path.join('.','Personagens', self.jogadores[0], \"vs.png\"),(0,0),False,(self.width*0.5,self.height*0.598333333))\r\n\r\n #Carrega personagem player 2\r\n self.player2 = Sprite(os.path.join('.','Personagens', self.jogadores[1], \"vs.png\"),(0,0),False,(self.width*0.5,self.height*0.598333333))", "def __init__(self, jugador):\n\n # Se llama al metodo del padre constructor.\n Level.__init__(self, jugador)\n\n #Cargamos la imagen de fondo.\n sprite_sheet_pantalla = SpriteSheet(\"imagenes/fondoactualizado.png\")\n \n # Carga de todos los sprite de la imagen hacia la derecha.\n imagen_1 = sprite_sheet_pantalla.obtener_imagen(0,1788, 896,894)\n self.fondo = imagen_1\n \n self.fondo.set_colorkey(constantes.BLANCO)\n self.limite_derecho = 740\n self.limite_izquierdo = 88\n self.limite_superior = -10\n self.limite_inferior = 686\n self.cambio_nivel_x = 396\n self.cambio_nivel_y = -16\n \n self.fondo.set_colorkey(constantes.BLANCO)\n self.limite_nivel = -2500\n\n # Lista con los bloques de plataformas, indicando la ubicacion x,y y el tipo \n nivel = [ [platforma.STONE, 250, 740],\n [platforma.STONE, 250, 680],\n [platforma.STONE, 250, 620],\n [platforma.STONE, 250, 560],\n [platforma.STONE, 250, 500],\n [platforma.STONE, 250, 440],\n [platforma.STONE, 250, 380],\n [platforma.STONE, 250, 320],\n [platforma.STONE, 310, 320],\n [platforma.STONE, 370, 320],\n [platforma.STONE, 430, 320],\n [platforma.STONE, 490, 320],\n [platforma.STONE, 550, 320],\n [platforma.STONE, 610, 320],\n [platforma.STONE, 610, 380],\n [platforma.STONE, 610, 440],\n [platforma.STONE, 610, 500],\n [platforma.STONE, 610, 560],\n [platforma.STONE, 610, 620],]\n \n #puntos\n puntos = Estrellas(ESTRELLA)\n puntos.rect.x = 150\n puntos.rect.y = 700\n puntos.jugador = self.jugador\n puntos.nivel = self\n self.lista_puntos.add(puntos) \n \n puntos = Estrellas(ESTRELLA)\n puntos.rect.x = 320\n puntos.rect.y = 390\n puntos.jugador = self.jugador\n puntos.nivel = self\n self.lista_puntos.add(puntos)\n \n puntos = Estrellas(ESTRELLA)\n puntos.rect.x = 366\n puntos.rect.y = 390\n puntos.jugador = self.jugador\n puntos.nivel = self\n self.lista_puntos.add(puntos)\n \n puntos = Estrellas(ESTRELLA)\n puntos.rect.x = 412\n puntos.rect.y = 390\n puntos.jugador = self.jugador\n puntos.nivel = self\n self.lista_puntos.add(puntos)\n \n puntos = Estrellas(ESTRELLA)\n puntos.rect.x = 458\n puntos.rect.y = 390\n puntos.jugador = self.jugador\n puntos.nivel = self\n self.lista_puntos.add(puntos)\n \n puntos = Estrellas(ESTRELLA)\n puntos.rect.x = 504\n puntos.rect.y = 390\n puntos.jugador = self.jugador\n puntos.nivel = self\n self.lista_puntos.add(puntos)\n \n puntos = Estrellas(ESTRELLA)\n puntos.rect.x = 560\n puntos.rect.y = 390\n puntos.jugador = self.jugador\n puntos.nivel = self\n self.lista_puntos.add(puntos)\n \n \n puntos = Estrellas(ESTRELLA)\n puntos.rect.x = 320\n puntos.rect.y = 467\n 
puntos.jugador = self.jugador\n puntos.nivel = self\n self.lista_puntos.add(puntos)\n \n puntos = Estrellas(ESTRELLA)\n puntos.rect.x = 366\n puntos.rect.y = 467\n puntos.jugador = self.jugador\n puntos.nivel = self\n self.lista_puntos.add(puntos)\n \n puntos = Estrellas(ESTRELLA)\n puntos.rect.x = 412\n puntos.rect.y = 467\n puntos.jugador = self.jugador\n puntos.nivel = self\n self.lista_puntos.add(puntos)\n \n puntos = Estrellas(ESTRELLA)\n puntos.rect.x = 458\n puntos.rect.y = 467\n puntos.jugador = self.jugador\n puntos.nivel = self\n self.lista_puntos.add(puntos)\n \n puntos = Estrellas(ESTRELLA)\n puntos.rect.x = 504\n puntos.rect.y = 467\n puntos.jugador = self.jugador\n puntos.nivel = self\n self.lista_puntos.add(puntos)\n \n puntos = Estrellas(ESTRELLA)\n puntos.rect.x = 560\n puntos.rect.y = 467\n puntos.jugador = self.jugador\n puntos.nivel = self\n self.lista_puntos.add(puntos)\n \n \n puntos = Estrellas(ESTRELLA)\n puntos.rect.x = 320\n puntos.rect.y = 544\n puntos.jugador = self.jugador\n puntos.nivel = self\n self.lista_puntos.add(puntos)\n \n puntos = Estrellas(ESTRELLA)\n puntos.rect.x = 366\n puntos.rect.y = 544\n puntos.jugador = self.jugador\n puntos.nivel = self\n self.lista_puntos.add(puntos)\n \n puntos = Estrellas(ESTRELLA)\n puntos.rect.x = 412\n puntos.rect.y = 544\n puntos.jugador = self.jugador\n puntos.nivel = self\n self.lista_puntos.add(puntos)\n \n puntos = Estrellas(ESTRELLA)\n puntos.rect.x = 458\n puntos.rect.y = 544\n puntos.jugador = self.jugador\n puntos.nivel = self\n self.lista_puntos.add(puntos)\n \n puntos = Estrellas(ESTRELLA)\n puntos.rect.x = 504\n puntos.rect.y = 544\n puntos.jugador = self.jugador\n puntos.nivel = self\n self.lista_puntos.add(puntos)\n \n puntos = Estrellas(ESTRELLA)\n puntos.rect.x = 560\n puntos.rect.y = 544\n puntos.jugador = self.jugador\n puntos.nivel = self\n self.lista_puntos.add(puntos)\n \n \n puntos = Estrellas(ESTRELLA)\n puntos.rect.x = 320\n puntos.rect.y = 621\n puntos.jugador = self.jugador\n puntos.nivel = self\n self.lista_puntos.add(puntos)\n \n puntos = Estrellas(ESTRELLA)\n puntos.rect.x = 366\n puntos.rect.y = 621\n puntos.jugador = self.jugador\n puntos.nivel = self\n self.lista_puntos.add(puntos)\n \n puntos = Estrellas(ESTRELLA)\n puntos.rect.x = 412\n puntos.rect.y = 621\n puntos.jugador = self.jugador\n puntos.nivel = self\n self.lista_puntos.add(puntos)\n \n puntos = Estrellas(ESTRELLA)\n puntos.rect.x = 458\n puntos.rect.y = 621\n puntos.jugador = self.jugador\n puntos.nivel = self\n self.lista_puntos.add(puntos)\n \n puntos = Estrellas(ESTRELLA)\n puntos.rect.x = 504\n puntos.rect.y = 621\n puntos.jugador = self.jugador\n puntos.nivel = self\n self.lista_puntos.add(puntos)\n \n puntos = Estrellas(ESTRELLA)\n puntos.rect.x = 560\n puntos.rect.y = 621\n puntos.jugador = self.jugador\n puntos.nivel = self\n self.lista_puntos.add(puntos)\n \n \n \n \n\n # Se busca en la lista anterior creada y se le agregan las plataformas al nivel.\n for plataforma in nivel:\n bloque = platforma.Plataforma(plataforma[0])\n bloque.rect.x = plataforma[1]\n bloque.rect.y = plataforma[2]\n bloque.jugador = self.jugador\n self.lista_plataformas.add(bloque)", "def __init__(self, name, element, health, speed):\n super().__init__(name, element, health, speed)\n #Move list unique to all Water type Pykemon\n self.moves = ['Bite', 'Splash', 'Dive', 'Water Cannon']", "async def loadpokemon(self, ctx):\n await self.bot.di.new_items(ctx.guild, (ServerItem(**item) for item in self.bot.pokemonitems.values()))\n await ctx.send(await 
_(ctx, \"Successfully added all Pokemon items!\"))", "def dialoguer(self, dialog,placeDialog):\n perso = []\n \"\"\"if self.niveau.numero == 1 and placeDialog == 0 : #virer cette ligne quand tous les dialogues auront été faits\n musicDialogue.play(pygame.mixer.Sound(\"resources/niveau/{2}/{0}.wav\".format(placeDialog,dialog.counter,self.niveau.numero)))\"\"\"\n for liste in dialog.characters:\n img = pygame.image.load(liste[1]).convert_alpha()\n perso.append([liste[0], img, liste[2]])\n for p in perso:\n #print(p)\n #print(p[1].get_rect())\n rect = p[1].get_rect()\n rect.x, rect.y = p[2]\n p[2] = (rect.x, self.fenetre.hauteur - 100 - rect.height)\n if p[2][0] == 500 :\n p[2]= (constantes.largeur-rect.width,self.fenetre.hauteur-100-rect.height)\n self.fenetre.rafraichir(self.moleculeJoueur.hp)\n while dialog.notFinished:\n punchline = dialog.getPunchline()\n try : #virer cette ligne quand tous les dialogues auront été faits\n audio = pygame.mixer.Sound(\"resources/niveau/{2}/{3}/{0},{1}.wav\".format(placeDialog,dialog.counter,self.niveau.numero,constantes.langue))\n \"\"\"volume = audioDialogue.get_volume()\n multiplier = 1/volume\n audioDialogue.set_volume(multiplier*(1-punchline[1])+0.4,multiplier*(punchline[1])+0.4)\"\"\"\n audioDialogue.play(audio)\n except:\n pass\n posX, posY = perso[punchline[1]][2]\n\n #print(punchline[1][0])\n #pygame.draw.rect(self.fenetre.fen, pygame.Color(0, 0, 0, 0), pygame.Rect(0, 0, self.fenetre.largeur, self.fenetre.hauteur))\n #self.fenetre.fen.blit(sombre, (0,0))\n self.fenetre.assombrir()\n self.fenetre.fen.blit(perso[punchline[1]][1], (posX, posY))\n self.fenetre.dessinerCadre(0, self.fenetre.hauteur-100, 100, self.fenetre.largeur)\n self.font = self.fenetre.font\n surface = self.font.render(perso[punchline[1]][0], 0, pygame.Color(255, 0, 0, 0))\n self.fenetre.dessinerCadre(posX+50, posY-25, 30, surface.get_rect().width+10)\n self.fenetre.ecrireTexte(perso[punchline[1]][0], posX + 55, posY - 20)\n self.fenetre.ecrireTexte(punchline[0], 25, self.fenetre.hauteur-80)\n event = pygame.event.wait()\n #audio = pygame.mixer.Sound(\"resources/temporaire/\"+str(dialog.counter)+\".wav\")\n #audioDialogue(audio)\n reading = True\n while reading:\n event = pygame.event.wait()\n if event.type == KEYDOWN:\n if event.key == constantes.touches[0]:\n reading = False\n if event.key == K_LEFT:\n reading = False\n dialog.counter-=2\n if dialog.counter<0:\n dialog.counter = 0\n\n self.fenetre.rafraichir(self.moleculeJoueur.hp)\n self.fenetre.fen.blit(perso[punchline[1]][1], (posX, posY))\n audioDialogue.stop()\n musicDialogue.stop()", "def load_items(self):\n # LIST OF THE ITEMS TO COLLECT TO WIN\n list_items = [self.aiguille, self.ether, self.tube]\n # CALLING OF THE METHODS define_position\n list_items[0].define_position_item_1()\n list_items[1].define_position_item_2()\n list_items[2].define_position_item_3()\n # LOOP FOR, FOREACH ITEM IN THE LIST, WE DRAW IT ON THE SCREEN\n for item in list_items:\n # CALLING OF THE METHOD display_items\n item.display_items(self.window)\n # IF MACGVER COLLECTS AN ITEM...\n if (self.macgyver.position_x == list_items\n [list_items.index(item)].obj_sprite_x) \\\n and (self.macgyver.position_y == list_items\n [list_items.\n index(item)].obj_sprite_y):\n # IT MAKES A SOUND\n pygame.mixer.music.load(ITEM_SOUND)\n pygame.mixer.music.play()\n # IT INCREMENTS MACGYVER'S BAG\n self.macgyver.bag += 1\n # IT MOVES THE OBJECT TO THE BAG\n list_items[list_items.index(item)].obj_sprite_x = \\\n TILESIZE*(5 + list_items.index(item))\n 
list_items[list_items.index(item)].obj_sprite_y = \\\n NBCASES*TILESIZE\n # IT HIDES THE QUESTIONS MARK\n self.noitem.fill(TRANSPARENT)", "def equipar(self, nome, comando):\n\n try:\n com, mao = comando.split()\n except ValueError:\n com = comando\n mao = None\n if com:\n usr_glb = self.orm.get_global(nome)\n bolsa = json.loads(usr_glb.inventario)\n slots = json.loads(usr_glb.equipado)\n if any([i[0] == com for i in bolsa]):\n eqp = self.orm.get_item(com).equipa\n if eqp == 'maos':\n if mao in ['mao_direita', 'mao_esquerda']:\n slots[mao] = com\n self.orm.update_global(nome, equipado=json.dumps(slots))\n else:\n return f\"voce tem que escolher se quer equipar na 'mao_direita' ou 'mao_esquerda'.\"\n else:\n if eqp:\n slots[eqp] = com\n self.orm.update_global(nome, equipado=json.dumps(slots))\n else:\n return 'voce nao pode equipar este item, tente USAR ao inves disso.'\n return f\"'{com}' foi equipado\"\n return 'esse item não existe. Escreva INVENTARIO para ver o que voce possui.'\n return 'voce nao escreveu qual item do inventario quer equipar.'", "def __init__(self, altura, peso, edad):\n\t\tself.altura = altura # OJO TODAS LAS VARIABLES SON PUBLICAS \n\t\tself.peso = peso \n\t\tself.edad = edad\n\t\tself.profesion = \"\" # esta la inicializamos nosotros\n\t\tself.lista_tareas = []\n\t\tself.__privado = 1 # este atributo es privado no podemos acceder a el desde fuera", "def __init__(self, sistema, nombre):\r\n productos = sistema.productos\r\n\r\n self.nombre = nombre\r\n # TODO modificar con datos reales\r\n self.carga = random.choice(productos)\r\n\r\n if random.random() <= 0.5:\r\n self.tipo = \"Descarga\"\r\n self.peso = 28 # TODO analizar eliminación\r\n else:\r\n self.peso = 0\r\n self.tipo = \"Carga\"\r\n\r\n niveles = {self.carga: self.peso}\r\n\r\n self.trailer = MedioDeAlmacenamiento(sistema, str(self.nombre), 1, niveles, 28, self.peso)\r\n\r\n self.manipulado = sistema.event()\r\n self.transbordo = \"No\"", "def __init__(self, game):\n self.rooms = self.load_rooms(f\"data/{game}Rooms.txt\")\n self.items = self.load_items(f\"data/{game}Items.txt\")\n self.inventory = self.load_inventory()\n self.current_room = self.rooms[0]\n self.idlist = []\n self.player = Inventory()", "def __init__(self):\n\t\tself.Nombre = \"\"\n\t\tself.Apellido = \"\"\n\t\tself.Edad = \"\"\n\t\tself.Sexo = \"Masculino\"\n\t\t\"\"\"Fecha de la carga del paciente dentro del software\"\"\"\n\t\tself.Fecha = strftime(\"%H : %M : %S\", gmtime())\n\t\t\"\"\"Estudio a realizar sobre la muestra\"\"\"\n\t\tself.Test = \"GLU\"\n\t\t\"\"\"Numero identificatorio unico de paciente en estudio\"\"\"\n\t\tself.ID = 0\n\t\t\"\"\"Posicion dentro del carrusel de muestras\"\"\"\n\t\tself.Posicion = \"\"\n\t\t\"\"\" La prioridad tiene dos estados: Es \"True\" si se requiere \n\t\tque el estudio para este paciente sea \n\t\trealizado inmediatamente \"\"\"\n\t\tself.Prioridad = False\n\t\t\"\"\" Boton desactivado de informe \"\"\"\n\t\tself.Informe = GdkPixbuf.Pixbuf.new_from_file('./Glade/botonnodisponible2.png')\n\t\t\"\"\"Resultado\"\"\"\n\t\tself.Resultado = \"5 g/ml\"\n\t\t\"\"\"Valor inicial de la barra de progreso\"\"\"\n\t\tself.Progreso = 1", "def craft(self, items):\n\n if items[0].looted and items[1].looted and items[2].looted:\n print(\"Seringue fabriquée ! 
Vous pouvez endormir le garde.\")\n self.stuff = [\"seringue\"]", "def equip(self, command):\n\n if len(command) > 1:\n if not self.weapon:\n for item in self.inventory:\n if item.name == command[1]:\n if command[1] == 'knife' or command[1] == 'rock' or command[1] == 'stick' or command[1] == 'lamp':\n self.inventory.remove(item)\n self.weapon.append(item)\n print(\"You equipped a \" + item.name)\n return\n else:\n print(\"You can't equip that\")\n else:\n print('You cannot equip two items \\nYou must unequip the ' + self.weapon[0].name + ' first.')\n else:\n print(\"Equip what?\")", "def pickUp(self):\n pos = self.getRoverLocation()\n item = self.map[pos.y,pos.x]\n if type(item) == Part:\n self.inventory.addPart(str(item))\n self.map[pos.y,pos.x] = None", "def __init__(self, name, location, health):\n self.name = name\n self.location = location\n self.inventory = []\n self.weapon = []\n self.health = health", "def pintarIMAGENENMAPA(self, pos):\n # Agrego al vector que controla las images\n k = (pos[0], pos[1], \"img\"+str(self.idIMG))\n # Si deseo pintar una silla\n if self.queIMGAgregar == 1:\n self.telaMAPA.create_image(k[0], k[1], image=self.imgSilla, tag=k[2])\n # Como fue agregado un elemento en el mapa procedo a registrarlo\n self.cotroladoraIMGREG.append(k)\n # Como una img fue agregada procedo a aumentar el id\n self.idIMG = self.idIMG + 1\n\n # Si deseo pintar una mesa\n if self.queIMGAgregar == 2:\n self.telaMAPA.create_image(k[0], k[1], image=self.imgMesa, tag=k[2])\n # Como fue agregado un elemento en el mapa procedo a registrarlo\n self.cotroladoraIMGREG.append(k)\n # Como una img fue agregada procedo a aumentar el id\n self.idIMG = self.idIMG + 1\n\n # Se deseo pintar una nevera\n if self.queIMGAgregar == 3:\n self.telaMAPA.create_image(k[0], k[1], image=self.imgNevera, tag=k[2])\n # Como fue agregado un elemento en el mapa procedo a registrarlo\n self.cotroladoraIMGREG.append(k)\n # Como una img fue agregada procedo a aumentar el id\n self.idIMG = self.idIMG + 1\n\n # Si deseo pintar una cama\n if self.queIMGAgregar == 4:\n self.telaMAPA.create_image(k[0], k[1], image=self.imgCama, tag=k[2])\n # Como fue agregado un elemento en el mapa procedo a registrarlo\n self.cotroladoraIMGREG.append(k)\n # Como una img fue agregada procedo a aumentar el id\n self.idIMG = self.idIMG + 1\n\n # Si deseo pintar un sofa\n if self.queIMGAgregar == 5:\n self.telaMAPA.create_image(k[0], k[1], image=self.imgSofa, tag=k[2])\n # Como fue agregado un elemento en el mapa procedo a registrarlo\n self.cotroladoraIMGREG.append(k)\n # Como una img fue agregada procedo a aumentar el id\n self.idIMG = self.idIMG + 1\n\n # Si deseo pintar tv\n if self.queIMGAgregar == 6:\n self.telaMAPA.create_image(k[0], k[1], image=self.imgTV, tag=k[2])\n # Como fue agregado un elemento en el mapa procedo a registrarlo\n self.cotroladoraIMGREG.append(k)\n # Como una img fue agregada procedo a aumentar el id\n self.idIMG = self.idIMG + 1\n\n # Si deseo pintar lampara\n if self.queIMGAgregar == 7:\n self.telaMAPA.create_image(k[0], k[1], image=self.imgLampara, tag=k[2])\n # Como fue agregado un elemento en el mapa procedo a registrarlo\n self.cotroladoraIMGREG.append(k)\n # Como una img fue agregada procedo a aumentar el id\n self.idIMG = self.idIMG + 1\n\n # Si deseo pintar planta\n if self.queIMGAgregar == 8:\n self.telaMAPA.create_image(k[0], k[1], image=self.imgPlanta, tag=k[2])\n # Como fue agregado un elemento en el mapa procedo a registrarlo\n self.cotroladoraIMGREG.append(k)\n # Como una img fue agregada procedo 
a aumentar el id\n self.idIMG = self.idIMG + 1\n\n # Si deseo pintar sanitario\n if self.queIMGAgregar == 9:\n self.telaMAPA.create_image(k[0], k[1], image=self.imgSanitario, tag=k[2])\n # Como fue agregado un elemento en el mapa procedo a registrarlo\n self.cotroladoraIMGREG.append(k)\n # Como una img fue agregada procedo a aumentar el id\n self.idIMG = self.idIMG + 1\n\n # Si deseo pintar lavamanos\n if self.queIMGAgregar == 10:\n self.telaMAPA.create_image(k[0], k[1], image=self.imgLavamanos, tag=k[2])\n # Como fue agregado un elemento en el mapa procedo a registrarlo\n self.cotroladoraIMGREG.append(k)\n # Como una img fue agregada procedo a aumentar el id\n self.idIMG = self.idIMG + 1\n\n # Si deseo pintar la ducha\n if self.queIMGAgregar == 11:\n self.telaMAPA.create_image(k[0], k[1], image=self.imgDucha, tag=k[2])\n # Como fue agregado un elemento en el mapa procedo a registrarlo\n self.cotroladoraIMGREG.append(k)\n # Como una img fue agregada procedo a aumentar el id\n self.idIMG = self.idIMG + 1", "def __init__(self, ventana_id):\n\n GObject.Object.__init__(self)\n\n self.name = \"MplayerReproductor\"\n self.ventana_id = ventana_id\n self.mplayer = False\n self.salida = False\n self.entrada = False\n self.estado = False\n self.duracion = 0\n self.posicion = 0\n self.volumen = 0\n self.actualizador = False\n self.uri = False\n self.video_in_stream = False\n\n self.config = {\n 'saturacion': 0,\n 'contraste': 0,\n 'brillo': 0,\n 'hue': 0,\n 'gamma': 0\n }\n\n self.efectos = []\n self.config_efectos = {}", "def __init__(self, game, name, pos=[0, 0], spin=0, world=1):\n super(Item, self).__init__(game, Surface((1, 1)))\n self.name = name\n self.file = name.lower().replace(' ', '_') + '.png'\n self.pos = pos\n self.dead = 0\n self.belongs = True\n self.id = uuid4()\n self.image = self.getSurface(name)\n self.rect = self.image.get_rect()\n self.rect.x = self.pos[0]\n self.rect.y = self.pos[1]\n if spin:\n self.vector = [-7, -7]\n else:\n self.vector = [0, 0]\n # decides to put item in world or not\n if world:\n self.game.EntityHandler.world_items.append(self)", "def iniciarjuego():\r\n for i in range(4):\r\n if i == 1 or i == 0:\r\n color = (250,208,120,98)\r\n else:\r\n color = (159, 250, 120,98)\r\n x = random.choice(cuadrados)\r\n listafiguras.append(x)\r\n dibujarcuadrados(x, color)\r\n cuadrados.remove(x)\r\n\r\n for i in range(4):\r\n if i == 0 or i == 1:\r\n color = (195, 139, 255,100)\r\n else:\r\n color = (250, 242, 120,98)\r\n x = random.choice(cuadrados)\r\n listafiguras.append(x)\r\n dibujarcirculos(x, color)\r\n cuadrados.remove(x)\r\n\r\n for i in range(4):\r\n if i == 0 or i == 1:\r\n color = (250, 145, 137,98)\r\n else:\r\n color = (126, 139, 250,98)\r\n x = random.choice(cuadrados)\r\n listafiguras.append(x)\r\n dibujartriangulos(x, color)\r\n cuadrados.remove(x)\r\n\r\n for i in range(4):\r\n if i == 0 or i == 1:\r\n color = (176, 255, 237,100)\r\n else:\r\n color = (255, 176, 228,100)\r\n x = random.choice(cuadrados)\r\n listafiguras.append(x)\r\n dibujardiamantes(x, color)\r\n cuadrados.remove(x)", "async def equip(self, ctx, *args):\n if has_post_permission(ctx.guild.id, ctx.channel.id):\n item = ' '.join(args)\n out = ch.equip_item(ctx.user_object, item.lower())\n await ctx.send(out)", "def ubicar_portaviones():\n tamano = Portaviones.tamano #se importa el tamano del barco desde su clase\n cantidad = Portaviones.cantidad #se importa la cantidad de barcos de este tamano desde su clase\n orientacion = orientaciones[(randint(0, 1))] #elige aleatoriamente el index de la 
tupla orientaciones = (\"Vertical\", \"Horizontal\")\n while cantidad > 0:\n if orientacion == \"Vertical\":\n #se eligen random las filas y las columnas\n coor_fila = randint(1, numero_filas) \n coor_columna = randint(1, numero_columnas)\n while (coor_fila + tamano) > 10: #como su orientacion es vertical la fila incial del barco mas su tamano (3) no puede ser mayor que 10 porque se saldria del mapa\n coor_fila = randint(1,numero_filas)\n ubicacion = (coor_fila, coor_columna)\n lista_temporal.append(ubicacion) #lista donde se ubicaran temporalmente las ubicaciones de los barcos\n while len(lista_temporal) < tamano: #sacar las posiciones restantes \n coor_fila += 1\n ubicacion = (coor_fila, coor_columna)\n lista_temporal.append(ubicacion)\n cantidad -= 1\n elif orientacion == \"Horizontal\":\n #se eligen random las filas y las columnas\n coor_fila = randint(1, numero_filas)\n coor_columna = randint(1, numero_columnas)\n while (coor_columna + tamano) > 10: #como su orientacion es horizontal la columna incial del barco mas su tamano (3) no puede ser mayor que 10 porque se saldria del mapa\n coor_columna = randint(1, numero_columnas)\n ubicacion = (coor_fila, coor_columna)\n lista_temporal.append(ubicacion)\n while len(lista_temporal) < tamano: #sacar las posiciones restantes\n coor_columna += 1\n ubicacion = (coor_fila, coor_columna)\n lista_temporal.append(ubicacion)\n for x in lista_temporal:\n lista_ubicacion_barco.append(x) #se agregan las ubicaciones ya validadas a una lista general donde iran todas las posiciones de los barcos\n coordenadas_portaviones.append(x)\n lista_temporal.clear() #se limpia la lista para que pueda ser usada en los siguientes barcos\n cantidad -= 1", "async def fruitmachine(self, ctx: commands.Context):\n slots = [get(ctx.guild.emojis, name='roobs'),\n get(ctx.guild.emojis, name='wess'),\n get(ctx.guild.emojis, name='yeng'),\n get(ctx.guild.emojis, name='blek'),\n get(ctx.guild.emojis, name='pyrr'),\n get(ctx.guild.emojis, name='noodle'),\n get(ctx.guild.emojis, name='nora'),\n get(ctx.guild.emojis, name='renbo'),\n get(ctx.guild.emojis, name='hapbun'),\n get(ctx.guild.emojis, name='hapshork'),\n get(ctx.guild.emojis, name='skatergorl'),\n get(ctx.guild.emojis, name='rainbowgorl')]\n\n content = discord.Embed()\n\n def gen_post(player, first, second, third, under_text=None, finish=False):\n content.description = \"**Welcome to Yutu's Casino {}!**\\n\\n\".format(ctx.author)\n content.description += \"**[ {} {} {} ]**\\n\\n\".format(first, second, third)\n if under_text is not None:\n content.description += \"{}\\n\".format(under_text)\n if player.coins == 0:\n content.description += \"You are out of coins.\\n\\n\"\n else:\n content.description += \"You currently have **{}** coins.\\n\\n\".format(player.coins)\n if finish:\n content.description += \"Thank you for playing!\"\n else:\n content.description += \"Add a 🔁 react to spin the slots. 
Add ❌ to stop.\"\n return content\n\n with orm.db_session:\n await ctx.message.delete()\n\n try:\n player = self.Player[ctx.author.id]\n except orm.ObjectNotFound:\n player = self.Player(id=ctx.author.id, coins=10)\n gen_post(player, '❓', '❓', '❓')\n post = await ctx.send(embed=content)\n await post.add_reaction('🔁')\n await post.add_reaction('❌')\n\n def chk(reaction, user):\n return (str(reaction.emoji) in ['❌', '🔁'] and\n user == ctx.author and\n reaction.message.id == post.id)\n\n while True:\n if player.coins == 0:\n break\n try:\n react, _ = await ctx.bot.wait_for(\"reaction_add\", check=chk, timeout=300)\n except asyncio.TimeoutError:\n break\n if str(react.emoji) == '❌':\n break\n player.coins -= 1\n first, second, third = random.choice(slots), random.choice(slots), random.choice(slots)\n\n if first == second == third:\n player.coins += 20\n tag = \":trophy: Jackpot! :trophy:\\nYou win 20 coins!\"\n elif first == second or first == third or second == third:\n player.coins += 5\n tag = \"You win 5 coins!\"\n else:\n tag = \"Better luck next time.\"\n gen_post(player, first, second, third, under_text=tag)\n await post.edit(embed=content)\n gen_post(player, '❓', '❓', '❓', finish=True)\n await post.edit(embed=content, delete_after=30)", "def __init__(self, name, element, health, speed):\n super().__init__(name, element, health, speed)\n #Move list unique to all Grass type Pykemon\n self.moves = ['Vine Whip', 'Wrap', 'Grow', 'Leaf Blade']", "def place_pieces(self):\n # Soldiers\n # RED\n self.add_piece('a4', Soldier('RED'))\n self.add_piece('c4', Soldier('RED'))\n self.add_piece('e4', Soldier('RED'))\n self.add_piece('g4', Soldier('RED'))\n self.add_piece('i4', Soldier('RED'))\n # BLUE\n self.add_piece('a7', Soldier('BLUE'))\n self.add_piece('c7', Soldier('BLUE'))\n self.add_piece('e7', Soldier('BLUE'))\n self.add_piece('g7', Soldier('BLUE'))\n self.add_piece('i7', Soldier('BLUE'))\n # Cannons\n # RED\n self.add_piece('b3', Cannon('RED'))\n self.add_piece('h3', Cannon('RED'))\n # BLUE\n self.add_piece('b8', Cannon('BLUE'))\n self.add_piece('h8', Cannon('BLUE'))\n # Generals\n # RED\n self.add_piece('e2', General('RED'))\n # BLUE\n self.add_piece('e9', General('BLUE'))\n # Chariots\n # RED\n self.add_piece('a1', Chariot('RED'))\n self.add_piece('i1', Chariot('RED'))\n # BLUE\n self.add_piece('a10', Chariot('BLUE'))\n self.add_piece('i10', Chariot('BLUE'))\n\n # Horses\n # RED\n self.add_piece('c1', Horse('RED'))\n self.add_piece('h1', Horse('RED'))\n # BLUE\n self.add_piece('c10', Horse('BLUE'))\n self.add_piece('h10', Horse('BLUE'))\n # Elephants\n # RED\n self.add_piece('b1', Elephant('RED'))\n self.add_piece('g1', Elephant('RED'))\n # BLUE\n self.add_piece('b10', Elephant('BLUE'))\n self.add_piece('g10', Elephant('BLUE'))\n # Advisors\n # RED\n self.add_piece('d1', Guard('RED'))\n self.add_piece('f1', Guard('RED'))\n # BLUE\n self.add_piece('d10', Guard('BLUE'))\n self.add_piece('f10', Guard('BLUE'))", "def instantiate_pokemon(self\n , type:str\n , hp: int = 0\n , attack: int = 0\n , defense: int = 0\n , sp_atk: int = 0\n , sp_def: int = 0\n , speed: int = 0\n , level: int = 0\n , name: str = \"\"\n , common_attacks = [AbstractAttack]) -> AbstractPokemon:\n\n if type==\"Supporter\" :\n pokemon = SupporterPokemon(\n stat_max=Statistic(\n hp=hp,\n attack=attack,\n defense=defense,\n sp_atk=sp_atk,\n sp_def=sp_def,\n speed=speed\n )\n ,stat_current=Statistic(\n hp=hp,\n attack=attack,\n defense=defense,\n sp_atk=sp_atk,\n sp_def=sp_def,\n speed=speed\n )\n , level=level\n , name=name\n , 
common_attacks=common_attacks)\n elif type==\"Speedster\" :\n pokemon = SpeedsterPokemon(\n stat_max=Statistic(\n hp=hp,\n attack=attack,\n defense=defense,\n sp_atk=sp_atk,\n sp_def=sp_def,\n speed=speed\n )\n ,stat_current=Statistic(\n hp=hp,\n attack=attack,\n defense=defense,\n sp_atk=sp_atk,\n sp_def=sp_def,\n speed=speed\n )\n , level=level\n , name=name\n , common_attacks=common_attacks)\n elif type==\"Attacker\" :\n pokemon = AttackerPokemon(\n stat_max=Statistic(\n hp=hp,\n attack=attack,\n defense=defense,\n sp_atk=sp_atk,\n sp_def=sp_def,\n speed=speed\n )\n ,stat_current=Statistic(\n hp=hp,\n attack=attack,\n defense=defense,\n sp_atk=sp_atk,\n sp_def=sp_def,\n speed=speed\n )\n , level=level\n , name=name\n , common_attacks=common_attacks)\n elif type==\"Defender\" :\n pokemon = DefenderPokemon(\n stat_max=Statistic(\n hp=hp,\n attack=attack,\n defense=defense,\n sp_atk=sp_atk,\n sp_def=sp_def,\n speed=speed\n )\n ,stat_current=Statistic(\n hp=hp,\n attack=attack,\n defense=defense,\n sp_atk=sp_atk,\n sp_def=sp_def,\n speed=speed\n )\n , level=level\n , name=name\n , common_attacks=common_attacks)\n elif type==\"All-Rounder\" :\n pokemon = AllRounderPokemon(\n stat_max=Statistic(\n hp=hp,\n attack=attack,\n defense=defense,\n sp_atk=sp_atk,\n sp_def=sp_def,\n speed=speed\n )\n ,stat_current=Statistic(\n hp=hp,\n attack=attack,\n defense=defense,\n sp_atk=sp_atk,\n sp_def=sp_def,\n speed=speed\n )\n , level=level\n , name=name\n , common_attacks=common_attacks)\n else :\n raise Exception(f\"{type} n'est pas un type valide\")\n\n return pokemon", "async def pointstop(self, ctx):\n inventories = get_file(\"inventories\")\n players_points = {}\n\n for player in inventories.items():\n player_points = 0\n for item in player[1][\"items\"]:\n player_points += item[\"points\"]\n players_points[player[0]] = player_points\n\n ptop = sorted(players_points.items(), key=lambda x: x[1], reverse=True)\n\n player_field = \"\"\n points_field = \"\"\n rank = 1\n\n for player in ptop:\n player_field += f\"`#{rank}` <@{player[0]}>\\n\"\n points_field += f\"`{player[1]}`\\n\"\n rank += 1\n\n embed = discord.Embed(color=default_color)\n embed.set_author(name=\"🏆 Classement des points\")\n embed.add_field(name=\"[#] Joueur\", value=player_field)\n embed.add_field(name=\"Points\", value=points_field)\n embed = set_footer(embed, ctx)\n await ctx.send(embed=embed)", "def pickUpObject(player):\n for treasure in Treasure.List:\n distance2 = (treasure.x-player.x)*(treasure.x-player.x)+(treasure.y-player.y)*(treasure.y-player.y)\n if distance2 < 4 * (treasure.width * treasure.width+ treasure.height*treasure.height):\n if not player.treasureCaptured:\n player.treasureCaptured = True\n treasure.isCaptured = True\n treasure.showCaptured()", "def test_equip_helmet(self):\n inv_str = self.inv.pretty\n item_str = self.item_helmet.pretty\n ids_to_unequip = self.inv.equip(self.item_helmet)\n\n self.rebuild_instance()\n inv_str2 = self.inv.pretty\n helmet = self.inv.head.pretty\n\n assert inv_str.replace(\n \"head=None\", \"head='<HeadArmour(id=2)>'\").replace(\n \"equipped=[]\", \"equipped='[<HeadArmour(id=2)>]'\").replace(\n \"unequipped='[<HeadArmour(id=2)>]'\", \"unequipped=[]\") == inv_str2\n\n assert item_str.replace(\n \"equipped=False\", \"equipped=True\").replace(\n \"unequipped_position=0\", \"unequipped_position=None\") == helmet", "def algoritmo(posicionInicial):\n\tglobal vertices, lados\n\n\t# Inicio el algoritmo QuickHull y almacenare el resultado en vertices\n\tconvex = 
quickHull.QuickHull(posicionInicial)\n\tvertices = convex.quickHull(verticesObjeto)\n\tprint(\"\\nVertices en la envolvente convexa:\\n\")\n\tfor vertex in (vertices):\n\t\tprint(vertex)\n\n\t# creo la lista de lados a partir del los vertices de la envolvente\n\tlados = creaLados(vertices)\n\n\t# creo la maya y el objeto\n\tmi_mesh = bpy.data.meshes.new(nombre)\n\tmi_objeto = bpy.data.objects.new(nombre, mi_mesh)\n\n\t# coloco el objeto en la misma posicion en la que estaba el objeto\n\t# anteriormente seleccionado.\n\tmi_objeto.location = posicionInicial\n\n\t# enlazo el objeto a la escena\n\tbpy.context.scene.objects.link(mi_objeto)\n\n\t# creo el la maya del objeto\n\tmi_mesh.from_pydata(vertices,lados,caras)\n\tmi_mesh.update(calc_edges=True)", "def create():\n\tnombre = input(\"Ingrese el nombre de pokemon: \")\n\testado = input(\"Ingrese el estado. Si el pokemon no tiene estado, ingrese X: \")\n\n\tif estado.upper() == \"X\":\n\t\testado = None\n\n\tif estado not in estados_permitidos:\n\t\tprint(\"Estado de pokemon no permitido. Registro no fue insertado.\")\n\t\tprint(\"Devolviendo al menu principal...\")\n\t\treturn\n\n\thp_actual = int(input(\"Ingrese HP actual de pokemon: \"))\n\tfecha = input(\"Ingrese la fecha en formato DD/MM/YY HH24:MM (ej 06/09/20 14:20): \")\n\tinsertar_pokemon(nombre, hp_actual, estado, fecha)" ]
[ "0.5493299", "0.53785163", "0.5365299", "0.529691", "0.52771807", "0.52318215", "0.5227122", "0.5218565", "0.5150027", "0.5147518", "0.51257867", "0.511523", "0.51093465", "0.5084413", "0.5066921", "0.50494576", "0.5047126", "0.5046697", "0.5029651", "0.5026552", "0.5024618", "0.5009792", "0.5007596", "0.5006941", "0.50068134", "0.49980405", "0.49933285", "0.49757096", "0.49487406", "0.49277273" ]
0.6067137
0
Shows add file dialog
def handleActionAdd(self):
        self.fDialog.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handleAddFileButtonClicked(self):\n # Find the directory of the most recently opened image file\n mostRecentImageFile = PreferencesManager().get( 'DataSelection', 'recent image' )\n if mostRecentImageFile is not None:\n defaultDirectory = os.path.split(mostRecentImageFile)[0]\n else:\n defaultDirectory = os.path.expanduser('~')\n\n # Launch the \"Open File\" dialog\n fileNames = self.getImageFileNamesToOpen(defaultDirectory)\n\n # If the user didn't cancel\n if len(fileNames) > 0:\n PreferencesManager().set('DataSelection', 'recent image', fileNames[0])\n try:\n self.addFileNames(fileNames)\n except RuntimeError as e:\n QMessageBox.critical(self, \"Error loading file\", str(e))", "def add_file_option(self, name, callback):\n item = self.file_menu.Append(-1, name, name)\n self.Bind(wx.EVT_MENU, callback, item)", "def add_media_dialog(self):\r\n\r\n file = filedialog.askopenfilename()\r\n\r\n if add_media(file, 0, self): # Checking if the process of adding the media file was successful\r\n\r\n if file: # Checking whether the user has aborted the operation\r\n # Getting the path of the file with respect to the current media folder (since the \"file\" variable\r\n # points to the location of the source file)\r\n full_path = os.path.join(media_folder, os.path.basename(file)).replace(\"\\\\\", \"/\")\r\n\r\n # Whenever a media item is added, the user is automatically prompted to configure its metadata\r\n self.configure_media(os.path.basename(file), full_path)", "def buttonClick(self):\n \n self.fpath=filedialog.askopenfilename()\n self.label_fpath.config(text=self.fpath)\n self.err_label.config(text='')\n pass", "def add_file(self):\n self._new_exp = AddExperimentWindow(self._fitter, self._exp)\n self._new_exp.show()", "def open_file(self): # need to fix this to open in a new window\n\t\tself.file_path = filedialog.askopenfilename()\n\t\tf = open(self.file_path)\n\t\tfreader = f.read()\n\t\tself.textBox.insert(END, freader)", "def open_file(self):\n selected_file = open_file(self, 'Add File', _USER_DOCUMENTS, 'All Files (*)')\n if not selected_file:\n self.configuration_widgets.logger.warning('No file has been selected.')\n return\n self.configuration_widgets.logger.info('Processing File - {}'.format(selected_file))\n # Passing the selected item to the configure module to be processed\n _configure_object = ConfigureFiles(folder=os.path.dirname(selected_file))\n _configure_object.single_file(selected_file)\n # Adding the file\n self.tree_widget.add_items(_configure_object, self.configuration_widgets)", "def new_file(self):\r\n self.filename = QFileDialog.getSaveFileName(\r\n None, 'Title', '', 'TXT (*.txt)'\r\n )\r\n if self.filename[0]:\r\n self.currentfile = open(self.filename[0], 'w')\r\n (self.base_name, self.ext) = os.path.splitext(self.filename[0])\r\n self.FilePath.setText(self.filename[0])", "def on_add_file(self, event):\n wildcard = \"Media Files (*.*)|*.*\"\n dlg = wx.FileDialog(\n self, message=\"Choose a file\",\n defaultDir=self.currentFolder, \n defaultFile=\"\",\n wildcard=wildcard,\n style=wx.OPEN | wx.CHANGE_DIR\n )\n if dlg.ShowModal() == wx.ID_OK:\n path = dlg.GetPath()\n self.currentFolder = os.path.dirname(path[0])\n trackPath = '\"%s\"' % path.replace(\"\\\\\", \"/\")\n self.mplayer.Loadfile(trackPath)\n \n t_len = self.mplayer.GetTimeLength()\n self.playbackSlider.SetRange(0, t_len)\n self.playbackTimer.Start(100)", "def add_file(\n self,\n path: str,\n label: Optional[str] = None,\n ) -> None:\n resolved = Path(path).resolve()\n self.logger.info(\"Adding file: 
%s\", resolved)\n\n if not resolved.exists():\n self.logger.warning(\"File does not exist: %s\", resolved)\n\n def open_file(*_):\n if platform.system() == \"Windows\":\n os.startfile(resolved) # type: ignore # pylint: disable=no-member\n elif platform.system() == \"Darwin\":\n subprocess.call([\"open\", resolved])\n else:\n subprocess.call([\"xdg-open\", resolved])\n\n self._client.add_element(\n element=ElevatedButton(\n text=(label or str(resolved)), icon=icons.FILE_OPEN, on_click=open_file\n )\n )", "def add_files(self):\n file_paths = tkinter.filedialog.askopenfilenames(parent=self)\n\n if not file_paths:\n return\n for file_path in file_paths:\n self.files_treeview.insert(\"\", \"end\", values=(file_path,))\n self.files_treeview.selection_set(self.files_treeview.get_children()[-1])", "def open_file():\n filepath = filedialog.askopenfilename(initialdir = \"./\",title = \"Seleccionar archivo\",filetypes = ((\"xls files\",\"*.xls\"),(\"xlsx files\",\"*.xlsx\")))\n if not filepath:\n return\n\n window.title(filepath)\n lbl_url[\"text\"] = filepath\n btn_generate['state'] = 'normal'", "def add_command_file_page(wiz, final_page):\n page = wiz.add_page(u\"Filename for writting command file\", final_page)\n lay = page.use(qt.QHBoxLayout())\n lay.addWidget(qt.QLabel(u\"Aster command file\"))\n entry = CommandFileEntry()\n lay.addWidget(entry)\n page.register_qt_field(\"command-file*\", entry)\n but = GC.create_icon_button(\"load-file.png\", entry.load_from_disk)\n lay.addWidget(but)", "def cb_new(self, button):\n print(\"New File callback\")\n self.cb_save_as(button)", "def file_popup(file) -> str:\n layout = [\n [sg.Text(f\"Select the action to perform on\\n\\n{file}\")],\n [sg.Button(\"Open File\", key=\"-APP-\"),\n sg.Button(\"Open in File Explorer\", key=\"-EXPLORER-\"),\n sg.Button(\"Delete File\", key=\"-DEl-\",\n button_color=(\"Black\", \"OrangeRed\"))]\n ]\n window = sg.Window(\"Open selected file.\", layout, finalize=True)\n button, value = window.read()\n window.close()\n del window\n return button", "def NewFile(self, e=0):\n\n self.m_Filter.SetValue('')\n # clear all\n self.clear_controls()\n self.d = dict()\n # Raname Title of window\n self.file = \"\"\n self.SetTitle(\"Chainer\")\n self.n = 1\n self.set_arrows()\n #self.mnemo_hide()", "def file_menu_new_activate(self, widget, data=None):\n self.new_chooser.show()", "def open_file(entry):\n entry.delete(0, 'end')\n file = askopenfile(mode ='r', filetypes =[('PDF Files', '*.pdf')])\n if file is not None: \n entry.insert(0, file.name)", "def ask_file(message=\"Select file for open.\", title=None):\n return dialog(\"ask_file\", message=message, title=title)", "def gui_fnames(dir=None):\n if dir is None: dir ='./'\n message = \"Select all the folders to add\"\n dir_names = FileDialog()\n dir_names.show()", "def fileDialog(*args, application: bool=True, defaultFileName: AnyStr=\"\", directoryMask:\n AnyStr=\"\", mode: int=0, title: AnyStr=\"\", **kwargs)->AnyStr:\n pass", "def fileDialogOp(self):\n self.fileLocation.delete(0, tk.END)\n self.fileLocation.insert(0,\n filedialog.asksaveasfilename(\n title='Select Log File Location',\n filetypes=[('csv file', '*.csv')],\n defaultextension='.csv'))", "def _open_file(self):\n file = QFileDialog.getOpenFileName(self, \"Open file\", \".\")[0]\n if file:\n self.try_add_tab(file)", "def add_path(self, widget, title, file_text):\r\n\r\n # Decide default directory\r\n default_dir = get_default_dir()\r\n\r\n # File picker\r\n options = QFileDialog.Options()\r\n options |= 
QFileDialog.DontUseNativeDialog\r\n filename = QFileDialog.getOpenFileName(self, title, default_dir, file_text, options=options)\r\n\r\n if filename[0]:\r\n widget.setText(filename[0].replace(\"/\",\"\\\\\"))", "def newFile(self):\n self.open_file_name = None\n self.ui.main_edit.setText(\"\")\n self.saveEnabled(False)", "def __onAddClicked(self):\n\t\tdir_name = QFileDialog.getExistingDirectory(self, \"Select a directory\")\n\t\tif dir_name is not None:\n\t\t\ttheItem = addNewListItemCalled([dir_name], self.ui.listWidget, mutable=True)\n\t\t\tif theItem is not None:\n\t\t\t\ttheItem.setSelected(True)", "def add_song(self):\n settings = dict(initialdir=pathlib.Path().absolute(), title=\"Choose songs\", filetypes=(\n (\"flac files\", \"*.flac\"),\n (\"mp3 files\", \"*.mp3\"),\n (\"all files\", \"*\")))\n\n song = filedialog.askopenfilename(**settings)\n\n self.update_playlist(song)\n self.listbox.insert(\"end\", self.song_list[-1]['name'])", "def openFileDialog(self): \n self.dialog = ocempgui.widgets.Box(373, 372)\n self.dialog.topleft = 528, 205\n\n background = guiobjects.OcempImageMapTransparent(GG.genteguada.GenteGuada.getInstance().getDataPath(WINDOW_UPLOAD))\n self.dialog.add_child(background)\n \n self.listDir = guiobjects.OcempImageFileList(310, 239)\n self.listDir.topleft = 31, 60\n self.dialog.add_child(self.listDir)\n\n buttonOK = guiobjects.OcempImageButtonTransparent(GG.genteguada.GenteGuada.getInstance().getDataPath(BUTTON_OK), self.buttonTooltips[\"ok\"], self.parent.showTooltip, self.parent.removeTooltip)\n buttonOK.topleft = [233, 308]\n buttonOK.connect_signal(ocempgui.widgets.Constants.SIG_CLICKED, self.closeFileDialog,\"OK\")\n self.dialog.add_child(buttonOK)\n \n buttonCancel = guiobjects.OcempImageButtonTransparent(GG.genteguada.GenteGuada.getInstance().getDataPath(BUTTON_CANCEL), self.buttonTooltips[\"cancel\"], self.parent.showTooltip, self.parent.removeTooltip)\n buttonCancel.topleft = [122, 308]\n buttonCancel.connect_signal(ocempgui.widgets.Constants.SIG_CLICKED, self.closeFileDialog,\"KO\")\n self.dialog.add_child(buttonCancel)\n\n self.window.add_child (self.dialog)", "def add(self, filename, *args):\n return self.cmd('add', filename, *args)", "def showInputFileInExplorer(self):\r\n\r\n filename = self.ui.inputFilenameLineEdit.text()\r\n if not os.path.isfile(filename):\r\n QMessageBox.warning(self, \"Cannot show input file\", \"The input file does not exist\")\r\n return\r\n QDesktopServices.openUrl(QUrl.fromLocalFile(os.path.dirname(filename)))" ]
[ "0.73031515", "0.696894", "0.689285", "0.6701149", "0.6628294", "0.65997547", "0.6579842", "0.65336597", "0.6524251", "0.6516713", "0.6485592", "0.6429493", "0.6420707", "0.64015096", "0.6400006", "0.63792896", "0.6371303", "0.63395435", "0.633879", "0.6333457", "0.6301128", "0.6265357", "0.6232336", "0.62275684", "0.62065643", "0.6202446", "0.62000805", "0.6159992", "0.61295485", "0.6120027" ]
0.74322236
0
Remove metadata by clicking X
def removeMeta(self, row, column):
        filePath = self.filesList.selectedItems()[0].text(2)
        metaHeader = (self.metadataList.item(row, 0)).text()
        logging.debug("Removing metadata " + metaHeader + " from " + str(filePath))
        self.filesList.removeMeta(filePath, metaHeader, row)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handleCleanMetadataKeep(self):\n logging.debug(\"Removing all metadata found...\")\n filePath = self.filesList.selectedItems()[0].text(2)\n self.filesList.removeAllMeta(filePath)", "def handleRemoveFile(self):\n for w in self.filesList.selectedItems():\n self.filesList.removeFile(w.text(2))\n self.metadataList.clear()\n self.metadataList.setRowCount(0)\n self.metadataList.setHorizontalHeaderLabels([\"Metadata Header\", \"Value\"])\n self.personalDataList.clear()", "def remove_extra_metadata(meta: dict) -> None:\n keys = get_extra_metadata_keys()\n remove_keys(data=meta, keys=keys)", "def onClearButton(self):\n markupsNode = slicer.util.getNode( \"MarkupsFiducial\" ) \n markupsNode.RemoveAllMarkups()", "def _delete_metadata(self, metadata_role):\n \n # The root metadata role is never deleted without a replacement.\n if metadata_role == 'root':\n return\n \n # Get rid of the current metadata file.\n self._move_current_to_previous(metadata_role)\n \n # Remove knowledge of the role.\n if metadata_role in self.metadata['current']:\n del self.metadata['current'][metadata_role]\n tuf.roledb.remove_role(metadata_role)", "def handleAllMetaClear(self, path):\n logging.debug(\"All Metadata removed, clearing the table...\")\n self.metadataList.clear()\n self.metadataList.setRowCount(0)\n self.metadataList.setHorizontalHeaderLabels([\"Metadata Header\", \"Value\"])\n self.fileNotSupported.hide()\n self.changeEnableMenus(self.filesList.getFileObj(path))", "def delete_plugin_data(self):", "def __delitem__(self, name):\n tag = self._find(name)\n if tag is not None:\n self.meta.remove(tag)\n else:\n raise KeyError(name)", "def delete_meta_file(self):\n try:\n self.logger.debug('Delete old metadata file %s.', self.meta_file_path)\n os.remove(self.meta_file_path)\n except OSError as ex:\n if ex.errno != errno.ENOENT:\n raise MetadataError('Failed to delete old metadata file. {}'\n .format(ex.strerror))", "def delete_server_metadata(self, name):\n raise NotImplementedError", "def aboutToDelete(self):\n \n pass", "def aboutToDelete(self):\n \n pass", "def aboutToDelete(self):\n \n pass", "def clear(self):\n for tag in self.meta.findall(CN('meta:user-defined')):\n self.meta.remove(tag)", "def remove():", "def delete(self) -> None:\n try:\n self._logger.debug('Delete old metadata file %s.', self._path)\n os.remove(self._path)\n except OSError as ex:\n if ex.errno != errno.ENOENT:\n msg = 'Failed to delete old metadata file. 
{}'.format(ex.strerror)\n raise MetaFileError(msg)", "def remove(self):", "def handleCleanMetadataRecon(self):\n logging.debug(\"Removing compromising personal info and remaking the file...\")\n filePath = self.filesList.selectedItems()[0].text(2)\n fileType = self.filesList.getFileObj(filePath).type\n self.printPdfPersonalData(filePath, \n fileType,\n AddedFile.changeBase(filePath, self.outputPath))\n self.tabArea.setCurrentIndex(1)\n self.changeCursor()\n self.filesList.getFileObj(filePath).reconMetaCleaned = True", "def delete_thumbnail(self, thumbnail_name):", "def delete(self):\n del self.shx.atoms[self.index]", "def remove():\n run('pew rm {0}'.format(package_name()))", "def delete(log, session, args):\n log('imageset id: {highlight}{id}{reset}',\n highlight=Fore.GREEN,\n id=args.id,\n reset=Style.RESET_ALL)\n log.warn('delete imageset command coming soon.')", "def remove_data(writer: UFOWriter, filename: str) -> None:\n writer.removeImage(filename)", "def delX(self):\n del self.components[0]", "def delX(self):\n del self.components[0]", "def Remove(self, e):\n self.reset(unset_namelist=True)", "def delete_entry(title):\n filename = f\"entries/{title}.md\"\n if default_storage.exists(filename):\n default_storage.delete(filename)", "def unload(self):\n for action in self.actions:\n self.iface.removePluginVectorMenu(\n self.tr(u'&Save Attributes'),\n action)\n self.iface.removeToolBarIcon(action)", "def delInfo(label: str):\r\n\r\n if not self.isClosed:\r\n if label in self.__identity_info.keys():\r\n del self.__identity_info[label]\r\n else:\r\n raise HDDOPermissionException('Tried to delete non-existing identity information in a HealthDominoDataObject.')\r\n else:\r\n raise HDDOPermissionException('Tried to delete identity information from a closed HealthDominoDataObject.')", "def unload(self):\n for action in self.actions:\n self.iface.removePluginVectorMenu(\n self.tr(u'&SaveAttributes'),\n action)\n self.iface.removeToolBarIcon(action)" ]
[ "0.68331033", "0.6547086", "0.6482654", "0.612664", "0.6104849", "0.6095725", "0.6092427", "0.6057032", "0.60220397", "0.60176575", "0.59810984", "0.59810984", "0.59810984", "0.5941894", "0.5923921", "0.5883442", "0.5852436", "0.58467954", "0.58435553", "0.58377486", "0.57839864", "0.5766376", "0.5738638", "0.5711615", "0.5711615", "0.5711574", "0.57023764", "0.567582", "0.5674045", "0.56706464" ]
0.720556
0
Clean metadata but keep orig file
def handleCleanMetadataKeep(self):
        logging.debug("Removing all metadata found...")
        filePath = self.filesList.selectedItems()[0].text(2)
        self.filesList.removeAllMeta(filePath)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handleCleanMetadataRecon(self):\n logging.debug(\"Removing compromising personal info and remaking the file...\")\n filePath = self.filesList.selectedItems()[0].text(2)\n fileType = self.filesList.getFileObj(filePath).type\n self.printPdfPersonalData(filePath, \n fileType,\n AddedFile.changeBase(filePath, self.outputPath))\n self.tabArea.setCurrentIndex(1)\n self.changeCursor()\n self.filesList.getFileObj(filePath).reconMetaCleaned = True", "def clean(self):\n actual_output_file = path.splitext(self.source_name)[0] + \".actual\"\n if path.exists(self.binary_name):\n os.unlink(self.binary_name)\n if path.exists(actual_output_file):\n os.unlink(actual_output_file)", "def delete_meta_file(self):\n try:\n self.logger.debug('Delete old metadata file %s.', self.meta_file_path)\n os.remove(self.meta_file_path)\n except OSError as ex:\n if ex.errno != errno.ENOENT:\n raise MetadataError('Failed to delete old metadata file. {}'\n .format(ex.strerror))", "def clean_me(args):\n with open(args.input, 'rb') as infile:\n with open(args.output, 'wb') as outfile:\n \n for line in infile:\n if not 'xsi:nil=\"true\"' in line:\n outfile.write(line)\n else:\n print \"Removing %s\" % line", "def _clean_up_meta_results_file(self):\n\n print \"Reading in meta-results file...\"\n with open(self.meta_results_file, 'r') as f_in:\n meta_results_json = json.load(f_in)\n\n results = meta_results_json['Results']\n n = len(results)\n indices_to_delete = []\n for i in range(n):\n # Assumption if any file is missing skip entire dictionary item.\n design_valid = results[i]['Design'] in self.design_files\n test_bench_valid = os.path.basename(results[i]['TestBench']) in self.test_bench_files\n sum_rep_valid = results[i]['Summary'] in self.result_files\n if not (design_valid and test_bench_valid and sum_rep_valid):\n indices_to_delete.append(i)\n\n indices_to_delete.reverse()\n for i in indices_to_delete:\n del results[i]\n\n # Write out the new, reduced in size, results dictionary\n with open(self.meta_results_file,'wb') as f_out:\n json.dump(meta_results_json, f_out, indent=4)\n\n print \"Written out cleaned up results dictionary.\"", "def remove_office_metadata(file_name):\n\tns = {\n\t\t'cp': 'http://schemas.openxmlformats.org/package/2006/metadata/core-properties',\n\t\t'dc': 'http://purl.org/dc/elements/1.1/',\n\t\t'dcterms': 'http://purl.org/dc/terms/',\n\t\t'dcmitype': 'http://purl.org/dc/dcmitype/',\n\t\t'xsi': 'http://www.w3.org/2001/XMLSchema-instance'\n\t}\n\tfor prefix, uri in ns.items():\n\t\tElementTree.register_namespace(prefix, uri)\n\n\t_, file_ext = os.path.splitext(file_name)\n\ttmpfd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name), suffix=file_ext)\n\tos.close(tmpfd)\n\twith zipfile.ZipFile(file_name, 'r') as zin:\n\t\twith zipfile.ZipFile(tmpname, 'w') as zout:\n\t\t\tzout.comment = zin.comment\n\t\t\tfor item in zin.infolist():\n\t\t\t\tdata = zin.read(item.filename)\n\t\t\t\tif item.filename == 'docProps/core.xml':\n\t\t\t\t\troot = ElementTree.fromstring(data)\n\t\t\t\t\troot.clear()\n\t\t\t\t\tdata = ElementTree.tostring(root, 'UTF-8')\n\t\t\t\tzout.writestr(item, data)\n\tos.remove(file_name)\n\tos.rename(tmpname, file_name)", "def clean_metadata_from_xml(cls, xml_object):\r\n for attr in cls.metadata_attributes:\r\n if xml_object.get(attr) is not None:\r\n del xml_object.attrib[attr]", "def remove_dataset_file(sender, instance, **kwargs):\n if instance.original_file:\n if os.path.isfile(instance.original_file.path):\n os.remove(instance.original_file.path)", "def 
remove_extra_metadata(meta: dict) -> None:\n keys = get_extra_metadata_keys()\n remove_keys(data=meta, keys=keys)", "def clean_metadata(metaobj):\n if len(metaobj) == 1 and 'href' in metaobj[0] and '/api/programs/' in metaobj[0]['href']:\n metaobj = metaobj # Keep lastUpdated for program\n else:\n metaobj = remove_subset_from_set(metaobj, 'lastUpdated')\n metaobj = remove_subset_from_set(metaobj, 'lastUpdatedBy')\n metaobj = remove_subset_from_set(metaobj, 'created')\n metaobj = remove_subset_from_set(metaobj, 'createdBy')\n metaobj = remove_subset_from_set(metaobj, 'href')\n metaobj = remove_subset_from_set(metaobj, 'access')\n metaobj = remove_subset_from_set(metaobj, 'favorites')\n metaobj = remove_subset_from_set(metaobj, 'allItems')\n metaobj = remove_subset_from_set(metaobj, 'displayName')\n metaobj = remove_subset_from_set(metaobj, 'displayFormName')\n metaobj = remove_subset_from_set(metaobj, 'displayShortName')\n metaobj = remove_subset_from_set(metaobj, 'displayDenominatorDescription')\n metaobj = remove_subset_from_set(metaobj, 'displayNumeratorDescription')\n metaobj = remove_subset_from_set(metaobj, 'displayDescription')\n metaobj = remove_subset_from_set(metaobj, 'interpretations')\n if len(metaobj) > 0:\n for subtag in ['dashboardItems', 'analyticsPeriodBoundaries', 'mapViews', 'user', 'userGroupAccesses',\n 'programStageDataElements', 'programTrackedEntityAttributes',\n 'trackedEntityTypeAttributes', 'userCredentials', 'legends', 'greyedFields']:\n for i in range(0, len(metaobj)):\n if subtag in metaobj[i]:\n metaobj[i][subtag] = remove_subset_from_set(metaobj[i][subtag], 'lastUpdated')\n metaobj[i][subtag] = remove_subset_from_set(metaobj[i][subtag], 'lastUpdatedBy')\n metaobj[i][subtag] = remove_subset_from_set(metaobj[i][subtag], 'created')\n metaobj[i][subtag] = remove_subset_from_set(metaobj[i][subtag], 'createdBy')\n # There is access : { read: true, delete: false ... } dictionary\n # and there is access : \"rw----\"... Make sure we only delete the dictionary version\n if subtag not in ['user', 'userGroupAccesses']:\n metaobj[i][subtag] = remove_subset_from_set(metaobj[i][subtag], 'access')\n\n if subtag == 'programTrackedEntityAttributes':\n metaobj[i][subtag] = remove_subset_from_set(metaobj[i][subtag], 'name')\n metaobj[i][subtag] = remove_subset_from_set(metaobj[i][subtag], 'displayName')\n metaobj[i][subtag] = remove_subset_from_set(metaobj[i][subtag], 'displayFormName')\n metaobj[i][subtag] = remove_subset_from_set(metaobj[i][subtag], 'displayShortName')\n\n return metaobj", "def cleanup_metadata(self, cleanup_metadata):\n\n self._cleanup_metadata = cleanup_metadata", "def delete(self) -> None:\n try:\n self._logger.debug('Delete old metadata file %s.', self._path)\n os.remove(self._path)\n except OSError as ex:\n if ex.errno != errno.ENOENT:\n msg = 'Failed to delete old metadata file. 
{}'.format(ex.strerror)\n raise MetaFileError(msg)", "def test_remove_orphaned_metadata(self):\n self.tool.filesystem.write_text_file(\n self.finder.path_from_web_tests('external', 'wpt', 'dir', 'is',\n 'orphaned.html.ini'),\n '[orphaned.html]\\n')\n self.tool.filesystem.write_text_file(\n self.finder.path_from_web_tests('external', 'wpt',\n 'infrastructure', 'metadata',\n 'testdriver.html.ini'),\n '[testdriver.html]\\n')\n self.tool.filesystem.write_text_file(\n self.finder.path_from_web_tests('external', 'wpt', 'dir', 'is',\n '__dir__.ini'), 'expected: FAIL\\n')\n with self._patch_builtins():\n manifests = load_and_update_manifests(self.finder)\n self.command.remove_orphaned_metadata(manifests)\n self.assertFalse(\n self.tool.filesystem.exists(\n self.finder.path_from_web_tests('external', 'wpt', 'dir', 'is',\n 'orphaned.html.ini')))\n self.assertTrue(\n self.tool.filesystem.exists(\n self.finder.path_from_web_tests('external', 'wpt', 'dir', 'is',\n '__dir__.ini')))\n self.assertTrue(\n self.tool.filesystem.exists(\n self.finder.path_from_web_tests('external', 'wpt',\n 'infrastructure', 'metadata',\n 'testdriver.html.ini')))", "def clean(self):\r\n\r\n for _, data in self.composition.items():\r\n index_file = Path(data['file'] + '.fxi')\r\n if index_file.exists():\r\n index_file.unlink()", "def clean():\n try:\n os.unlink(options.coords + 'mirza_mrna_input' + '.fa')\n os.unlink(options.coords + 'mirza_mirna_input' + '.fa')\n os.unlink(options.coords + 'mirza_mirna_expressions' + '.fa')\n except:\n pass", "def dev_clean():\n clean_files(\"csv\", True)\n clean_files(\"jsontxt\", True)", "def cleanup_intermediate_files(self):\n self.cmd(\"rm -f {local_temp_dir}/*rg_dict* \\\n {local_temp_dir}/*aln* \\\n {local_temp_dir}/snappy*\".\n format(\n local_temp_dir=self.local_temp_dir\n ),\n shell=True)", "def clean(self):\n if os.path.exists(self.initial):\n if os.path.exists(self.path) and os.stat(self.path).st_size == os.stat(\n self.initial).st_size:\n os.remove(self.initial)\n else:\n # if it doesn't match, something probably crashed; rename the temporary file and\n # it'll get uploaded at some point\n self.auto_filename()\n self.rename()\n self.connect()\n os.remove(self.initial)\n if os.path.exists(self.path):\n os.remove(self.path)\n self.filename_set = False", "def clean(self):\n\t\tself.archiver.closeFile()", "def clean_data(self, path, exclude_msgtypes=None):", "def _clean_data(self):\n if not path.exists('auto-mpg.data.txt'):\n logger.info('Could not find auto-mpg.data.txt in the current working directory')\n sys.exit()\n else:\n try:\n with open('auto-mpg.data.txt', 'r') as dirty_data:\n with open('auto-mpg.clean.txt', 'w') as clean_data:\n ## counter for row writes\n counter = 0\n for row in csv.reader(dirty_data):\n clean_data.write(row[0].expandtabs(1) + '\\n')\n counter +=1\n except Exception as e:\n logger.info('File error occurred: {e}. 
Exiting')\n sys.exit()", "def clean_files(self):\n self.filenames.clear()", "def clean(obj):\n clean_up_generated_files(obj)", "def clearMetaFiles(self, meta_id,fpath):\n\n try:\n con = self.getMetadataDatabaseConnection()\n con.cursor().callproc('clear_meta_analysis_files', [meta_id,fpath])\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n raise Exception(str(e))", "def strip_attributes(arff_file):\r\n start = arff_file.find('% filename')\r\n new_arff = arff_file[start:]\r\n return new_arff", "def photo_edit_file_cleanup(sender, **kwargs):\n instance = kwargs.get('instance')\n filename = instance.upload.url[1:]\n if os.path.exists(filename):\n os.remove(filename)", "def clean_old_data():\n logger.info('Cleaning standalone files on disk...')\n for absolute_path in glob.glob(MEDIA_URL + '*'):\n file_name = os.path.basename(absolute_path)\n try:\n relative_path = os.path.join(AUDIOS_URL, file_name)\n audio = Audio.objects.get(filename=relative_path)\n if audio.get_type() == 'episode':\n try:\n # If there are inactive audios on its being\n for e in audio.podcast.episode_set.exclude(pk=audio.podcast.active_episode.pk):\n if not e.is_active():\n logger.info('Inactive audio found in podcast set. Erasing files.')\n e.delete_files()\n except Exception, e:\n logger.exception(e.message)\n except ObjectDoesNotExist, e:\n logger.info('A file with no audio registered in database')\n if os.path.isfile(relative_path):\n logger.info('Erasing: %s' % relative_path)\n os.remove(relative_path)\n logger.info('... Done.')", "def removeMeta(self, row, column):\n filePath = self.filesList.selectedItems()[0].text(2)\n metaHeader = (self.metadataList.item(row, 0)).text()\n logging.debug(\"Removing metadata \" + metaHeader + \" from \" + str(filePath))\n self.filesList.removeMeta(filePath, metaHeader, row)", "def cleanup_file(name: str):\n if os.path.exists(name) and os.path.isfile(name): # h5\n os.remove(name)\n elif os.path.exists(name) and os.path.isdir(name): # tf\n shutil.rmtree(name)", "def clean():\n clean_files()" ]
[ "0.68618995", "0.64275825", "0.6352827", "0.6342364", "0.6335688", "0.6319614", "0.6263183", "0.62409323", "0.62234294", "0.6124749", "0.6112678", "0.6087682", "0.60876715", "0.60499", "0.5985976", "0.5985", "0.5943836", "0.59089786", "0.58957404", "0.5872892", "0.5868339", "0.5862927", "0.5862016", "0.58583045", "0.58416253", "0.58062035", "0.5795195", "0.57848597", "0.5760763", "0.57398194" ]
0.7287814
0
When all metadata has been cleared
def handleAllMetaClear(self, path):
        logging.debug("All Metadata removed, clearing the table...")
        self.metadataList.clear()
        self.metadataList.setRowCount(0)
        self.metadataList.setHorizontalHeaderLabels(["Metadata Header", "Value"])
        self.fileNotSupported.hide()
        self.changeEnableMenus(self.filesList.getFileObj(path))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clear(self):\n for tag in self.meta.findall(CN('meta:user-defined')):\n self.meta.remove(tag)", "def _data_reset(self):\n conn = self.get_connection()\n\n elements = {\n **self.domain.registry.aggregates,\n **self.domain.registry.entities,\n **self.domain.registry.views,\n }\n for _, element_record in elements.items():\n provider = current_domain.providers[element_record.cls.meta_.provider]\n repo = self.domain.repository_for(element_record.cls)\n\n model_cls = repo._model\n if provider.conn_info[\n \"DATABASE\"\n ] == Database.ELASTICSEARCH.value and conn.indices.exists(\n model_cls._index._name\n ):\n conn.delete_by_query(\n refresh=True,\n index=model_cls._index._name,\n body={\"query\": {\"match_all\": {}}},\n )", "def handleCleanMetadataKeep(self):\n logging.debug(\"Removing all metadata found...\")\n filePath = self.filesList.selectedItems()[0].text(2)\n self.filesList.removeAllMeta(filePath)", "def clear(self):\n ...", "def clear(self):\n for key in self.__data.keys():\n del self.__data[key]", "def clear(self):\n self._post_init()", "def clear(self) -> None:", "def clear_all(self):\n raise NotImplementedError", "def clear(self) -> None:\n ...", "async def clear_all(self) -> None:", "def finalize(self):\n self.convert_deltas(list(self.config.keys()))\n self.set_timeseries_metadata(list(self.config.keys()))", "def clean(self):\n super(NoneCache, self).clean()", "def clear():", "def clear_modified(self):\n self._data.clear_modified()", "def cleanup_metadata(self, cleanup_metadata):\n\n self._cleanup_metadata = cleanup_metadata", "def clear(self):", "def clear(self):", "def clear(self):", "def clear(self):", "def clear(self):", "def clear(self):", "def clear(self):", "def clear(self): # real signature unknown; restored from __doc__\n pass", "def _clear(self):\n self.events = []\n self.last_on = None\n self.last_off = None", "def clear_data(self):\n if isinstance(self.data, DataManager):\n self.data._update_keys(clear=True)\n else:\n self.data = {}", "async def clear(self):", "def cleanup_and_reset(self):\n self.mem.set(self.mem.META_PLAN, None)\n self.mem.set(self.mem.META_GOALS, None)\n self.mem.set(self.mem.META_CURR_GOAL, None)", "def clear(self):\n self.events={}", "def clear_storage(self):\r\n raise NotImplementedError('override me')", "def reset(self):\n for k in self.data_keys:\n setattr(self, k, [])\n self.size = 0" ]
[ "0.6969182", "0.67796546", "0.67119086", "0.6706552", "0.6681175", "0.66797686", "0.6666315", "0.66634107", "0.666299", "0.6652054", "0.66436225", "0.6625275", "0.66200304", "0.66101605", "0.6604598", "0.6599554", "0.6599554", "0.6599554", "0.6599554", "0.6599554", "0.6599554", "0.6599554", "0.6592889", "0.6584248", "0.65835744", "0.65834886", "0.65623516", "0.6551341", "0.65468264", "0.65279174" ]
0.7667401
0
Print whatever is displayed in second tab as pdf to a file
def printPdfPersonalData(self, fileName, fileType, outFile):
        self.filesList.doBackup(fileName)
        self.filesList.cleanPdataMarks(fileName)
        if fileType != 'Pdf':
            outFile = AddedFile.changeExt(outFile, "pdf")
        #try:
        printer = QtGui.QPrinter()
        printer.setPageSize(QtGui.QPrinter.Letter)
        printer.setResolution(96)
        print "Page size ", str(self.filesList.getPdataDocSize(fileName).height())
        printer.setPaperSize(QtCore.QSizeF(self.filesList.getPdataDocSize(fileName)), QtGui.QPrinter.Point)
        printer.setOutputFormat(QtGui.QPrinter.PdfFormat)
        printer.setOutputFileName(outFile)
        printer.setFullPage(True)
        self.personalDataList.document().print_(printer)
        self.writeDetails("Writing PDF to " + outFile)
        self.filesList.loadBackup(fileName)
        #Finally clean the new pdf
        self.cleanPrintedPdf(outFile);
        # self.filesList.refreshPdata(fileName)
        #except Exception:
        # self.writeDetails("Failed to write to " + fileName)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_pdf(f,s1,s2='',s3=''):\n # does not need reportlab!\n if s1 == 'White Ballot': s1 = '\"'+'_'*10+'\"'\n cod = zlib.compress('BT /F1 16 Tf ET\\r\\nBT 300 270 Td (%s) Tj ET\\r\\nBT /F1 48 Tf ET\\r\\nBT 5 180 Td (%16s) Tj ET\\r\\nBT /F1 12 Tf ET\\r\\nBT 10 50 Td (%s) Tj ET'%(s3,s1,s2))\n open(f,'w').write(create_pdf.__doc__ + '/Length %d>>\\nstream\\n'%len(cod) + cod + 'endstream endobj\\ntrailer<</Root 4 0 R>>')", "def exportTable(self):\n\t\tself.pdf = \tself.dir + \"/application.pdf\"\n\t\tpdf = pisa.CreatePDF(\n\t\t\tfile(self.html, \"r\" ),\n\t\t\tfile(self.pdf, \"wb\")\n\t\t\t)", "def savePDFFile(self):\n s = self.text.get(\"1.0\", tk.END)\n f = open(file, \"w\", encoding='utf-8')\n f.write(s)\n f.close()\n\n # Create a file for each student with their graded files\n pdf = FPDF()\n pdf.add_page()\n pdf.set_font(\"Arial\", size=12)\n pdf.multi_cell(0, 5, s)\n\n # Removed the \\t from the filepath in order to save as pdf in 'Graded' file\n savingFilePDF = re.sub('\\t', '', item_text[0] + \".pdf\")\n pdf.output(gradedFilesFolder + \"\\\\\" + savingFilePDF)\n highlightingTextInFile()", "def generate_pdf(self):\n x = 100\n y = 100\n buffer = BytesIO()\n p = canvas.Canvas(buffer, pagesize=\"A4\")\n p.drawString(x, y, \"TO DO\")\n p.showPage()\n p.save()\n pdf = buffer.getvalue()\n buffer.close()\n return pdf", "def choose_pdf(self, args, numpdf):\n filetypes = ((\"Portable Document Format (PDF)\", \"*.pdf\"), (\"All Files\", \"*\"))\n filename = fd.askopenfilename(title=\"Choose the PDF file\", initialdir=os.path.abspath(os.sep),\n filetypes=filetypes)\n if numpdf == 1:\n self.entry_firstPDF.delete(0, tk.END)\n self.entry_firstPDF.insert(0, filename)\n\n else:\n self.entry_secondPDF.delete(0, tk.END)\n self.entry_secondPDF.insert(0, filename)", "def __get_ticket_print(self, **kwargs):\n # TODO: выяснить используется ли pdf в принципе. 
В эл.регестратуре он никак не используется\n # TODO: pdf creator based on Flask templates and xhtml2pdf\n return \"\"", "def pdf_to_test(file_name):\n #Opening, reading and parsing a pdf file to string\n pdfFileObj = open(file_name, 'rb')\n pdfReader = PyPDF2.PdfFileReader(pdfFileObj)\n pdf_string = pdfReader.getPage(0).extractText()\n \n #Find the RechnungsNr.\n start_of_RN = pdf_string.find(\"No.Invoice Date\") + len(\"No.Invoice Date\")\n rechnungs_nr = pdf_string[start_of_RN:start_of_RN+7]\n \n #Find the address\n start_of_address = pdf_string.find(\"Invoice Address\") + len(\"Invoice Address\")\n end_of_address = pdf_string.find(\"Payment Terms:\")\n address = pdf_string[start_of_address:end_of_address]\n \n #Liefermonat commenrs\n start_of_contract = pdf_string.find(\"Company Name / Line of business\") + len(\"Company Name / Line of business\")\n end_of_contract = pdf_string.find(\"Summary of Charges\")\n contract = pdf_string[start_of_contract:end_of_contract]\n \n #Nettobetrag - read base charge\n start_of_netto = pdf_string.find(\"Base Charges\") + len(\"Base Charges\")\n end_of_netto = pdf_string.find(\"Click Charges - Color\")\n nettobetrag = pdf_string[start_of_netto:end_of_netto]\n \n pdfFileObj.close()\n \n return pdfFileObj.name, rechnungs_nr, address, contract, nettobetrag", "def buildPDF(self):\n\n # TODO: get this working\n # TODO: make this configurable via a dialog\n os.chdir(self.file_path.parent)\n proc = subprocess.Popen(\n [\"make\", \"latexpdf\"],\n stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT\n )\n proc.wait()\n for line in proc.stdout:\n print(\"stdout: \" + line.rstrip())", "def printFile(self):\n qprinter = QtGui.QPrinter()\n print_dialog = PrintDialog(qprinter)\n ret = print_dialog.run()\n if (ret == QtGui.QDialog.Accepted):\n self.ui.main_edit.document().print_(qprinter)", "def render_as_pdf(self, width, height):\n pass", "def pdf():\n env.file_ext = \".pdf\"\n local(\"pandoc {input_files} -o {output_file}{file_ext} -H {preamble_file} --template {template_file} --bibliography={bib_file} --csl={csl_file} -V fontsize=12pt -V papersize=a4paper -V documentclass:report -N --latex-engine=xelatex\".format(**env))", "def main():\n f_name = sys.argv[1]\n file_contents = open(f_name).read()\n C = CAST([], \"python\")\n C2 = C.from_json_str(file_contents)\n\n V = CASTToAGraphVisitor(C2)\n last_slash_idx = f_name.rfind(\"/\")\n file_ending_idx = f_name.rfind(\".\")\n pdf_file_name = f\"{f_name[last_slash_idx + 1 : file_ending_idx]}.pdf\"\n V.to_pdf(pdf_file_name)", "def download(texttitle):\n try:\n body = current_file.analysed_texts['Regular']\n rendered = render_template('pdf_template.html', title=texttitle, body=body)\n options = {'encoding': \"UTF-8\"}\n pdf = pdfkit.from_string(rendered, False, options=options)\n response = make_response(pdf)\n response.headers[\"Content-Type\"] = 'application/pdf'\n response.headers[\"Content-Disposition\"] = 'attachment; filename=output.pdf'\n\n return response\n except Exception as e:\n flash(\"Something went wrong, please try again\")\n return redirect(request.referrer)", "def generate_pdf (tmp_folder, filename, pdf_page_size):\n\n shell_cmd = PDF_CONVERT_CMD.substitute(wkhtmltox_path=WKHTMLTOX_PATH, folder=tmp_folder, article_id=filename, page_size=pdf_page_size)\n proc = subprocess.Popen(shell_cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout_value, stderr_value = proc.communicate()\n\n print u'\\n'.join(filter(None, [shell_cmd, stdout_value, stderr_value]))", "def 
process_pdf(filename, qualies_only=False):\n if filename.endswith('.txt'):\n f = open(filename)\n text = f.read()\n f.close()\n else:\n text = subprocess.check_output([\"pdftotext\", \"-layout\",\n filename, \"-\"]).decode('utf-8')\n\n print(\"Processing {}...\".format(filename))\n\n pages = text.split(chr(12))\n print (\"{} Pages\".format(len(pages)))\n md = []\n qd = []\n for p in pages:\n if ('MAIN DRAW SINGLES' in p or 'Singles Championship' in p\n or 'Ladies\\' Singles' in p):\n md += [p]\n elif ('QUALIFYING SINGLES' in p or 'Qualifying Singles' in p\n or 'Qualifying Ladies\\' Singles' in p):\n qd += [p]\n elif ('Qualifiers' in p and not 'Doubles' in p):\n qd += [p]\n\n md_result = None\n qd_result = None\n\n meta = None\n if md and not qualies_only:\n md_result = drawsheet_process(chr(12).join(md))\n meta = md_result[2]\n\n # copy the metadata to the quaily draw if possible\n if qd:\n qd_result = drawsheet_process(chr(12).join(qd), meta, True)\n\n return (md_result, qd_result)", "def load_pdf(self, env=\"default\", debug=()):\n os.makedirs(\"txt\", exist_ok=True)\n if env is \"default\": # default python path\n call([executable,\n os.path.join(f\"{exec_prefix}\", \"Scripts\", \"pdf2txt.py\"),\n os.path.join(\"pdf\", f\"{self.pdf_filename}\"),\n os.path.join(f\"-otxt\", f\"{self.txt_filename}\")])\n if env is \"venv\": # virtual environment\n call([os.path.join(\"venv\", \"Scripts\", \"python.exe\"),\n os.path.join(\"venv\", \"Scripts\", \"pdf2txt.py\"),\n os.path.join(\"pdf\", f\"{self.pdf_filename}\"),\n os.path.join(f\"-otxt\", f\"{self.txt_filename}\")])\n with open(os.path.join(\"txt\", f\"{self.txt_filename}\"), \"r\", encoding=\"utf-8\") as file:\n self.paragraphs = [paragraph.rstrip('\\n') for paragraph in file]\n os.remove(os.path.join(\"txt\", f\"{self.txt_filename}\"))\n if debug:\n for counter, paragraph in enumerate(self.paragraphs):\n try:\n if int(debug[0]) < counter < int(debug[1]):\n print(counter, paragraph)\n except TypeError:\n print(\"Debug must be a (x,y) touple.\")", "def click_ver_pdf(self):\n self.button.click(liquidaciones_historicas_catalog.VINCULO_VER_PDF)", "def print_all(self):\n if not request:\n raise exceptions.Warning(_(''), _(''))\n session_id = request.session.sid\n config = self.env['ir.config_parameter']\n addons_url = config.get_param('addons_path')\n phantomjs_path = config.get_param('phantomjs_path')\n phantomjs_path = 'phantomjs' if not phantomjs_path else phantomjs_path\n print_url = self.env.context.get('protocol_url', False)\n if print_url:\n print_urls = [print_url]\n else:\n print_urls = self._get_print_urls()\n if not print_urls:\n return\n phantom = [\n phantomjs_path,\n addons_url +\n '/quality_protocol_report/static/src/js/phantom_url_to_pdf.js',\n session_id, \"/tmp\"] + print_urls\n process = subprocess.Popen(phantom)\n process.communicate()\n filenames = []\n for url in print_urls:\n fname = url.replace('/', '').replace(':', '')\n weight_pos = fname.find('?weight=')\n if weight_pos > -1:\n fname = fname[weight_pos+8:weight_pos+10] + '-' + fname[:weight_pos]\n filenames.append('/tmp/' + fname + '.pdf')\n filepath = self._merge_pdf(sorted(filenames))\n fildecode = open(filepath, 'r')\n encode_data = fildecode.read()\n fildecode.close()\n active_model = self.env.context.get('active_model', False)\n active_id = self.env.context.get('active_id', False)\n ungrouped_also = self.env.context.get('print_ungrouped_also', False)\n if active_model and active_id and not ungrouped_also:\n active_name = 
self.env[active_model].browse([active_id]).name\n else:\n dt = fields.Datetime.context_timestamp(self, datetime.now())\n active_name = dt.strftime('%d-%m-%Y_%Hh%M')\n filename = 'protocolo.pdf' if print_url else \\\n 'protocolos_' + str(active_name).lower() + '.pdf'\n attachment_data = {\n 'name': filename,\n 'datas_fname': filename,\n 'datas': base64.b64encode(encode_data),\n 'res_model': active_model,\n 'res_id': 0 if print_url else self.env.context.get('active_id', False),\n }\n self.env['ir.attachment'].search(\n [('name', '=', attachment_data['name']),\n ('res_id', '=', attachment_data['res_id']),\n ('res_model', '=', attachment_data['res_model'])]).unlink()\n attachment = self.env['ir.attachment'].create(attachment_data)\n\n filenames.append(filepath)\n for my_file in filenames:\n os.remove(my_file)\n\n if print_url:\n return {\n 'type': 'ir.actions.act_url',\n 'url': '/web/binary/saveas?model=ir.attachment&field=datas' +\n '&filename_field=name&id=%s' % (attachment.id),\n 'target': 'self',\n }\n else:\n return {'type': 'ir.actions.act_window_close'}", "def write_print_only_pages(outfile: TextIO, species: list, refdict: dict) -> None:\n print_cover(outfile)\n print_title_page(outfile)\n print_copyright_page(outfile, refdict)\n print_table_of_contents(outfile, species)", "def print_report_pdf(self):\n self.ensure_one()\n return self.env.ref('eliterp_sale_reports.action_report_product_catalogue').report_action(self)", "def create_print_issue_pdf(issue, **kwargs):\n if isinstance(issue, int):\n issue = Issue.objects.get(id=issue)\n\n editions = [('', PAGES_GLOB), ('_mag', MAG_PAGES_GLOB)]\n results = []\n for suffix, fileglob in editions:\n\n pdf_name = OUTPUT_PDF_NAME.format(issue=issue, suffix=suffix)\n logger.info('Creating pdf: {}'.format(pdf_name))\n tmp_bundle_file = tempfile.NamedTemporaryFile(suffix='.pdf')\n try:\n create_web_bundle(\n issue=issue,\n filename=tmp_bundle_file.name,\n fileglob=fileglob,\n **kwargs,\n )\n except RuntimeWarning as warning:\n logger.info(str(warning))\n continue\n try:\n print_issue = PrintIssue.objects.get(pdf__contains=pdf_name)\n except PrintIssue.DoesNotExist:\n print_issue = PrintIssue()\n with open(tmp_bundle_file.name, 'rb') as src:\n pdf_content = ContentFile(src.read())\n print_issue.pdf.save(pdf_name, pdf_content, save=False)\n print_issue.issue = issue\n print_issue.save()\n logger.info('New bundle file: {}'.format(pdf_name))\n results.append(pdf_name)\n return results", "def to_pdf(self, wkhtmltopdf: str, f, output_file: Optional[str] = None):\n if output_file is None:\n output_file = \"-\"\n html = self(f)\n with tempfile.NamedTemporaryFile(\"wb\", suffix=\".html\") as fd:\n html.write(fd)\n fd.flush()\n res = subprocess.run([wkhtmltopdf, fd.name, output_file], stdin=subprocess.DEVNULL, capture_output=True)\n if res.returncode != 0:\n raise RuntimeError(\"%s exited with error %d: stderr: %s\", self.wkhtmltopdf, res.returncode, res.stderr)\n if output_file == \"-\":\n return res.stdout", "def create_pdf(self):\n\n my_datetime = datetime.now()\n self.pdf_name = (\n self.pdf_name + \"_\" + my_datetime.strftime(\"%H%M_%d%m%Y\") + \".pdf\"\n )\n fig_width = aW * self.column_ratio[0]\n\n clm_width_meta = (aW * self.column_ratio[1]) / len(self.fields)\n\n c = canvas.Canvas(os.path.join(self.pdf_folder, self.pdf_name), pagesize=A4)\n\n for qc_run_id, fig_file in sorted(self._files.items()):\n (param_values, feature_values) = get_param_values(\n qc_run_id, self.db_name, return_meta_add_on=True\n )\n\n comment = self.subject + \"<br/>\"\n # 
c.saveState()\n title = \"Dataset \" + qc_run_id\n\n # Prepare header\n header = Paragraph(title, title_style)\n h_w, h_h = header.wrap(aW, aH)\n\n # Prepare image\n img = ImageReader(fig_file)\n im_width, im_height = img.getSize()\n aspect = im_height / float(im_width)\n fig_height = fig_width * aspect\n\n # Prepare metadata section\n\n meta_table = Table(\n param_values,\n colWidths=[clm_width_meta] * len(self.fields),\n hAlign=\"CENTER\",\n rowHeights=0.22 * inch,\n )\n meta_table.setStyle(\n TableStyle(\n [\n (\"FONT\", (0, 0), (-1, 0), \"Helvetica-Bold\"),\n (\"FONT\", (0, 1), (-1, -1), \"Helvetica\"),\n (\"LINEBELOW\", (0, 0), (1, 0), 0.08, colors.black),\n (\"SIZE\", (0, 0), (-1, -1), 8),\n (\"VALIGN\", (0, 0), (-1, -1), \"BOTTOM\"),\n # ('ALIGN', (0, 0), (-1, 0), 'CENTER'),\n (\"ALIGN\", (0, 0), (0, -1), \"LEFT\"),\n (\"ALIGN\", (1, 1), (1, -1), \"LEFT\"),\n (\"INNERGRID\", (0, 0), (-1, -1), 0.08, colors.beige),\n # ('BOX', (0,0), (-1,-1), 0.25, colors.grey),\n ]\n )\n )\n\n meta_width, meta_height = meta_table.wrap(aW - im_width, aH / 2)\n\n # Prepare comments header\n comments_header = Paragraph(\"Comments:\", title_style)\n avail_height = aH - fig_height - v_padding\n comm_h_width, comm_h_height = comments_header.wrap(\n im_width, avail_height # aW - meta_width,\n )\n # Prepare comments\n my_datetime = datetime.now()\n ts = \"Printed on \" + my_datetime.strftime(\"%c\")\n\n try:\n data_specific_comment = self.comments[int(qc_run_id)]\n comment += data_specific_comment + \"<br/>\"\n comment += self.comments[\"general\"] + \"<br/>\"\n\n comment += self.smalltalk + \"<br/>\"\n except Exception:\n logger.warning(\n \"Unable to summarize result of \" + \"dataset {}\".format(qc_run_id)\n )\n comment_ts = comment + ts\n comment_ts = textwrap.fill(comment_ts, 70)\n comment_ts = comment_ts.replace(\"\\n\", \"<br/>\")\n\n comments_p = Paragraph(comment_ts, body_style)\n\n avail_height = aH - fig_height - v_padding - comm_h_height\n\n comm_width, comm_height = comments_p.wrap(im_width, avail_height) # aW,\n\n line_widths = comments_p.getActualLineWidths0()\n number_of_lines = len(line_widths)\n if number_of_lines > 1:\n pass\n if number_of_lines == 1:\n min(line_widths)\n comm_width, comm_height = comments_p.wrap(im_width, avail_height)\n\n # Prepare features\n feat_table = Table(\n feature_values,\n colWidths=[clm_width_meta] * len(self.fields),\n hAlign=\"CENTER\",\n rowHeights=0.22 * inch,\n )\n feat_table.setStyle(\n TableStyle(\n [\n (\"FONT\", (0, 0), (-1, 0), \"Helvetica-Bold\"),\n (\"FONT\", (0, 1), (-1, -1), \"Helvetica\"),\n (\"LINEBELOW\", (0, 0), (1, 0), 0.08, colors.black),\n (\"SIZE\", (0, 0), (-1, -1), 8),\n (\"VALIGN\", (0, 0), (-1, -1), \"BOTTOM\"),\n # ('ALIGN', (0, 0), (-1, 0), 'CENTER'),\n (\"ALIGN\", (0, 0), (0, -1), \"LEFT\"),\n (\"ALIGN\", (1, 1), (1, -1), \"LEFT\"),\n (\"INNERGRID\", (0, 0), (-1, -1), 0.08, colors.beige),\n # ('BOX', (0,0), (-1,-1), 0.25, colors.grey),\n ]\n )\n )\n avail_height = aH - meta_height # fig_height - v_padding - comm_h_height\n avail_height -= comm_height\n feat_width, feat_height = feat_table.wrap(aW - im_width, avail_height)\n\n # Draw everyting on canvas\n\n header.drawOn(c, left_margin, aH - top_margin)\n\n c.drawImage(\n img,\n left_margin,\n aH - top_margin - fig_height - v_padding,\n width=fig_width * 1.1,\n height=fig_height * 1.1,\n mask=\"auto\",\n )\n\n meta_table.drawOn(\n c,\n left_margin + fig_width + h_padding,\n aH - meta_height - top_margin / 2, # - v_padding\n )\n\n comments_header.drawOn(\n c,\n left_margin,\n aH\n - 
top_margin\n - comm_h_height\n - fig_height\n - 2 * v_padding, # - add_on_height\n )\n\n comments_p.drawOn(\n c,\n left_margin,\n aH\n - top_margin\n - comm_h_height\n - comm_height\n - fig_height\n - 2 * v_padding\n - comm_h_height, # - add_on_height\n )\n\n feat_table.drawOn(\n c,\n left_margin + fig_width + h_padding,\n aH - meta_height - top_margin / 2 - feat_height - v_padding,\n # top_margin - fig_height - 2*v_padding - feat_height\n )\n\n # new page\n c.showPage()\n c.saveState()\n\n c.save()", "def template1(self):\n self.indirectobject(1, 0, \"<<\\n /Type /Catalog\\n /Outlines 2 0 R\\n /Pages 3 0 R\\n>>\")\n self.indirectobject(2, 0, \"<<\\n /Type /Outlines\\n /Count 0\\n>>\")\n self.indirectobject(3, 0, \"<<\\n /Type /Pages\\n /Kids [4 0 R]\\n /Count 1\\n>>\")\n self.indirectobject(4, 0, \"<<\\n /Type /Page\\n /Parent 3 0 R\\n /MediaBox [0 0 612 792]\\n /Contents 5 0 R\\n /Resources <<\\n /ProcSet [/PDF /Text]\\n /Font << /F1 6 0 R >>\\n >>\\n>>\")\n self.indirectobject(6, 0, \"<<\\n /Type /Font\\n /Subtype /Type1\\n /Name /F1\\n /BaseFont /Helvetica\\n /Encoding /MacRomanEncoding\\n>>\")", "def merger_page_pdf(self, input_pdf, output_pdf):\n output = PdfFileWriter()\n # Appending two pdf-pages from two different files\n _input_pdf = PdfFileReader(open(input_pdf, \"rb\"))\n for i in range(30):\n page = _input_pdf.getPage(0)\n artbox = page.artBox\n x = artbox[0]\n y = artbox[1]\n y = artbox[2]\n y = artbox[3]\n output.addPage(page)\n # output.addPage(_input_pdf.getPage(0))\n # output.addPage(_input_pdf.getPage(0))\n\n # Writing all the collected pages to a file\n output.write(open(output_pdf, \"wb\"))\n\n\n # Creating a routine that appends files to the output file\n\n\n # Creating an object where pdf pages are appended to", "def create_bill_pdf(obj):\n data = {\n 'today': datetime.date.today(),\n 'amount': obj.price,\n 'customer_name': obj.company.company_name,\n 'order_id': obj.pk,\n }\n pdf = render_to_pdf('pdf/invoice.html', data)\n filename = obj.company.company_name + '_' + obj.promotion.campaign_name + '_' + \\\n datetime.datetime.now().strftime(\"%Y-%m-%d\") + '.pdf'\n obj.bill.save(filename, File(io.BytesIO(pdf.content)))", "def render_pdf(self, target=None, zoom=1):\n return self._document.write_pdf(target=target, zoom=zoom)", "def join_files():\n files = [ent_1.get(), ent_2.get()]\n out_writer = PyPDF2.PdfFileWriter()\n for file in files:\n pdf_file = open(file, 'rb')\n file_reader = PyPDF2.PdfFileReader(pdf_file)\n for page in range(file_reader.numPages):\n pageObj = file_reader.getPage(page)\n out_writer.addPage(pageObj)\n\n output_file_name = result_entry.get()\n output_file = open(output_file_name, 'wb')\n out_writer.write(output_file)\n output_file.close()\n pdf_file.close()\n opener = \"open\" if sys.platform == \"darwin\" else \"xdg-open\"\n subprocess.call([opener, output_file_name])\n clear_labels()", "def generate_pdf(pdf_data):\n\n html = HTML(string=pdf_data)\n f = html.write_pdf()\n\n return f", "def printing():\r\n document.add_heading('Printing Service details', 1)\r\n\r\n printing_metrics = ['customproperties',\r\n 'workingSetSizeHiPct',\r\n 'logVerbosityAuditActivity',\r\n 'logVerbosityService',\r\n 'hostname',\r\n 'tags']\r\n\r\n printnodes = get_qlik_sense.get_printing()\r\n num_of_nodes = len(printnodes)\r\n num_of_print_metrics = len(printing_metrics)\r\n table = document.add_table(rows=num_of_print_metrics+1, cols=num_of_nodes+1)\r\n table.style = 'Grid Table 1 Light Accent 1'\r\n row = table.rows[0]\r\n row.cells[0].text = 'Metric'\r\n for item 
in range(0, num_of_nodes):\r\n row.cells[item+1].text = printnodes[item][6]\r\n for item in range(num_of_print_metrics):\r\n row = table.rows[item+1]\r\n row.cells[0].text = str(printing_metrics[item])\r\n for printnode in range(num_of_nodes):\r\n row.cells[printnode+1].text = str(printnodes[printnode][item])\r\n\r\n document.add_page_break()" ]
[ "0.63386774", "0.603243", "0.60320944", "0.58658314", "0.5812183", "0.57883507", "0.5786723", "0.578667", "0.5737755", "0.57349306", "0.572005", "0.5702959", "0.5675269", "0.56572753", "0.56041884", "0.5600227", "0.5594943", "0.5593114", "0.5561789", "0.55454755", "0.55392176", "0.55200636", "0.55176204", "0.55104595", "0.5481803", "0.5472791", "0.5458981", "0.5455647", "0.54471004", "0.54460424" ]
0.6298324
1
Fired to change monitor settings
def handleMonitorSettings(self):
        winPos = self.mainWindow.pos()
        popPos = QtCore.QPoint(winPos.x() + (self.mainWindow.width() - self.settingsPopup.width()) / 2, winPos.y() + self.mainWindow.height() / 2)
        self.monitorPopUp.move(popPos)
        self.monitorPopUp.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def onSettings(self):\n pass", "def on_settings(self):\n\n # Pull the current app state from the relay Observer object\n status, interval, ntfc_status, ntfc_state = settings_state.get_state()\n\n # Pass it to the Observable object in order to render the Settings window\n settings_changed, update_interval, ntfc_changed, ntfc_selected = render_settings_window(\n status, interval, ntfc_status, ntfc_state, settings_state)\n\n # Register any state changes\n settings_state.update_state(settings_changed, update_interval, ntfc_changed, ntfc_selected)\n\n # If the interval has changed, reprogram scheduler to run at the new interval\n if settings_state.intrvl_change_trig:\n modify_scheduler(JOB_ID, settings_state.settings_interval)\n\n if settings_state.notification_change_trig:\n NewsIndicator.notifications = False if not settings_state.notification_state else True", "def settings_changed(self, name, value):\n return", "def on_pre_enter(self):\n Logger.info('Application: Changed to the Settings screen.')", "def set_monitor(self, track, xclip, ident, args):\n if track in self.song().tracks and not track.is_foldable:\n if args in MON_STATES:\n track.current_monitoring_state = MON_STATES[args]\n else:\n if track.current_monitoring_state == 2:\n track.current_monitoring_state = 0\n else:\n track.current_monitoring_state += 1", "def open_settings(self, event):\n settings_dialog = cfg.SettingsDialog(parent=self, exclude=['window'])\n res = settings_dialog.ShowModal()\n if res == wx.ID_OK:\n # Reload relevant parts of app\n restart_monitor_timer = False\n restart_gui_timer = False\n reload_correlations = False\n reload_logger = False\n reload_graph = False\n\n for setting in settings_dialog.changed_settings:\n # If any 'monitor.' settings except 'monitor.divergence_threshold have changed then restart\n # monitoring timer with new settings.\n # If 'monitor.interval has changed then restart gui timer.\n # If 'monitor.monitoring_threshold' has changed, then refresh correlation data.\n # If any 'logging.' settings have changed, then reload logger config.\n if setting.startswith('monitor.') and setting != 'monitor.divergence_threshold':\n restart_monitor_timer = True\n if setting == 'monitor.interval':\n restart_gui_timer = True\n if setting == 'monitor.monitoring_threshold':\n reload_correlations = True\n if setting.startswith('logging.'):\n reload_logger = True\n if setting.startswith('monitor.calculations'):\n reload_graph = True\n\n # Now perform the actions\n if restart_monitor_timer:\n self.__log.info(\"Settings updated. Reloading monitoring timer.\")\n self.__cor.stop_monitor()\n\n # Build calculation params and start monitor\n calculation_params = [self.__config.get('monitor.calculations.long'),\n self.__config.get('monitor.calculations.medium'),\n self.__config.get('monitor.calculations.short')]\n\n self.__cor.start_monitor(interval=self.__config.get('monitor.interval'),\n calculation_params=calculation_params,\n cache_time=self.__config.get('monitor.tick_cache_time'),\n autosave=self.__config.get('monitor.autosave'),\n filename=self.__opened_filename)\n\n if restart_gui_timer:\n self.__log.info(\"Settings updated. Restarting gui timer.\")\n self.timer.Stop()\n self.timer.Start(self.__config.get('monitor.interval') * 1000)\n\n if reload_correlations:\n self.__log.info(\"Settings updated. 
Updating monitoring threshold and reloading grid.\")\n self.__cor.monitoring_threshold = self.__config.get(\"monitor.monitoring_threshold\")\n self.__refresh_grid()\n\n if reload_logger:\n self.__log.info(\"Settings updated. Reloading logger.\")\n log_config = cfg.Config().get('logging')\n logging.config.dictConfig(log_config)\n\n if reload_graph:\n self.__log.info(\"Settings updated. Reloading graph.\")\n if len(self.__selected_correlation) == 2:\n self.show_graph(symbol1=self.__selected_correlation[0], symbol2=self.__selected_correlation[1])", "def set_config(self): # called from button_set object \n self.settings['lights_on'] = self.lights_on.get()\n self.settings['lights_off'] = self.lights_off.get()\n self.settings['ambient_min'] = self.ambient_min.get()\n self.settings['soil_1'] = self.smc1.get()\n self.settings['soil_2'] = self.smc2.get()\n self.settings['soil_3'] = self.smc3.get()\n self.settings['soil_4'] = self.smc4.get()\n self.settings['overhead_level'] = self.overhead_level.get()\n\n # Save settings to config file in case of reboot / power-loss\n print \"UPDATING SETTINGS FILE\"\n with open(self.settings_path, 'w') as jsonfile:\n jsonfile.write(json.dumps(self.settings, indent=4))\n self.active_changes = True # (flag) changes are active!", "def _onSettings(self, event):\n dialog = sc.SettingsDialog(self)\n if dialog.ShowModal() == wx.ID_OK:\n dialog.saveSettings()\n dialog.Destroy()", "def preferencesChanged(self):\n self.__logViewer.preferencesChanged()", "def updateSettingsUI(self):\n\n pass", "def config_probe(self, widget, data=None):\n\t\tConfigure.ExcludeServer = (int(self.builder.get_object(\"MasterRadio\").get_active()))\n\t\tConfigure.MaxNodes = \t (int(self.builder.get_object(\"NodeScale\").get_value()))\n\t\tConfigure.LocalhostOnly = (int(self.builder.get_object(\"LocalHostRadio\").get_active()))\n\t\tConfigure.TimeStep = \t (int(self.builder.get_object(\"TimeStepScale\").get_value()))\n\t\tConfigure.Interval = \t (int(self.builder.get_object(\"IntervalScale\").get_value()))\n\n\t\tnomeFile = (str(self.builder.get_object(\"NameText\").get_text()))\n\n\t\tif ('/' not in nomeFile) : Configure.SaveConfig(NewFile=\"./extra/UserOutput/\"+nomeFile)\n\t\telse : Configure.SaveConfig(NewFile = nomeFile)\n\t\t\n\n\t\tprint \"### Sending setup signal to Monitor...\"\n\t\tself.setup_monitor()", "def preferencesChanged(self):\n # do nothing\n pass", "def update_log_config(self, monitor_name, log_config):\n pass", "def set_settings_devices(self):\n self.set_thermostat, self.set_humidifier, self.set_sprinklers, self.set_ventilation = self.settings[3:]", "def change_settings(settings, methods=['GET', 'POST']):\n message = resolve_settings(settings)\n socketio.emit('settings_update', SETTINGS)\n socketio.emit('log', message)", "def settings_push_callback(channel):\n \n global settingsMode, settingsCount\n\n settingsMode = True\n settingsCount = 0\n sleep(.5) # A little time for debouncing\n \n with canvas(device) as draw:\n settingsMenu( draw, settingsCount)", "def test_change_config(self):\n browser = Browser(self.app)\n portalURL = self.portal.absolute_url()\n browser.addHeader('Authorization', 'Basic %s:%s' % (SITE_OWNER_NAME, SITE_OWNER_PASSWORD))\n browser.open(portalURL + '/@@overview-controlpanel')\n browser.getLink('Image WatchDog settings').click()\n browser.getControl('Optimize PNG').selected = True\n browser.getControl('Enabled').selected = True\n browser.getControl('Save').click()\n\n registry = getUtility(IRegistry)\n settings = 
registry.forInterface(IImageWatchDogSettings)\n self.assertTrue(settings.optimize)\n self.assertTrue(settings.enabled)", "def valuechange():\n\n tempmin.setMaximum(tempmax.value())\n tempmax.setMinimum(tempmin.value())\n hummin.setMaximum(hummax.value())\n hummax.setMinimum(hummin.value())\n\n self.variables.default_values_dict[\"settings\"][\n \"current_tempmin\"\n ] = tempmin.value()\n self.variables.default_values_dict[\"settings\"][\n \"current_tempmax\"\n ] = tempmax.value()\n self.variables.default_values_dict[\"settings\"][\n \"current_hummin\"\n ] = hummin.value()\n self.variables.default_values_dict[\"settings\"][\n \"current_hummax\"\n ] = hummax.value()\n\n max = build_command(\n self.variables.devices_dict[\"temphum_controller\"],\n (\"set_hummax\", hummax.value()),\n )\n min = build_command(\n self.variables.devices_dict[\"temphum_controller\"],\n (\"set_hummin\", hummin.value()),\n )\n\n self.variables.vcw.write(\n self.variables.devices_dict[\"temphum_controller\"], max\n )\n self.variables.vcw.write(\n self.variables.devices_dict[\"temphum_controller\"], min\n )", "def on_spect_plot_settings_triggered(self):\n\n # Set current parameters from spectrogram plot widget class\n self.spectrogramTab.plotSettings.set_dialog_data()\n self.spectrogramTab.plotSettings.show()", "def on_config_change(self, config, section, key, value):\n \n if section == \"Makesmith Settings\":\n if key == \"COMport\":\n self.data.comport = value\n elif key == 'xPitch':\n print \"xPitch changed\"", "def handle_config_change(self, msg):\n self.xmpp.event('groupchat_config_status', msg)\n self.xmpp.event('muc::%s::config_status' % msg['from'].bare , msg)", "def monitor(self):", "def on_logger_plot_settings_triggered(self):\n\n # Set current parameters from time series plot widget class\n self.rawDataModule.plotSettings.set_dialog_data()\n self.rawDataModule.plotSettings.show()", "def _on_config_changed(self, _):\n self._configure_pod()", "def preferences(self: Self, event: Event = None) -> None:\n c = self\n c.openLeoSettings()", "def set_monitor_mode(controller_name):\n subprocess.run([\"ip\", \"link\", \"set\", wifi_name, \"down\"])\n subprocess.run([\"airmon-ng\", \"check\", \"kill\"])\n subprocess.run([\"iw\", wifi_name, \"set\", \"monitor\", \"none\"])\n subprocess.run([\"ip\", \"link\", \"set\", wifi_name, \"up\"])", "def action_settings(self):\n\n cur_datadir = self.config.starbound_data_dir\n settings = SettingsDialog(self)\n settings.exec()\n new_datadir = self.config.starbound_data_dir\n if new_datadir:\n if cur_datadir != new_datadir:\n self.load_data()\n self.scene.refresh(self.data)\n else:\n self.close_world()\n\n # Make sure our menus are enabled/disabled as appropriate\n self.enforce_menu_state()\n\n # Re-focus the main window\n self.activateWindow()", "def monitor_environment(self):\n self.INPUT_DATA_SIZE = 71\n if self.pushButton_connect.isChecked():\n self.pushButton_reload.setEnabled(1)\n self.listWidget_link.setEnabled(1)", "def on_actionSettings_triggered(self):\n self.start_app(SettingsApp)", "def setMonitorParam(self, monName, *params):\n monitorRef = self._ShREEKMonitors.get(monName, None)\n if monitorRef == None:\n msg = \"Tried to configure Non-existent monitor:\"\n msg += \"\\n%s\\n\" % monName\n msg += \"Existing Monitors:\\n\"\n msg += str(self._ShREEKMonitors.keys())\n raise ShREEKException(\n msg, ClassInstance = self,\n MissingMonitor = monName,\n ValidMonitors = self._ShREEKMonitors.keys())\n monitorRef.addPositionalArg(*params)\n return" ]
[ "0.6998499", "0.69827586", "0.6755782", "0.66924405", "0.66287124", "0.6608246", "0.63384444", "0.62970036", "0.6287617", "0.6161331", "0.61515385", "0.6105742", "0.6073169", "0.6016056", "0.5997793", "0.59133375", "0.5907518", "0.5882294", "0.5860596", "0.58594835", "0.5811915", "0.5809165", "0.5806647", "0.5789716", "0.57880044", "0.5777685", "0.576336", "0.5741873", "0.5730978", "0.57074356" ]
0.7010722
0
Says that some fields are numbers, and that if we take their sum with the given coefficients, then we get zero.
def sum_is_zero(
        coefficients: Iterable[float],
        period_as_delimiter: bool = False,
        force_dollar_decimal: bool = False) -> Predicate:
    coefficients = tuple(coefficients)
    assert isinstance(coefficients, tuple)
    return sum_is_near_zero(
        coefficients, tolerance=0, taper=0,
        period_as_delimiter=period_as_delimiter,
        force_dollar_decimal=force_dollar_decimal)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_array_sum_equals_one(self):\n plugin = NonLinearWeights(0.85)\n result = plugin.process(self.cube, self.coord_name)\n self.assertAlmostEqual(result.data.sum(), 1.0)", "def EvaluateFields(self, *float, **kwargs):\n ...", "def __float__(self) -> float:\n\n if self.isScalar():\n return float(self[()])\n else:\n raise ValueError(\"non-scalar coefficients are non-zero\")", "def get_zeros(self):\n return self.serie.isin([0]).sum()", "def nums(rec, field_names, filt=float):\n for fn in field_names:\n rec[fn] = num(rec.get(fn, \"\"), filt=filt)", "def non_specificity(self):\n result = 0\n for focal, value in self.items():\n if focal.cardinal > 0:\n result += value * math.log(focal.cardinal, 2)\n return round(result, 6)", "def test_default_zero_fields_validate(self):\r\n it = self.BigIntTest()\r\n it.validate()", "def sum(self):\n # skipna == True\n # only_numerical == True\n # skipna == True\n return self._lift(\"sum\")", "def test_coefficients(self):\n\n coefs = self.cs.coefficients\n\n self.assertEqual(coefs, (1, 0, 1, 0, 0, -1))", "def has_valid_sum(self):\n return 1 - MassFunction.precision <= self._sum() <= 1 + MassFunction.precision", "def decimal_sum(*args):\n res = 0\n for numb in args:\n try:\n res += Decimal(str(numb))\n except:\n print(f\"Argument [ {numb} ] is skipped... not a float\")\n return res", "def __float__(self):\n\t\toutput = 0.0\n\n\t\tfor index,value in enumerate(self):\n\t\t\tif value > 0:\n\t\t\t\toutput+=float(value) * (10 ** -index)\n\n\t\treturn output", "def isIsotropic( self ) :\n\n for coefficient in self[1:] :\n if( coefficient != 0. ) : return( False )\n return( True )", "def non_zero_components(self) :\n return self.__coefficients.keys()", "def _noFloats( self, numerator, denominator ):\r\n\t\tif ( type( numerator ) == types.ComplexType ):\r\n\t\t\tnumerator *= ( 10 ** FRACTION_FLOAT_ACCURACY )\r\n\t\telse:\r\n\t\t\tnumerator = long( round( numerator * ( 10 ** FRACTION_FLOAT_ACCURACY ) ) )\r\n\t\tif ( type( denominator ) == types.ComplexType ):\r\n\t\t\tdenominator *= ( 10 ** FRACTION_FLOAT_ACCURACY )\r\n\t\telse:\r\n\t\t\tdenominator = long( round( denominator * ( 10 ** FRACTION_FLOAT_ACCURACY ) ) )\r\n\t\treturn numerator, denominator", "def is_zero(self):\n return float(self.coeff.nominator) / self.coeff.denominator == 0.0", "def sumation(cmds):\n numbers = []\n sum = 0\n try: \n for i in cmds[1:]:\n numbers.append(float(i))\n for l in numbers:\n sum = sum + l\n print(sum)\n except TypeError:\n print(\"Hmmm, I guess you haven't only entered valid numbers\")", "def sum_is_positive(\n coefficients: Iterable[float],\n strict: bool = True,\n period_as_delimiter: bool = False,\n force_dollar_decimal: bool = False) -> Predicate:\n\n coefficients = tuple(coefficients)\n assert isinstance(coefficients, tuple)\n\n return sum_is_at_least(\n 0, coefficients, strict,\n period_as_delimiter, force_dollar_decimal)", "def sum_gt_zero(x):\r\n s = x[0] + x[1]\r\n if s > 0.0:\r\n return 1.0\r\n return 0.0", "def test_default_zero_fields_validate(self):\r\n it = self.IntegerTest()\r\n it.validate()", "def coefficient(self) -> float:\n ...", "def number(self, ket):\n \n final = 0.0\n q = 0\n for i in ket:\n if i != 0:\n final += 2**q\n q += 1 \n return final", "def __init__(self, coefficients):\n \n if not isinstance(coefficients, list):\n raise TypeError(\"The coefficients variable must be a list of numbers\")\n\n if sum(coefficients) != 1:\n raise ValueError(\"The sum of coefficients must be equal to 1\")\n\n self.coef = coefficients\n self.samples = [0.0] * 
len(self.coef)", "def zero(x):\n # TODO: get dtype from Expr and Matrix:\n return x * 0", "def test_float(self):\n self.assertFalse(validate_measure_input('0.0', self.measures))\n self.assertFalse(validate_measure_input('1.0', self.measures))\n self.assertFalse(validate_measure_input('1.1', self.measures))", "def aerosols(self):\n return 1.", "def somme(self) -> Numeric:\n return query_sum(\n self.offre_set.filter(valide=True),\n \"prix\",\n output_field=models.DecimalField(),\n )", "def is_zero(self) -> bool:\n return self.field.zero == self", "def is_summable(self):\n return (self.type in ['float', 'int']) and (self.unit not in [u'%', u'°', u'°C', u'°F'])", "def fraction_nonzero(module):\n return count_params_nonzero(module) / count_params(module)" ]
[ "0.554289", "0.54029125", "0.5320744", "0.52942926", "0.5283152", "0.5266309", "0.5187975", "0.51559705", "0.51484704", "0.512571", "0.5122701", "0.51117337", "0.5079821", "0.5078284", "0.50656897", "0.506009", "0.50569785", "0.5051165", "0.5039959", "0.50171083", "0.50089896", "0.49669802", "0.49484348", "0.49114966", "0.4911423", "0.49072683", "0.48965546", "0.48894525", "0.48891976", "0.487732" ]
0.56769276
0
Says that some fields are numbers, and that if we take their sum with the given coefficients, then we get a number greater than (or equal to) the given bound.
def sum_is_at_least(
        lower_bound: float,
        coefficients: Iterable[float],
        strict: bool = True,
        period_as_delimiter: bool = False,
        force_dollar_decimal: bool = False) -> Predicate:
    coefficients = tuple(coefficients)
    assert isinstance(coefficients, tuple)
    return SumIsAtLeast(
        name=f'sum_is_at_least('
             f'lower_bound={lower_bound}, '
             f'coefficients={coefficients}, '
             f'strict={strict}, '
             f'period_as_delimiter={period_as_delimiter}, '
             f'force_dollar_decimal={force_dollar_decimal})',
        lower_bound=lower_bound,
        coefficients=coefficients,
        strict=strict,
        period_as_delimiter=period_as_delimiter,
        force_dollar_decimal=force_dollar_decimal)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lower_bound(self) -> float:\n ...", "def EvaluateFields(self, *float, **kwargs):\n ...", "def compute_bounds(self, weights, add_coeff, dual_vars, lower_bounds, upper_bounds, store_primal=False):\n x0_coeff = -weights[0].backward(dual_vars.mus[0])\n x0 = torch.where(x0_coeff >= 0, lower_bounds[0].unsqueeze(1), upper_bounds[0].unsqueeze(1))\n bound = utils.bdot(x0, x0_coeff)\n if store_primal:\n self.bounds_primal = x0\n else:\n del x0\n del x0_coeff\n\n for lay_idx in range(1, len(weights)):\n lbs = lower_bounds[lay_idx].unsqueeze(1).clamp(None, 0)\n ubs = upper_bounds[lay_idx].unsqueeze(1).clamp(0, None)\n neg_bias = ((lbs * ubs) / (ubs - lbs))\n neg_bias.masked_fill_(ubs == lbs, 0) # cover case in which ubs & lbs coincide\n bound += utils.bdot(dual_vars.lambdas[lay_idx - 1].clamp(0, None), neg_bias)\n bound -= utils.bdot(dual_vars.mus[lay_idx - 1], weights[lay_idx - 1].get_bias())\n\n bound += utils.bdot(add_coeff, weights[-1].get_bias())\n return bound", "def __ge__(self,b):\n\n if (MODE_RELAXED_WITH_ERROR_CHECKING):\n if (isinstance(b,int) | isinstance(b,float)):\n return(self.val() >= b)\n return (self.val() >= b.val())", "def __gt__(self, *args):\n return _ida_hexrays.fnumber_t___gt__(self, *args)", "def check_for_float_and_int(check):", "def byNumberIsGreaterOrEqual(self, paramName, value, invert=False):\n\t\timport revitron\n\t\tvalue = float(value)\n\t\tself.applyFilter(\n\t\t revitron.DB.FilterDoubleRule,\n\t\t paramName,\n\t\t value,\n\t\t revitron.DB.FilterNumericGreaterOrEqual(),\n\t\t invert\n\t\t)\n\t\treturn self", "def check(self):\n self.lower_bound(5e-4)\n self.upper_bound(5e2)", "def upper_bound(self) -> float:\n ...", "def byNumberIsGreater(self, paramName, value, invert=False):\n\t\timport revitron\n\t\tvalue = float(value)\n\t\tself.applyFilter(\n\t\t revitron.DB.FilterDoubleRule,\n\t\t paramName,\n\t\t value,\n\t\t revitron.DB.FilterNumericGreater(),\n\t\t invert\n\t\t)\n\t\treturn self", "def McSherryLessIsBetter(caseAttrib, queryValue, maxValue, minValue, weight):\n try:\n # build query string\n queryFnc = {\n \"script_score\": {\n \"query\": {\n \"exists\": {\n \"field\": caseAttrib\n }\n },\n \"script\": {\n \"source\": \"((float)(Math.max(params.max,params.queryValue) - doc[params.attrib].value) / (float)(Math.max(params.max,params.queryValue) - Math.min(params.min,params.queryValue))) * params.weight\",\n \"params\": {\n \"attrib\": caseAttrib,\n \"queryValue\": queryValue,\n \"max\": maxValue,\n \"min\": minValue,\n \"weight\": weight\n }\n },\n \"_name\": caseAttrib\n }\n }\n return queryFnc\n\n except ValueError:\n print(\"McSherryLessIsBetter() is only applicable to numbers\")", "def upper_bound_of_coefficient(f):\n weight = 0\n for c in f.itercoefficients():\n weight += abs(c)**2\n weight = arith1.floorsqrt(weight) + 1\n degree = f.degree()\n lc = f[degree]\n m = (degree >> 1) + 1\n bound = 1\n for i in range(1, m):\n b = combinatorial.binomial(m - 1, i) * weight + \\\n combinatorial.binomial(m - 1, i - 1) * lc\n if bound < b:\n bound = b\n return bound", "def test_b1_calc_bounds_row(self):\n type_of = \"r\"\n self.assert_calc_bounds(type_of)", "def limit_by(self, field, lower_bound=None, upper_bound=None):\n if lower_bound is not None and upper_bound is not None:\n indices = (self[field] >= lower_bound) \\\n & (self[field] <= upper_bound)\n elif lower_bound is None:\n indices = self[field] <= upper_bound\n elif upper_bound is None:\n indices = self[field] >= lower_bound\n else:\n raise ValueError(\"One bound must be set!\")\n return self[indices]", 
"def McSherryMoreIsBetter(caseAttrib, queryValue, maxValue, minValue, weight):\n try:\n # build query string\n queryFnc = {\n \"script_score\": {\n \"query\": {\n \"exists\": {\n \"field\": caseAttrib\n }\n },\n \"script\": {\n \"source\": \"(1 - ((float)(Math.max(params.max,params.queryValue) - doc[params.attrib].value) / (float)(Math.max(params.max,params.queryValue) - Math.min(params.min,params.queryValue)) )) * params.weight\",\n \"params\": {\n \"attrib\": caseAttrib,\n \"queryValue\": queryValue,\n \"max\": maxValue,\n \"min\": minValue,\n \"weight\": weight\n }\n },\n \"_name\": caseAttrib\n }\n }\n return queryFnc\n\n except ValueError:\n print(\"McSherryMoreIsBetter() is only applicable to numbers\")", "def monetary_amount_valid(record, field_name='price', min=1, max=10):\n monetary_amount = record[field_name]\n assert isinstance(monetary_amount, float)\n string_price = str(monetary_amount)\n decimal = string_price.split(\".\")[1]\n assert min <= monetary_amount <= max and len(decimal) <= 2", "def ge(value, limit):\n return value >= limit", "def upper_bound(power=5):\n total_digits = 1\n while True:\n max_sum = 9**power * total_digits\n if len(str(max_sum)) < total_digits:\n return max_sum\n total_digits += 1", "def constrain(inputVal, lower_limit, upper_limit):\n \n if (inputVal < lower_limit):\n return lower_limit\n elif (inputVal > upper_limit):\n return upper_limit\n else:\n return inputVal", "def bCheck(c, v, p, b):\n val = (v+1).floor()\n deg = c.degree()\n coeffs = c.coefficients(sparse=False)\n lcoeff = coeffs[deg]; coeffs.remove(lcoeff)\n check1 = [(coeffs[i].valuation(p) - lcoeff.valuation(p))/(deg - i) for i in range(0,len(coeffs)) if coeffs[i] != 0]\n check2 = (val - lcoeff.valuation(p))/deg\n check1.append(check2)\n bval = min(check1)\n return (bval).ceil()", "def mongodb_int_filter(base_field, base_field_type):\n q = ''\n if base_field != '':\n if base_field_type == '1': # =\n q = float(base_field)\n if base_field_type == '2': # >\n q = {'$gt': float(base_field)}\n if base_field_type == '3': # >=\n q = {'$gte': float(base_field)}\n if base_field_type == '4': # <\n q = {'$lt': float(base_field)}\n if base_field_type == '5': # <=\n q = {'$lte': float(base_field)}\n return q", "def check_for_float(check):", "def ExactFloat(caseAttrib, queryValue, weight):\n# build query string\n queryFnc = {\n \"script_score\": {\n \"query\": {\n \"match\": {\n caseAttrib: queryValue\n }\n },\n \"script\": {\n \"params\": {\n \"attrib\": caseAttrib,\n \"queryValue\": queryValue,\n \"weight\": weight\n },\n \"source\": \"if (Math.abs(params.queryValue - doc[params.attrib].value) < 0.0001) {return (1.0 * params.weight) }\"\n },\n \"_name\": caseAttrib\n }\n }\n return queryFnc", "def RestrictionRangeBound(self, compsIdList, lowerBound, upperBound):\n for i in range(len(compsIdList)): compsIdList[i] -= 1\n if self.solverTypeOptimize:\n self.solver.add(sum([self.a[compId * self.nrVM + j] for compId in compsIdList for j in range(self.nrVM)]) >= lowerBound)\n else:\n self.solver.assert_and_track(\n PbGe(sum([self.a[compId * self.nrVM + j] for compId in compsIdList for j in range(self.nrVM)]),\n lowerBound), \"LabelRangeBound: \" + str(self.labelIdx))\n self.labelIdx += 1\n if self.solverTypeOptimize:\n PbLe(self.solver.add(sum([self.a[compId * self.nrVM + j] for compId in compsIdList for j in range(self.nrVM)]),\n upperBound))\n else:\n self.solver.assert_and_track(\n sum([self.a[compId * self.nrVM + j] for compId in compsIdList for j in range(self.nrVM)]) <= upperBound, \"LabelRangeBound: \" 
+ str(self.labelIdx))\n self.labelIdx += 1", "def _assert_bounds_are_valid(\n self, new_bounds: devices.PrimaryBounds\n ) -> None:\n correct_length = len(new_bounds) == self.nprimaries\n tuples_of_float = all(\n [\n isinstance(item, tuple) and isinstance(b, (float, np.floating))\n for item in new_bounds\n for b in item\n ]\n )\n in_gamut = all([b[0] >= 0.0 and b[1] <= 1.0 for b in new_bounds])\n\n if not all([correct_length, tuples_of_float, in_gamut]):\n print(self.__class__.bounds.__doc__)\n raise SilSubProblemError(\"Invalid input for bounds (see above).\")", "def numeric_check(param, name):\n\tif not isinstance(param, numbers.Number):\n\t\traise TypeError(\"Keyword arg '%s' must be a real number. Got: %s\" % (\n\t\t\tname, type(param)))\n\telse:\n\t\tpass", "def __allowed_values_correct_number(self):\n strTestName = 'Values of a number (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'number #1')\n RxCSObject.paramAddMan('parameter2', 'number #2')\n RxCSObject.paramAllowed('parameter2', range(10))\n\n RxCSObject.parameter1 = 11\n RxCSObject.parameter2 = 0\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def __relational_restriction_correct_parameter_vs_number(self):\n strTestName = 'A parameter higher than a number (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('iParameter1', 'Int parameter')\n RxCSObject.paramType('iParameter1', int)\n RxCSObject.paramH('iParameter1', 2)\n\n RxCSObject.iParameter1 = 4\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def InrecaLessIsBetter(caseAttrib, queryValue, maxValue, jump, weight):\n try:\n queryValue = float(queryValue)\n # build query string\n queryFnc = {\n \"script_score\": {\n \"query\": {\n \"exists\": {\n \"field\": caseAttrib\n }\n },\n \"script\": {\n \"source\": \"if (doc[params.attrib].value <= params.queryValue) { return (1.0 * params.weight) } if (doc[params.attrib].value >= (Math.max(params.max,params.queryValue)) { return 0 } return (params.jump * (float)((Math.max(params.max,params.queryValue) - doc[params.attrib].value) / (float)((Math.max(params.max,params.queryValue) - params.queryValue)) * params.weight\",\n \"params\": {\n \"attrib\": caseAttrib,\n \"queryValue\": queryValue,\n \"jump\": jump,\n \"max\": maxValue,\n \"weight\": weight\n }\n },\n \"_name\": caseAttrib\n }\n }\n return queryFnc\n\n except ValueError:\n print(\"InrecaLessIsBetter() is only applicable to numbers\")", "def __gt__(self, *args):\n return _ida_hexrays.cnumber_t___gt__(self, *args)" ]
[ "0.5581768", "0.5425924", "0.5421908", "0.53738344", "0.52734053", "0.52379966", "0.5235274", "0.51687074", "0.51636076", "0.515199", "0.51004237", "0.50439686", "0.5040996", "0.5032745", "0.50315386", "0.50006354", "0.4998004", "0.498631", "0.49774936", "0.49712873", "0.49681124", "0.49370933", "0.49355793", "0.49257994", "0.49201873", "0.492002", "0.4910426", "0.489609", "0.48922548", "0.48851657" ]
0.5817751
0
Says that a field is equal to the given amount, within a certain tolerance.
def is_nearly_equal_to(
        amount: float,
        tolerance: float = 0.5,
        taper: float = 0.5,
        period_as_delimiter: bool = False,
        force_dollar_decimal: bool = False) -> Predicate:
    return sum_is_approximately(
        amount, [1], tolerance, taper, period_as_delimiter,
        force_dollar_decimal)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_equal_to(amount: float) -> Predicate:\n return is_nearly_equal_to(amount, tolerance=0, taper=0)", "def test_amount_in_tons(self):", "def equals_exact(self, other, tolerance): # -> bool:\n ...", "def field_strength_close_enough(field_strength, desired_value):\n\n if field_strength > 100: # assume it is in mT instead of Teslas\n field_strength /= 1000 # and turn it into Teslas\n\n diff = abs(field_strength - desired_value)\n\n if diff < 0.2:\n return True\n else:\n return False", "def approx_eq(x, y, tolerance=1e-15):\n return abs(x - y) < tolerance", "def approx_eq(x, y, tolerance = 0.000001):\n\treturn abs(x - y) < tolerance", "def checkAnswer(comment,value,expected,results,tol=1e-10):\n if abs(value - expected) > tol:\n print(\"checking answer\",comment,value,\"!=\",expected)\n results[\"fail\"] += 1\n else:\n results[\"pass\"] += 1", "def assert_equals(expected, received):\n # You don't need to understand how this function works\n # just look at its documentation!\n if type(expected) == 'float' or type(received) == 'float':\n match = math.isclose(expected, received)\n else:\n match = expected == received\n\n assert match, f'Failed: Expected {expected}, but received {received}'", "def eq(a, b):\n return abs(a - b) < .05", "def almost_equals(self, other, decimal=...): # -> bool:\n ...", "def test_vat_float(self):\n value = 412.32\n self.assertAlmostEqual(cr.vat(value), value * 0.15, places=2)", "def Catch(X,Tolerance=0):\n if X < (.5-(Tolerance/2)):\n return(0)\n elif X > (.5+(Tolerance/2)):\n return(1)\n else:\n return(.5)", "def testConsistency(self):\n #self.assertAlmostEqual(self.fxlinkedcashflow.amount(),0)", "def check_compare_decrease(out_fields):\n change = out_fields[CHANGE_FLD]\n expected_change = -25.00\n check_compare(change, expected_change)\n check_float_value(change, CHANGE_FLD)", "def test_ge_1():\n a = FixedPoint(1, 'Q2.8')\n assert a > 0.9", "def check_compare_grow(out_fields):\n change = out_fields[CHANGE_FLD]\n expected_change = 50.00\n check_compare(change, expected_change)\n check_float_value(change, CHANGE_FLD)", "def test_amount_not_enough(self):\n item, change, _ = give_item_and_change('coke', .50)\n self.assertIsNone(item)\n self.assertEqual(change, 0.5)", "def test_gt_1():\n a = FixedPoint(1, 'Q2.8')\n assert a > 0.9", "def tolerance(self, tolerance: float) -> None:\n self._tolerance = tolerance", "def bet(self, amount):\r\n\r\n if self.players[self.active_player].credits < self.big_blind:\r\n message = \"Player {} won! Not enough money remaining.\".format(self.players[(self.active_player + 1) %\r\n len(self.players)].name)\r\n self.game_message.emit(message)\r\n self.restart()\r\n if self.players[(self.active_player + 1) % len(self.players)].credits < self.big_blind:\r\n message = \"Player {} won! 
Not enough money remaining.\".format(self.players[self.active_player].name)\r\n self.game_message_warning.emit(message)\r\n self.restart()\r\n\r\n if amount == 0:\r\n message = \"Raises must be larger than zero!\"\r\n self.game_message_warning.emit(message)\r\n\r\n elif self.previous_bet + amount > self.players[self.active_player].credits:\r\n message = \"Not enough money!\"\r\n self.game_message_warning.emit(message)\r\n else:\r\n self.pot += amount\r\n self.new_pot.emit()\r\n\r\n self.players[self.active_player].credits -= (self.previous_bet + amount)\r\n self.new_credits.emit()\r\n\r\n output_text = \"{} bet ${} and raised ${}\".format(self.players[self.active_player].name, self.previous_bet,\r\n amount)\r\n\r\n self.previous_bet = (self.previous_bet + amount)\r\n self.actions += 1\r\n\r\n self.new_output.emit(output_text)\r\n\r\n self.active_player = (self.active_player + 1) % len(self.players)\r\n\r\n # Update the players to hide their cards when it is not their turn\r\n for player in self.players:\r\n player.flip_cards()\r\n\r\n self.progress_game()", "def checkFloat(comment, value, expected, tol=1e-10, update=True):\n if np.isnan(value) and np.isnan(expected):\n res = True\n elif np.isnan(value) or np.isnan(expected):\n res = False\n else:\n res = abs(value - expected) <= tol\n if update:\n if not res:\n print(\"checking float\",comment,'|',value,\"!=\",expected)\n results[\"fail\"] += 1\n else:\n results[\"pass\"] += 1\n return res", "def eq(value, limit):\n return value == limit", "def assertEqualToWithin(self, a, b, ratio=1e-7, msg=None):\n\t\tif msg is None:\n\t\t\tmsg = \"%s != %s to within %s of the sum\"%(a, b, ratio)\n\t\tdenom = abs(a+b)\n\t\tself.failUnless(abs(a-b)/denom<ratio, msg)", "def test_setMassFrac(self):\n target35 = 0.2\n self.fuel.setMassFrac(\"U235\", target35)\n self.assertAlmostEqual(self.fuel.getMassFrac(\"U235\"), target35)", "def tol(self, value):\n self._tol = value", "def check_for_float(check):", "def fp_eq(x: float, y: float) -> bool:\n return fabs(x-y) < 10**-12", "def test_ge(self):\n f12: Fraction = Fraction(1, 2)\n f34: Fraction = Fraction(3, 4)\n f93: Fraction = Fraction(9, 3)\n f124: Fraction = Fraction(12, 4)\n self.assertTrue(f12 >= f34)\n self.assertTrue(f93 >= f124)\n self.assertFalse(f93 >= f12)", "def distance_tolerance(distance: float) -> float:\n ret = 10.0\n if distance < 0:\n ret += distance * (100 - ret) / -2500.0\n return ret", "def assertApproxEqual(self, actual, desired, significant=7, msg=\"\"):\n np.testing.assert_approx_equal(\n actual=actual,\n desired=desired,\n significant=significant,\n err_msg=msg,\n verbose=True,\n )" ]
[ "0.6836401", "0.62156755", "0.607548", "0.5973551", "0.5941763", "0.5798628", "0.56204855", "0.56173027", "0.56075376", "0.5509888", "0.55034184", "0.54373276", "0.54291254", "0.54259545", "0.54057163", "0.5404048", "0.53798753", "0.5368738", "0.5363665", "0.53579444", "0.53531784", "0.5340448", "0.5337919", "0.53257036", "0.52722794", "0.52721715", "0.52719384", "0.52672243", "0.52549946", "0.52441496" ]
0.6630341
1
Says that a field is exactly equal to some constant amount.
def is_equal_to(amount: float) -> Predicate:
    return is_nearly_equal_to(amount, tolerance=0, taper=0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testConsistency(self):\n #self.assertAlmostEqual(self.fxlinkedcashflow.amount(),0)", "def almost_equals(self, other, decimal=...): # -> bool:\n ...", "def test_amount_not_enough(self):\n item, change, _ = give_item_and_change('coke', .50)\n self.assertIsNone(item)\n self.assertEqual(change, 0.5)", "def mempool_assert_my_amount(condition: ConditionWithArgs, unspent: CoinRecord) -> Optional[Err]:\n if unspent.coin.amount != int_from_bytes(condition.vars[0]):\n return Err.ASSERT_MY_AMOUNT_FAILED\n return None", "def monetary_amount_valid(record, field_name='price', min=1, max=10):\n monetary_amount = record[field_name]\n assert isinstance(monetary_amount, float)\n string_price = str(monetary_amount)\n decimal = string_price.split(\".\")[1]\n assert min <= monetary_amount <= max and len(decimal) <= 2", "def test_amount_in_tons(self):", "def check_required_change(drink, amount):\n if (drink == \"espresso\" and amount > MENU[drink][\"cost\"]) or (drink == \"latte\" and amount > MENU[drink][\"cost\"])\\\n or (drink == \"cappuccino\" and amount > MENU[drink][\"cost\"]):\n return amount - MENU[drink][\"cost\"]\n else:\n return 0.00", "def eq(value, limit):\n return value == limit", "def check_funds(self, amount):\n if abs(amount)>self.get_balance(): return False\n else: return True", "def is_nearly_equal_to(\n amount: float,\n tolerance: float = 0.5,\n taper: float = 0.5,\n period_as_delimiter: bool = False,\n force_dollar_decimal: bool = False) -> Predicate:\n return sum_is_approximately(\n amount, [1], tolerance, taper, period_as_delimiter,\n force_dollar_decimal)", "def check_compare_grow(out_fields):\n change = out_fields[CHANGE_FLD]\n expected_change = 50.00\n check_compare(change, expected_change)\n check_float_value(change, CHANGE_FLD)", "def assert_equals(expected, received):\n # You don't need to understand how this function works\n # just look at its documentation!\n if type(expected) == 'float' or type(received) == 'float':\n match = math.isclose(expected, received)\n else:\n match = expected == received\n\n assert match, f'Failed: Expected {expected}, but received {received}'", "def update_amounts(self, save=True):\n self.amount_donated = self.get_amount_total(\n [StatusDefinition.SUCCESS, StatusDefinition.PENDING,\n StatusDefinition.PLEDGED])\n self.amount_needed = self.amount_asked - self.amount_donated\n\n if self.amount_needed < 0:\n # Should never be less than zero\n self.amount_needed = 0\n\n if save:\n self.save()", "def check_cap(org, amount):\n from django.db.models import Sum, Q\n\n if amount < 0:\n query = Q(favor__lt=0)\n else:\n query = Q(favor__gt=0)\n total = abs(\n org.reputations.filter(query).aggregate(sum=Sum(\"favor\"))[\"sum\"] or 0\n ) + abs(amount)\n mod = org.social_modifier * 5\n if total > mod:\n noun = \"favor\" if amount > 0 else \"disfavor\"\n raise CommandError(\n \"That would bring your total %s to %s, and you can only spend %s.\"\n % (noun, total, mod)\n )", "def test_calculate_contract_fee(a, b, expected):\n assert calculate_contract_fee(a, b) == expected", "def field_strength_close_enough(field_strength, desired_value):\n\n if field_strength > 100: # assume it is in mT instead of Teslas\n field_strength /= 1000 # and turn it into Teslas\n\n diff = abs(field_strength - desired_value)\n\n if diff < 0.2:\n return True\n else:\n return False", "def validate(self):\n if self.amount > 0:\n return True\n return False", "def equals_exact(self, other, tolerance): # -> bool:\n ...", "def assertApproxEqual(self, actual, desired, significant=7, msg=\"\"):\n 
np.testing.assert_approx_equal(\n actual=actual,\n desired=desired,\n significant=significant,\n err_msg=msg,\n verbose=True,\n )", "def fixed_amount(self, fixed_amount):\n\n self._fixed_amount = fixed_amount", "def fixed_amount(self, fixed_amount):\n\n self._fixed_amount = fixed_amount", "def check_funds(self, amount):\n if amount > self.get_balance():\n return False\n else:\n return True", "def test_eq_1():\n a = FixedPoint(1, 'Q2.8')\n assert a == 1", "def realEqual(x,y,eps=10e-10):\n return abs(x-y) < eps", "def transaction_successful(drink_type):\r\n total = 0\r\n cost = MENU[drink_type][\"cost\"]\r\n print(f\" A {drink_type} costs ${MENU[drink_type]['cost']}\")\r\n total += float(input(\" How many quarters? \")) * 0.25\r\n total += float(input(\" How many dimes? \")) * 0.10\r\n total += float(input(\" How many nickels? \")) * 0.05\r\n total += float(input(\" How many pennies? \")) * 0.01\r\n\r\n if total >= cost:\r\n print(f\"Here is ${total - cost} in change.\")\r\n return True\r\n else:\r\n print(\"Sorry that's not enough money. Money refunded.\")\r\n return False", "def contribute(k):\n global amount\n global _alarm\n amount = amount + k\n # remove the alarm\n if amount >= initial_amount * 0.3:\n _alarm = False", "def check_price(self):\n return self.day*self.price", "def test_calculate_retention_fee():\n assert calculate_retention_fee(2578) == Decimal('128.91')", "def eq(a, b):\n return abs(a - b) < .05", "def verifyField(self, pv, field, reference):\n full_pv = pv + \".\" + field\n if (caget(full_pv) != reference):\n msg = \"ERROR: \" + full_pv + \" not equal to \" + str(reference)\n raise Exception(__name__ + msg)\n\n return self.__g.SUCCESS" ]
[ "0.55429107", "0.54905224", "0.5480013", "0.53962016", "0.53881603", "0.5347809", "0.5342059", "0.5293009", "0.52919525", "0.5226928", "0.5202861", "0.51783156", "0.51050514", "0.50568867", "0.50500894", "0.5043599", "0.502004", "0.5019618", "0.50104225", "0.4995071", "0.4995071", "0.4994224", "0.49893135", "0.4971217", "0.49705514", "0.4935379", "0.4935342", "0.49323744", "0.4912972", "0.4897868" ]
0.60709715
0
Says that a field is greater than some constant amount.
def is_greater_than(
        amount: float,
        strict: bool = True,
        period_as_delimiter: bool = False,
        force_dollar_decimal: bool = False) -> Predicate:
    return sum_is_at_least(
        amount, [1], strict, period_as_delimiter,
        force_dollar_decimal)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gt(value, limit):\n return value > limit", "def set_GreaterThan(self, value):\n super(MoneyReceivedInputSet, self)._set_input('GreaterThan', value)", "def _check_amount_with_priority(self):\n\t\tfor line in self:\n\t\t\tif line.tax_slide_id and (line.amount_from > line.tax_slide_id.max_amount\n\t\t\t or line.amount_to > line.tax_slide_id.max_amount):\n\t\t\t\traise Warning(_(\"Line Amount couldn't exceed te slide max amount [%s]\" % line.tax_slide_id.max_amount))", "def greater_than(self) -> global___Expression:", "def _on_order_amount_too_low(self, _msg):\r\n self.debug(\"### Server said: 'Order amount is too low'\")\r\n self.count_submitted -= 1", "def set_GreaterThanEqualTo(self, value):\n super(MoneyReceivedInputSet, self)._set_input('GreaterThanEqualTo', value)", "def _check_amount_with_priority(self):\n\t\tfor slc in self:\n\t\t\tif slc.max_amount and self.search([('priority', '<', slc.priority), ('max_amount', '>=', slc.max_amount)]):\n\t\t\t\traise Warning(_(\"There are below slides [Priority less than %s] with bigger amount from [%s]\"\n\t\t\t\t \" which against the logic!!!\\n You can increase amount or handel priority\")\n\t\t\t\t % (slc.priority, slc.max_amount))", "def is_less_than(\n amount: float,\n strict: bool = True,\n period_as_delimiter: bool = False,\n force_dollar_decimal: bool = False) -> Predicate:\n return sum_is_at_least(\n -amount, [-1], strict, period_as_delimiter,\n force_dollar_decimal)", "def ge(value, limit):\n return value >= limit", "def __gt__(self, other):\n return self.greaterThan(other)", "def test_greater_than(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"greaterThan\"),\n torch.randn(3, 4, 5),\n torch.randn(3, 4, 5),\n fusible_ops={\"aten::gt\"},\n )", "def _greater_than_op(spec):", "def __gt__(self, *args):\n return _ida_hexrays.fnumber_t___gt__(self, *args)", "def __gt__(self, other):\n if isinstance(other, float):\n return self.floatvalue > other\n else:\n return not self.negative and not self == other", "def assertGreater(self, a, b, msg=None):\r\n if not a > b:\r\n standardMsg = '%s not greater than %s' % (safe_repr(a), safe_repr(b))\r\n self.fail(self._formatMessage(msg, standardMsg))", "def assertGreater(self, a, b, msg=None):\n if not a > b:\n standardMsg = '%s not greater than %s' % (safe_repr(a), safe_repr(b))\n self.fail(self._formatMessage(msg, standardMsg))", "def __gt__(self, other):\n return self.__f > other.get_f()", "def __gt__(self, obj):\r\n return assert_(self.obj > obj, '%r <= %r' % (self.obj, obj))", "def __gt__(self, other):\n return self._metric_value > other.metric_value()", "def gt(self, val):\n\t\treturn GreaterThan(self, val)", "def __gt__(self, *args):\n return _ida_hexrays.hexwarn_t___gt__(self, *args)", "def __gt__(self, other):\n self.conds.append((self.name, '>', other))\n return self", "def test_gt():\n value = 42\n num_a = param.Integer(value=value)\n assert num_a.value == value\n\n assert num_a.value > 1", "def __gt__(self, other):\n return self.weight > other.weight", "def search_UI_transaction_bigger_before_day(account):\n\t_day = read_day()\n\t_amount = read_amount()\n\tfound = search_transaction_bigger_before_day(account, _day, _amount, print_transaction)\n\tif (not found):\n\t\tprint('Nu exista nici o tranzactie efectuata inainte de ziua', \\\n\t\t\t\t\"%d cu suma mai mare de %f\" % (_day, _amount))", "def assertGreaterEqual(self, a, b, msg=None):\r\n if not a >= b:\r\n standardMsg = '%s not greater than or equal to %s' % (safe_repr(a), safe_repr(b))\r\n 
self.fail(self._formatMessage(msg, standardMsg))", "def greater(value, other):\n return value < other", "def __gt__(self, other):\n if not (isNumeric(other) or isinstance(other, Expression)):\n raise excep.biogemeError(\n f'This is not a valid expression: {other}'\n )\n return Greater(self, other)", "def __gt__(self,f2):\n return self.__num * f2.den > self.__den * f2.num", "def __ge__(self, other) -> bool:\n if isinstance(other, int) or isinstance(other, float):\n return self.balance >= other\n else:\n raise TypeError" ]
[ "0.6415876", "0.63352454", "0.61581963", "0.6144837", "0.6097271", "0.60745937", "0.6043975", "0.6001427", "0.5897026", "0.58857834", "0.57591", "0.5754323", "0.57372165", "0.5720173", "0.571576", "0.5690224", "0.56693393", "0.5642554", "0.56385976", "0.5630668", "0.56058604", "0.5604138", "0.55870664", "0.55510885", "0.55378854", "0.5536997", "0.5532034", "0.5515488", "0.5500996", "0.54996365" ]
0.66399515
0
Says that a field is less than some constant amount.
def is_less_than(
        amount: float,
        strict: bool = True,
        period_as_delimiter: bool = False,
        force_dollar_decimal: bool = False) -> Predicate:
    return sum_is_at_least(
        -amount, [-1], strict, period_as_delimiter,
        force_dollar_decimal)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _on_order_amount_too_low(self, _msg):\r\n self.debug(\"### Server said: 'Order amount is too low'\")\r\n self.count_submitted -= 1", "def _check_amount_with_priority(self):\n\t\tfor line in self:\n\t\t\tif line.tax_slide_id and (line.amount_from > line.tax_slide_id.max_amount\n\t\t\t or line.amount_to > line.tax_slide_id.max_amount):\n\t\t\t\traise Warning(_(\"Line Amount couldn't exceed te slide max amount [%s]\" % line.tax_slide_id.max_amount))", "def less_than(self) -> global___Expression:", "def set_LessThan(self, value):\n super(MoneyReceivedInputSet, self)._set_input('LessThan', value)", "def lt(value, limit):\n return value < limit", "def _check_amount_with_priority(self):\n\t\tfor slc in self:\n\t\t\tif slc.max_amount and self.search([('priority', '<', slc.priority), ('max_amount', '>=', slc.max_amount)]):\n\t\t\t\traise Warning(_(\"There are below slides [Priority less than %s] with bigger amount from [%s]\"\n\t\t\t\t \" which against the logic!!!\\n You can increase amount or handel priority\")\n\t\t\t\t % (slc.priority, slc.max_amount))", "def price_too_low(cls, min_price: int):\n def eval_fn(p: Posting):\n if p.price < min_price:\n return f\"The price (${p.price}) is suspiciously low.\"\n\n return cls(eval_fn)", "def check_min_value(self, tracked_price):\n if tracked_price < self.min_value and self.warning_calls <= 2:\n print(f'Warning! Price dropeed under {self.min_value} pln {tracked_price}')\n self.make_phone_call()\n self.warning_calls += 1\n elif tracked_price < self.min_value and self.warning_calls == 3:\n self.send_a_message(\n f'This is a warning message. Price of EUR/PLN dropped under critical value!'\n f' {self.min_value} pln')\n print(f'Called 3 times! Price dropeed under {self.min_value} pln {tracked_price}')\n self.warning_calls = 0\n else:\n print(f\"Current price for Euro in PLN is {tracked_price}\")", "def assertLessThan(self, first, second, msg=None):\n if not first < second:\n raise self.failureException, \\\n (msg or '%r >= %r' % (second, first))", "def ge(value, limit):\n return value >= limit", "def assert_less(self, a, b):\n if not a < b:\n raise AssertionError('%s not less than %s' % (str(a), str(b)))", "def para_lower_than(threshold):\n\n return lambda step, curr_obj, curr_optimized_obj, extra_para: extra_para<threshold", "def set_LessThanEqualTo(self, value):\n super(MoneyReceivedInputSet, self)._set_input('LessThanEqualTo', value)", "def test_less_than(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"lessThan\"),\n torch.randn(3, 4, 5),\n torch.randn(3, 4, 5),\n fusible_ops={\"aten::lt\"},\n )", "def assertLess(self, a, b, msg=None):\r\n if not a < b:\r\n standardMsg = '%s not less than %s' % (safe_repr(a), safe_repr(b))\r\n self.fail(self._formatMessage(msg, standardMsg))", "def is_greater_than(\n amount: float,\n strict: bool = True,\n period_as_delimiter: bool = False,\n force_dollar_decimal: bool = False) -> Predicate:\n return sum_is_at_least(\n amount, [1], strict,\n period_as_delimiter, force_dollar_decimal)", "def check_required_change(drink, amount):\n if (drink == \"espresso\" and amount > MENU[drink][\"cost\"]) or (drink == \"latte\" and amount > MENU[drink][\"cost\"])\\\n or (drink == \"cappuccino\" and amount > MENU[drink][\"cost\"]):\n return amount - MENU[drink][\"cost\"]\n else:\n return 0.00", "def get_cashflows_less_than(leg, run_date, cf_type):\n query = acm.CreateFASQLQuery('FCashFlow', 'AND')\n query.AddAttrNode('Leg.Oid', 'EQUAL', leg.Oid())\n query.AddAttrNode('PayDate', 'LESS', run_date)\n 
query.AddAttrNode('CashFlowType', 'EQUAL', \"Fixed Amount\")\n query.AddAttrNode('AdditionalInfo.PS_DepositType', 'EQUAL', cf_type)\n cashFlows = query.Select()\n return cashFlows", "def test_LessThan(self):\n self.assertTrue(Fraction(-7,10)<Fraction(15,10))", "def takes_less_than(self, max_time):\n return TakesLessThanCM(self, max_time)", "def less(value, other):\n return value > other", "def gt(value, limit):\n return value > limit", "async def handle_less(message: types.Message):\n await handle_change_threshold(message, 1 / 1.5)", "def test_check_min(self):\n\t\tself.filter.set_operator(\".min\")\n\t\tself.filter.set_limit(12)\n\t\tself.assertTrue(self.filter.check(Object(field=12)))\n\t\tself.assertTrue(self.filter.check(Object(field=15)))\n\t\tself.assertFalse(self.filter.check(Object(field=9)))", "def __ge__(self, other) -> bool:\n if isinstance(other, int) or isinstance(other, float):\n return self.balance >= other\n else:\n raise TypeError", "def constrain(amt,low,high):\n if amt < low:\n return low\n elif amt > high:\n return high\n else:\n return amt", "def test_lt():\n value = 42\n num_a = param.Integer(value=value)\n assert num_a.value == value\n\n assert num_a.value < 100", "def monetary_amount_valid(record, field_name='price', min=1, max=10):\n monetary_amount = record[field_name]\n assert isinstance(monetary_amount, float)\n string_price = str(monetary_amount)\n decimal = string_price.split(\".\")[1]\n assert min <= monetary_amount <= max and len(decimal) <= 2", "def __ge__(self,b):\n\n if (MODE_RELAXED_WITH_ERROR_CHECKING):\n if (isinstance(b,int) | isinstance(b,float)):\n return(self.val() >= b)\n return (self.val() >= b.val())", "def search_UI_transaction_bigger_before_day(account):\n\t_day = read_day()\n\t_amount = read_amount()\n\tfound = search_transaction_bigger_before_day(account, _day, _amount, print_transaction)\n\tif (not found):\n\t\tprint('Nu exista nici o tranzactie efectuata inainte de ziua', \\\n\t\t\t\t\"%d cu suma mai mare de %f\" % (_day, _amount))" ]
[ "0.6527394", "0.60434765", "0.59778047", "0.5974657", "0.590551", "0.58676517", "0.5851458", "0.58318037", "0.5665455", "0.5614151", "0.5609212", "0.55952173", "0.5593226", "0.5548188", "0.5531906", "0.5519", "0.5454931", "0.5441718", "0.54397845", "0.5428812", "0.5411934", "0.53833854", "0.5357245", "0.53555703", "0.53494817", "0.5341248", "0.53359216", "0.5335352", "0.53211385", "0.530416" ]
0.67949855
0
Returns a 3x3 pixel array representing a superposition of all current probabilities for this block. This function is called every frame for multiple blocks and singlehandedly slows the program down by an enormous amount, but it's worth it for the cool animation effect.
def superposition(self):
    superpos_array = [[0,0,0],[0,0,0],[0,0,0]]
    #check normalised:
    n = sum(self.block_weights)
    if n != 1:  #normalise here if required
        self.block_weights = [x/n for x in self.block_weights]
    o = self.block_opts
    w = self.block_weights
    for i in range(TILE_SIZE):
        for j in range(TILE_SIZE):
            for k in range(len(o)):
                superpos_array[j][i] += 254*get_blocks(o[k])[j][i]*w[k]
    return superpos_array  #propagate changes out
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_block():\n final_locs = [[1 for x in range(LOC_SIZE)] for y in range(LOC_SIZE)]\n for a in range(int(LOC_SIZE / 2)):\n for b in range(a, int(LOC_SIZE / 2)):\n # creating and ringing each of the fleas individually\n print(a, b)\n locs = [[1 if x == a and y == b else 0 for x in range(LOC_SIZE)] for y in range(LOC_SIZE)]\n for i in range(50):\n locs = ring(locs)\n # finding complement of all probabilities to find probabilities of not having a flea there\n for r in range(LOC_SIZE):\n for s in range(LOC_SIZE):\n locs[r][s] = 1 - locs[r][s]\n # transposes and adds the set of probabilities to not have to recalculate for mirrored values\n if a != b:\n locs = operate_on_narray(locs, zip(*locs), lambda o, p: o*p)\n # multiplying the probabilities together\n final_locs = operate_on_narray(final_locs, locs, lambda o, p: o*p)\n return final_locs", "def prob_3_3(self):\n \n ###### START CODE HERE ######\n\n\n ###### END CODE HERE ######\n pass\n \n ###### return negativeImg ######", "def prepro(I):\n# I = env.reset() # Use this to verify, whats happening\n# plt.imshow(I)\n I = I[35:195] # crop and keep only the play area\n I = I[::2,::2,0] # downsample by factor of 2, take every second row and column, and take only \"R\" component out of RGB image\n I[I == 144] = 0 # erase background (background type 1)\n I[I == 109] = 0 # erase background (background type 2)\n I[I != 0] = 1 # everything else (but paddles, ball) just set to 1\n return I.astype(np.float).ravel() # convert to 1D array and return", "def start(self):\n self.frame = 0\n self._init_level(1)\n self.reward = 0\n self.pcontinue = 1\n self.ghost_speed = self.ghost_speed_init\n return self._make_image(), self.reward, self.pcontinue", "def __init__(self, dim):\n self.surface = pygame.Surface(dim)\n self.p_array = pygame.PixelArray(self.surface)\n self.p_array[0, 0] = (255, 255, 255)\n print(self.p_array.shape)\n # set some values\n self.width = self.surface.get_width()\n self.height = self.surface.get_height()\n # start in center of surface\n self.center = (self.width // 2, self.height // 2)\n self.fibonacci = get_fibonacci()\n self.framecount = 0\n self.initialize()", "def block(self, block: tuple) -> list:\n b = []\n for j in range(3):\n index = block * 3 + j * 9 + 18 * (block // 3)\n for val in self.grid[index:index+3]:\n b.append(val)\n return b", "def getState(game):\n pixels = pygame.surfarray.array3d(game.screen)[:]\n pixels = np.array([pixels], dtype=float)\n\n # Here we will preprocess the pixel data\n bitsize = game.screen.get_bitsize() / 4\n pixels *= 1 / 2**bitsize # Normalize to [0..1]\n\n return pixels", "def NewBlock(self):\n for i in self.matrix:\n if 2 in i:\n return()\n blockType = self.bag.Choose()\n subtractor = {\"I\" : 4, \"J\" : 3, \"L\" : 3, \"O\" : 2, \"S\" : 3, \"T\" : 3, \"Z\": 3}\n x = random.randint(0, self.width - subtractor.get(blockType))\n coords = []\n if blockType == \"I\":\n coords = [(x + i, 0) for i in range(4)]\n elif blockType == \"J\":\n coords = [(x + i, 0) for i in range(3)]\n coords.append((x, 1))\n elif blockType == \"L\":\n coords = [(x + i, 0) for i in range(3)]\n coords.append((x + 2, 1))\n elif blockType == \"O\":\n coords = [(x, 0), (x + 1, 0), (x, 1), (x + 1, 1)]\n elif blockType == \"Z\":\n coords = [(x, 0), (x + 1, 0), (x + 1, 1), (x + 2, 1)]\n elif blockType == \"S\":\n coords = [(x + 1, 0), (x + 2, 0), (x, 1), (x + 1, 1)]\n elif blockType == \"T\":\n coords = [(x, 0), (x + 1, 0), (x + 2, 0), (x + 1, 1)]\n self.coords = coords\n return(coords)", "def prob_3_1(self):\n \n ###### START 
CODE HERE ######\n\n\n ###### END CODE HERE ######\n pass\n \n ###### return swapImg ######", "def betas(self):\n b = np.zeros(self.d)\n if len(self.blocks) == 0: return np.array(list())\n for block in np.sort(np.array(list(self.blocks.keys()))): # map keys are not always sorted\n with self.block_locks[block]:\n for i, j in enumerate(self.block_idxs[block]):\n b[j] = np.copy(self.blocks[block][i])\n return np.array(b)", "def prettified_current_state(self):\n\n prettified = np.zeros((len(self.state) + 2,len(self.state[0]) + 2))\n\n for i in range(1, len(prettified) - 2):\n for j in range(1, len(prettified[0]) - 2):\n prettified[i, j] = self.state[i - 1, j - 1]\n\n x = np.linspace(-0.5, 0.5, self.width + 2)\n y = np.linspace(-0.5, 0.5, self.height + 2)\n\n X, Y = np.meshgrid(x, y)\n\n return X, Y, prettified", "def _pre_calc_player(self):\n self._player_scan_unshifted = []\n\n # Only 1,2,3 required, but 0..3 calculated\n NUMBER_RANGE = 4\n\n # Only 1,2,4 required, but 0..4 calculated\n SIZE_RANGE = 5\n\n # Gaps are 0, 2, 4, 8\n GAP_RANGE = 9\n\n GRAPHIC_RANGE = 256\n\n # Create enough empty lists to allow direct indexing.\n self._player_scan_unshifted = [[] for x in range(NUMBER_RANGE)]\n for number in [1,2,3]:\n\n self._player_scan_unshifted[number] = [[] for x in range(SIZE_RANGE)]\n for size in [1,2,4]:\n\n self._player_scan_unshifted[number][size] = [[] for x in range(GAP_RANGE)]\n for gap in [0,2,4,8]:\n self._player_scan_unshifted[number][size].append([])\n for reflect in range(2):\n self._player_scan_unshifted[number][size][gap].append([])\n for g in range(GRAPHIC_RANGE):\n # Create the 8-bit 'graphic'\n graphic = [False] * 8\n for i in range(8):\n if (g >> i) & 0x01:\n graphic[i] = True\n\n if reflect:\n graphic.reverse()\n\n # Scale the graphic, so each pixel is 'size' big\n graphic = [x for x in graphic for _ in [0] * size]\n\n scan = [False] * Stella.FRAME_WIDTH\n for n in range(number):\n offset = n*gap*8\n scan[offset:offset + len(graphic)] = graphic\n\n self._player_scan_unshifted[number][size][gap][reflect].append(scan)", "def get_pixels(self):\n\n # pygame board needs to be initialized the first time\n if not self.board:\n self.setup_display(render_gui=False)\n\n self.draw_window(draw_leaderboard=False)\n pixels = pygame.surfarray.array3d(self.window)\n return np.moveaxis(pixels, 1, 0)", "def get_block_positions(self, fig):\n block_positions = []\n\n # Iterates through y + active_piece.y and x + active_piece.x\n for y, row in enumerate(fig, start=self.active_piece.y):\n for x, val in enumerate(row, start=self.active_piece.x):\n if val != 0:\n block_positions.append((x, y))\n\n return block_positions", "def positions_global(anim):\r\n \r\n positions = transforms_global(anim)[:,:,:,3]\r\n return positions[:,:,:3] / positions[:,:,3,np.newaxis]", "def Motion_estimate_compute(data,block_size=16):\n\n nb_blocks = width//block_size*height//block_size\n nb_frames = data.size//frame_size\n frames = np.array(data).reshape(nb_frames,frame_size)\n symbols_stream = [DCT_compute(frames[0],offset=128)]\n print(symbols_stream[-1].shape)\n\n for frame_index in range(1,nb_frames-1,2):\n # I\n symbols_stream.append(DCT_compute(frames[frame_index+1],offset=128))\n print(symbols_stream[-1].shape)\n # P\n P_frame = Motion_estimate_compute_1frame(frames[frame_index-1],\n frames[frame_index+1],\n frames[frame_index],\n block_size=block_size)\n \n print(P_frame[-1].shape)\n symbols_stream.append(P_frame)\n\n # Extra I if there is an odd number of frames\n if nb_frames%2 == 0:\n 
symbols_stream.append(np.array([-1]))\n symbols_stream.append(DCT_compute(frames[-1],offset=128))\n print(symbols_stream[-1].shape)\n symbols_stream = np.concatenate(symbols_stream)\n \n print(symbols_stream[17870:17890])\n return symbols_stream", "def _create_lod0_array(self, larger_blocks: numpy.ndarray, unique_blocks: numpy.ndarray, offset: Tuple[int, int, int] = None) -> Tuple[List[numpy.ndarray], List[numpy.ndarray]]:\n offset = offset or (0, 0, 0)\n blocks = larger_blocks[1:-1, 1:-1, 1:-1]\n transparent_array = numpy.zeros(larger_blocks.shape, dtype=numpy.uint8)\n models: Dict[int, minecraft_model_reader.MinecraftMesh] = {}\n for block_temp_id in unique_blocks:\n model = models[block_temp_id] = self._get_model(block_temp_id)\n transparent_array[larger_blocks == block_temp_id] = model.is_transparent\n\n def get_transparent_array(offset_transparent_array, transparent_array_):\n return numpy.logical_and(\n offset_transparent_array, # if the next block is transparent\n numpy.logical_not( # but is not the same block with transparency mode 1\n (offset_transparent_array == 1) * (offset_transparent_array == transparent_array_)\n )\n )\n\n middle_transparent_array = transparent_array[1:-1, 1:-1, 1:-1]\n show_up = get_transparent_array(transparent_array[1:-1, 2:, 1:-1], middle_transparent_array)\n show_down = get_transparent_array(transparent_array[1:-1, :-2, 1:-1], middle_transparent_array)\n show_north = get_transparent_array(transparent_array[1:-1, 1:-1, :-2], middle_transparent_array)\n show_south = get_transparent_array(transparent_array[1:-1, 1:-1, 2:], middle_transparent_array)\n show_east = get_transparent_array(transparent_array[2:, 1:-1, 1:-1], middle_transparent_array)\n show_west = get_transparent_array(transparent_array[:-2, 1:-1, 1:-1], middle_transparent_array)\n\n show_map = {\n 'up': show_up,\n 'down': show_down,\n 'north': show_north,\n 'south': show_south,\n 'east': show_east,\n 'west': show_west\n }\n\n chunk_verts = []\n chunk_verts_translucent = []\n\n for block_temp_id, model in models.items():\n # for each unique blockstate in the chunk\n # get the model and the locations of the blocks\n model: minecraft_model_reader.MinecraftMesh\n all_block_locations = numpy.argwhere(blocks == block_temp_id)\n if not all_block_locations.size:\n continue\n where = None\n for cull_dir in model.faces.keys():\n # iterate through each cull direction\n # narrow down the blocks to what should be created for that cull direction\n if cull_dir is None:\n block_locations = all_block_locations\n elif cull_dir in show_map:\n if where is None:\n where = tuple(all_block_locations.T)\n block_locations = all_block_locations[show_map[cull_dir][where]]\n if not block_locations.size:\n continue\n else:\n continue\n\n # the number of blocks and their offsets in chunk space\n block_count = len(block_locations)\n block_offsets = block_locations\n\n # the vertices in model space\n verts = model.verts[cull_dir].reshape((-1, 3))\n tverts = model.texture_coords[cull_dir].reshape((-1, 2))\n faces = model.faces[cull_dir]\n\n # each slice in the first axis is a new block, each slice in the second is a new vertex\n vert_table = numpy.zeros((block_count, faces.size, self._vert_len), dtype=numpy.float32)\n vert_table[:, :, :3] = verts[faces] + block_offsets[:, :].reshape((-1, 1, 3)) + self.offset + offset\n vert_table[:, :, 3:5] = tverts[faces]\n\n vert_index = 0\n for texture_index in model.texture_index[cull_dir]:\n tex_bounds = self._texture_bounds(model.textures[texture_index])\n\n vert_table[:, 
vert_index:vert_index+3, 5:9] = tex_bounds\n vert_index += 3\n\n vert_table[:, :, 9:12] = model.tint_verts[cull_dir].reshape((-1, 3))[faces] * _brightness_multiplier[cull_dir]\n\n if model.is_transparent == 1:\n chunk_verts_translucent.append(vert_table.ravel())\n else:\n chunk_verts.append(vert_table.ravel())\n\n return chunk_verts, chunk_verts_translucent", "def pseudo_sample(self):\n return (torch.zeros(1, 1, 28, 28), None)", "def uniform_start_probs(self) -> np.ndarray:\n return np.ones(self.n_states) / self.n_states", "def prepro(I):\n # \"\"\" prepro 200x235x3 uint8 frame into 10000 (100x100) 1D float vector \"\"\"\n I = I[35:200] # crop - remove 35px from start & 35px from end of image in x, to reduce redundant parts of image (i.e. after ball passes paddle)\n I = I[::2,::2,0] # downsample by factor of 2\n I[I == 43] = 0 # erase background (background type 1)\n I[I != 0] = 1 # everything else (paddles, ball) just set to 1\n return I.astype(np.float).ravel()", "def _block(self, x, y):\n\n cells = []\n\n ix = x - (x%3)\n iy = y - (y%3)\n\n for y in range(iy, iy+3):\n for x in range(ix, ix+3):\n i = self._index(x, y)\n cells.append(self.data[i])\n\n return cells", "def collect_blocks():\n\n # Below are the position of (c,r) in a block.\n\n #########################\n # (0,0) # (1,0) # (2,0) #\n #########################\n #########################\n # (0,1) # (1,1) # (2,1) #\n #########################\n #########################\n # (0,2) # (1,2) # (2,2) #\n #########################\n\n for x in range(72):\n r, c = x // 9 % 3, x % 3\n if r == 0:\n if c == 0:\n yield x, x + 10\n yield x, x + 11\n yield x, x + 19\n yield x, x + 20\n elif c == 1:\n yield x, x + 8\n yield x, x + 10\n yield x, x + 17\n yield x, x + 19\n else:\n yield x, x + 7\n yield x, x + 8\n yield x, x + 16\n yield x, x + 17\n elif r == 1:\n if c == 0:\n yield x, x + 10\n yield x, x + 11\n elif c == 1:\n yield x, x + 8\n yield x, x + 10\n else:\n yield x, x + 8\n yield x, x + 7", "def get_pix_pos(self):\r\n return vec((self.grid_pos[0]*self.app.cell_width)+TOP_BOTTOM_BUFFER//2+self.app.cell_width//2,\r\n (self.grid_pos[1]*self.app.cell_height) +\r\n TOP_BOTTOM_BUFFER//2+self.app.cell_height//2)\r\n # where Pac-Man starts relative to the board\r", "def block(B):\n return np.array(np.bmat(B))", "def ci(x, y, z):\n\n return (x * 16 + z) * CHUNK_HEIGHT + y", "def poss_block_byrc(self):\n block_horiz, block_vert = self.board_size // self.c_size, self.board_size // self.r_size\n lpos = [self.c_size * no for no in range(block_horiz)]\n vpos = [self.r_size * no for no in range(block_vert)]\n row_poss, col_poss = self.poss_by_block()\n row_poss = [row_poss[x:x + self.c_size] for x in vpos]\n row_poss = [[subset[no] for subset in r] for r in row_poss for no in range(self.r_size)]\n col_poss = [col_poss[x:x + self.r_size] for x in lpos]\n col_poss = [[subset[no] for subset in r] for r in col_poss for no in range(self.c_size)]\n # Rearrange to get blocks in right order (across-down)\n col_poss = [col_poss[i + j * block_vert] for i in range(block_vert) for j in range(block_vert)]\n return row_poss, col_poss", "def sub_blocks(green_prob, num_trials):\n green_trials = round(num_trials*green_prob)\n blue_trials = round(num_trials*(1-green_prob))\n target = [\"green\"]*green_trials+[\"blue\"]*blue_trials\n random.seed(100)\n target = random.sample(target, len(target))\n return target", "def random_start_probs(self) -> np.ndarray:\n return self.random_state.dirichlet(np.ones(self.n_states), size=1).flatten()", "def _build(self):\n ary = 
np.zeros( (3,3,3), float )\n ary[0,0,0] = ary[1,1,1] = ary[0,1,2] = ary[1,0,2] = 1.\n ary[0,2,0] = ary[0,2,2] = ary[2,0,0] = ary[2,0,2] = 0.5\n ary[1,2,1] = ary[1,2,2] = ary[2,1,1] = ary[2,1,2] = 0.5\n ary[2,2,0] = ary[2,2,1] = 0.25\n ary[2,2,2] = 0.5\n return ary", "def tile_calculation(xi, yi, axi, ayi, positions, weights):\n for j in range(cuda.blockDim.x):\n xj = positions[j,0]\n yj = positions[j,1]\n wj = weights[j]\n axi, ayi = body_body_interaction(xi, yi, xj, yj, wj, axi, ayi)\n return axi, ayi" ]
[ "0.5844929", "0.57490706", "0.5584011", "0.55778044", "0.54837716", "0.5467556", "0.5449255", "0.5446417", "0.542108", "0.54169047", "0.5410056", "0.53835994", "0.53662336", "0.5350493", "0.5349711", "0.5314925", "0.53075755", "0.5303564", "0.5294841", "0.52947766", "0.52850795", "0.5278076", "0.5272137", "0.5271589", "0.52649283", "0.52530146", "0.5246838", "0.5245833", "0.5235102", "0.52331907" ]
0.7223847
0
check for uncollapsed states (inefficient to say the least)
def check_done(grid):
    for row in grid:
        for el in row:
            if not el.collapsed:
                return False
    else:
        return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def states_filter(state):\n if state.count(0) < state.count(1) or state.count(1) < state.count(0) - 1:\n return False\n\n rows = [[i, i+1, i+2] for i in [0, 3, 6]]\n cols = [[i, i+3, i+6] for i in [0, 1, 2]]\n\n winners = set()\n\n for row_indexes in rows:\n row = [state[ind] for ind in row_indexes]\n if row[0] >= 0 and are_same(row):\n winners.add(row[0])\n\n for col_indexes in cols:\n col = [state[ind] for ind in col_indexes]\n if col[0] >= 0 and are_same(col):\n winners.add(col[0])\n\n # We don't look at diags\n return len(winners) <= 1", "def __bool__(self):\n return len(self._states_) > 0", "def collapsed(blk):\n if blk is not None and blk.name in COLLAPSIBLE and\\\n len(blk.values) == 1 and blk.values[0] != 0:\n return True\n return False", "def getAbsorbingStates(m):\n\t\n a=[]\n for r in range(len(m)):\n if(sum(m[r])==0): a.append(r)\n return a", "def all(c):\n states(c)\n etc(c)\n prune(c)", "def is_expanded(self) -> bool:\n return len(self._untried_edges) == 0", "def check(self):\n if (sum(self.state) == 0):\n return -1\n elif (self.state[-1] >= 1):\n return 1\n else:\n return 0", "def find_new_states(tree):\n\trow = tree.get_all_states()\n\t\n\tfor state in row:\n\t\tif state not in all_states_explored:\n\t\t\t\tall_states_explored.append(state)", "def do_keep_expanding(expansion_state) -> bool:\n _, step, trajectory, _, _, is_diverging, _, is_turning = expansion_state\n return (step < max_num_expansions) & ~is_diverging & ~is_turning", "def goal_test(state): \n size = len(state)\n for i in range (size):\n for j in range (size):\n if state[i][j] != i*size + j:\n return False \n return True", "def check_state(self):\n pass", "def fullGrid(state):\n return not ((state[:, :, 0] + state[:, :, 1]) == 0).any()", "def is_collapsed(topo, pore_diameter, no_window):\n expected_wind = topo_2_property(topo, property='expected_wind')\n if expected_wind != no_window:\n return True\n elif pore_diameter < 2.8:\n return True\n else:\n return False", "def collapsible(blk):\n if blk is None or blk.name not in COLLAPSIBLE:\n return False\n if find_sandwich_top(blk) is None:\n return False\n return True", "def reached_goal(state):\n return any(map(completely_removed, state['sliders']))", "def goal_test(self, state):\n return state.board==range(self.N*self.N)", "def has_final_states(self):\n return len(self.final_states()) > 0", "def collapsed(self) -> None:", "def backtrack(self):\n print('0')\n if len(self.Stack):\n step = self.Stack.pop()\n # Restore all the Knot affected by the last collapse.\n for (position, space) in step.items():\n self[position] = Knot(space)\n self.wait_to_collapse.add(position)\n return set(step.keys())\n else:\n raise CollapseError(\"No Sulotion\")", "def is_reset_task_states(self, task_state):\r\n return all(self.is_initial_child_state(child) for child in task_state)", "def is_over(self, state):\n return state.current_total == 0", "def get_successors(state): \n \n child_states = []\n \n size = len(state)\n i = 0\n j = 0\n for i in range (size):\n if 0 in state[i]:\n for j in range (size):\n if state[i][j] == 0:\n break \n break\n\n if j != size-1:\n child_states.append ((\"Left\", swap_cells(state, i, j, i, j+1)))\n if j != 0:\n child_states.append ((\"Right\", swap_cells(state, i, j, i, j-1)))\n if i != size-1:\n child_states.append ((\"Up\", swap_cells(state, i, j, i+1, j)))\n if i != 0:\n child_states.append ((\"Down\", swap_cells(state, i, j, i-1, j)))\n \n return child_states", "def _verify_all_states_reachable(states_list):\n\n # This queue stores state names.\n 
processed_queue = []\n curr_queue = [states_list[0]['name']]\n\n while curr_queue:\n curr_state = curr_queue[0]\n curr_queue = curr_queue[1:]\n\n if curr_state in processed_queue:\n continue\n\n processed_queue.append(curr_state)\n\n curr_state_ind = next(ind for ind, state in enumerate(states_list)\n if state['name'] == curr_state)\n\n for handler in states_list[curr_state_ind]['widget']['handlers']:\n for rule in handler['rule_specs']:\n dest_state = rule['dest']\n if (dest_state not in curr_queue and\n dest_state not in processed_queue and\n dest_state != feconf.END_DEST):\n curr_queue.append(dest_state)\n\n if len(states_list) != len(processed_queue):\n unseen_states = list(\n set([s['name'] for s in states_list]) - set(processed_queue))\n raise Exception('The following states are not reachable from the '\n 'initial state: %s' % ', '.join(unseen_states))", "def prune_states(state: PushState) -> PushState:\n if state and not state[-1]:\n return PushGame.prune_states(state[:-1])\n else:\n return state", "def is_deterministic(self):\n if len(self.initial_states())>1:\n return False\n for state in self.iter_states():\n for transition in state.transitions:\n if len(transition.word_in) != 1:\n return False\n\n transition_classes_by_word_in = full_group_by(\n state.transitions,\n key=lambda t: t.word_in)\n\n for key,transition_class in transition_classes_by_word_in:\n if len(transition_class) > 1:\n return False\n return True", "def misplaced_heuristic(state):\n msp_h = 0\n size = len(state)\n for i in range (size):\n for j in range (size):\n if state[i][j] == 0:\n continue\n elif state[i][j] != i*size + j:\n msp_h += 1\n return msp_h", "def not_pushed_down(self):\n return (self.genus >= 2 and self.n != 0) or (self.genus == 1 and self.n > 1) or (self.genus == 0 and self.n > 3)", "def isGoalState(self, state):\n \"*** YOUR CODE HERE ***\"\n # Utilizaré el método .count del grid, de manera que me contará los trues que haya.\n # Cuando no queden trues, ya hemos acabado.\n return state[1].count() == 0\n # util.raiseNotDefined()", "def notFold(self):\n return not self._fold", "def analyze_state_changes(self):\n graph = self._graph\n lost_chunks = set(self._lost_chunks)\n op_states = self._op_states\n\n # mark lost virtual nodes as lost when some preds are lost\n for n in graph:\n if not isinstance(n.op, VirtualOperand) \\\n or op_states.get(n.op.key) == OperandState.UNSCHEDULED:\n continue\n if any(pred.key in lost_chunks for pred in graph.iter_predecessors(n)):\n lost_chunks.add(n.key)\n\n # collect operands with lost data\n op_key_to_chunks = defaultdict(list)\n lost_ops = set()\n for n in graph:\n op_key_to_chunks[n.op.key].append(n)\n if n.key in lost_chunks:\n lost_ops.add(n.op.key)\n\n # check data on finished operands. when data lost, mark the operand\n # and its successors as affected.\n affected_op_keys = set()\n for op_key in lost_ops:\n affected_op_keys.add(op_key)\n for n in op_key_to_chunks[op_key]:\n affected_op_keys.update(succ.op.key for succ in graph.iter_successors(n))\n\n # scan the graph from bottom and reassign new states\n new_states = dict()\n for chunk in graph.topological_iter(reverse=True):\n op_key = chunk.op.key\n if chunk.op.key not in affected_op_keys:\n continue\n\n can_be_ready = True\n stop_spread_states = (OperandState.RUNNING, OperandState.FINISHED)\n for pred in graph.iter_predecessors(chunk):\n pred_op_key = pred.op.key\n # mark affected, if\n # 1. data of the operand is lost\n # 2. 
state does not hold data, or data is lost,\n # for instance, operand is freed.\n if pred.key in lost_chunks or op_states.get(pred_op_key) not in stop_spread_states:\n affected_op_keys.add(pred_op_key)\n can_be_ready = False\n\n # update state given data preservation of prior nodes\n chunk_op_state = op_states.get(op_key)\n if can_be_ready and chunk_op_state != OperandState.READY:\n new_states[op_key] = OperandState.READY\n elif not can_be_ready and chunk_op_state != OperandState.UNSCHEDULED:\n new_states[op_key] = OperandState.UNSCHEDULED\n\n op_states.update(new_states)\n return new_states" ]
[ "0.6629532", "0.63425463", "0.61981493", "0.6121164", "0.6120033", "0.60957247", "0.60479707", "0.6040906", "0.59273624", "0.58807003", "0.5855866", "0.58459675", "0.5830927", "0.57972974", "0.5793381", "0.57392114", "0.5736396", "0.5725186", "0.56908005", "0.5680616", "0.56781894", "0.5663987", "0.5621121", "0.55854446", "0.55753386", "0.5573975", "0.5572527", "0.55669415", "0.55634177", "0.5560596" ]
0.66285646
1
Initializes the particles with supplied values for charge c, mass m, and position r.
def __init__(self, charge, mass, position):
    self.c = charge
    self.m = mass
    self.r = position
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self,nparticles,initial_condition):\n self.nparticles = nparticles\n self.particles = np.array([Particle(mass,x,y) for x,y,mass in initial_condition])\n self.mass = np.array([self.particles[i].mass for i in range(len(self.particles))])\n self.position = np.array([self.particles[i].position for i in range(len(self.particles))])\n self.momentum = np.array([self.particles[i].momentum for i in range(len(self.particles))])", "def __init__(self, init_pos_1, init_pos_2, M_1, M_2, spring_cos, equi_len):\n self.p1 = Particle(init_pos_1, M_1)\n self.p2 = Particle(init_pos_2, M_2)\n self.k = spring_cos\n self.L0 = equi_len", "def init_particles(self):\n \n # Each particle is a dimension-K vector. We generate each particle \n # uniformly at random from the space [0,1]^K. \n self.Particles = np.random.uniform(0, 1, (self.Npar, self.K))\n #print(\"Particles: \", self.Particles) \n return None", "def __init__(self, position, momentum, mass):\n self.position = position\n self.momentum = momentum\n self.mass = mass", "def __init__(self, position=np.array([0,0,0], dtype=float),\n velocity=np.array([0,0,0], dtype=float), acceleration=np.array([0,0,0],\n dtype=float), name='A Particle', restMass=1.0, charge=const.elementary_charge):\n self.name = name\n self.position = np.array(position, dtype=float)\n self.velocity = np.array(velocity, dtype=float)\n self.acceleration = np.array(acceleration, dtype=float)\n self.restMass = restMass\n self.charge = charge\n self.electricField = PointElectricFieldClass(sourceParticle=self\n , name='Field from %s'%(self.name))\n self.magneticField = PointMagneticFieldClass(sourceParticle=self\n , name='Field from %s'%(self.name))", "def __init__(self, mass, position, velocity):\n self.mass = mass\n self.position = position\n self.velocity = velocity", "def __init__(self,mass,x=None,p=None,v=None):\n self.setPosition(x)\n self.setMass(mass)\n if p is not None and v is not None:\n raise CoordinateException(\"Initializing a particle can only have momentum or velocity, not both.\")\n elif p is None:\n self.setVelocity(v)\n self.calcMomentumFromVelocity()\n elif v is None:\n self.setMomentum(p)\n self.calcVelocityFromMomentum()", "def __init__(self, Position, M):\n self.pos = Position # Sets x position\n self.m = M # Sets mass\n # Initial velocity and acceleration set to be zero\n self.vel = np.zeros((2,))\n self.acc = np.zeros((2,))", "def __init__(self, mass, radius, position, velocity):\r\n self.mass = mass\r\n self.radius = radius\r\n \r\n # last position and velocity\r\n self.position = np.array(position)\r\n self.velocity = np.array(velocity)\r\n \r\n # all position and velocities recorded during the simulation\r\n self.solpos = [np.copy(self.position)]\r\n self.solvel = [np.copy(self.velocity)]\r\n self.solvel_mag = [np.linalg.norm(np.copy(self.velocity))]", "def __init__(self, particles):\n self.particles = particles", "def initialize(self):\n\n myg = grid_setup(self.rp, ng=4)\n\n bc_dens, bc_xodd, bc_yodd = bc_setup(self.rp)\n\n my_data = patch.CellCenterData2d(myg)\n\n my_data.register_var(\"density\", bc_dens)\n my_data.register_var(\"x-velocity\", bc_xodd)\n my_data.register_var(\"y-velocity\", bc_yodd)\n\n # we'll keep the internal energy around just as a diagnostic\n my_data.register_var(\"eint\", bc_dens)\n\n # phi -- used for the projections. The boundary conditions\n # here depend on velocity. At a wall or inflow, we already\n # have the velocity we want on the boundary, so we want\n # Neumann (dphi/dn = 0). 
For outflow, we want Dirichlet (phi\n # = 0) -- this ensures that we do not introduce any tangental\n # acceleration.\n bcs = []\n for bc in [self.rp.get_param(\"mesh.xlboundary\"),\n self.rp.get_param(\"mesh.xrboundary\"),\n self.rp.get_param(\"mesh.ylboundary\"),\n self.rp.get_param(\"mesh.yrboundary\")]:\n if bc == \"periodic\":\n bctype = \"periodic\"\n elif bc in [\"reflect\", \"slipwall\"]:\n bctype = \"neumann\"\n elif bc in [\"outflow\"]:\n bctype = \"dirichlet\"\n bcs.append(bctype)\n\n bc_phi = bnd.BC(xlb=bcs[0], xrb=bcs[1], ylb=bcs[2], yrb=bcs[3])\n\n my_data.register_var(\"phi-MAC\", bc_phi)\n my_data.register_var(\"phi\", bc_phi)\n\n # gradp -- used in the projection and interface states. We'll do the\n # same BCs as density\n my_data.register_var(\"gradp_x\", bc_dens)\n my_data.register_var(\"gradp_y\", bc_dens)\n\n my_data.create()\n\n self.cc_data = my_data\n\n # some auxiliary data that we'll need to fill GC in, but isn't\n # really part of the main solution\n aux_data = patch.CellCenterData2d(myg)\n\n aux_data.register_var(\"coeff\", bc_dens)\n aux_data.register_var(\"source_y\", bc_yodd)\n\n aux_data.create()\n self.aux_data = aux_data\n\n # we also need storage for the 1-d base state -- we'll store this\n # in the main class directly.\n self.base[\"rho0\"] = Basestate(myg.ny, ng=myg.ng)\n self.base[\"p0\"] = Basestate(myg.ny, ng=myg.ng)\n\n # now set the initial conditions for the problem\n problem = importlib.import_module(f\"pyro.lm_atm.problems.{self.problem_name}\")\n problem.init_data(self.cc_data, self.base, self.rp)\n\n # Construct beta_0\n gamma = self.rp.get_param(\"eos.gamma\")\n self.base[\"beta0\"] = Basestate(myg.ny, ng=myg.ng)\n self.base[\"beta0\"].d[:] = self.base[\"p0\"].d**(1.0/gamma)\n\n # we'll also need beta_0 on vertical edges -- on the domain edges,\n # just do piecewise constant\n self.base[\"beta0-edges\"] = Basestate(myg.ny, ng=myg.ng)\n self.base[\"beta0-edges\"].jp(1)[:] = \\\n 0.5*(self.base[\"beta0\"].v() + self.base[\"beta0\"].jp(1))\n self.base[\"beta0-edges\"].d[myg.jlo] = self.base[\"beta0\"].d[myg.jlo]\n self.base[\"beta0-edges\"].d[myg.jhi+1] = self.base[\"beta0\"].d[myg.jhi]", "def _init_particles(self):\n self.NPART = self.grid.get_npart()\n self.particles = np.empty(self.NPART, dtype=object)\n for i in range(self.NPART):\n tmem = TMEM\n ux = UXM + UPRIME*normal()*LANGFACTOR\n vy = VYM + UPRIME*normal()*LANGFACTOR\n self.particles[i] = Particle(tmem=tmem, ux=ux, vy=vy)\n #\n # PUT THE PARTICLES IN THE CELLS.\n # LOOP OVER CELLS AND DEFINE THEIR PARTICLES.\n # FOR NOW, ONLY POSITION DEPENDS ON SPACE HEIGHT & MEMORY DO NOT.\n # FIRST THE TREE PARTICLES, THEN THE BUILDING PARTICLES.\n #\n NX = self.grid.NX\n NY = self.grid.NY\n icounter = 0\n for i in range(NX - 1):\n for j in range(NY - 1):\n cell = self.grid.CELLS[i, j]\n x = self.grid.XCELL[i, j]\n y = self.grid.YCELL[i, j]\n for k in range(cell.NPARTTR):\n self.particles[k + icounter].update(x=x, y=y, type=1)\n for k in range(cell.NPARTRAD):\n self.particles[k + cell.NPARTTR + icounter].update(x=x, y=y, type=2)\n icounter += cell.NPARTTR + cell.NPARTRAD", "def __init__(self, init_pos, init_stdev, num_particles, sense_noise):\n self.particles = np.random.multivariate_normal(\n init_pos, [[init_stdev**2, 0], [0, init_stdev**2]], num_particles)\n self.weights = np.array(\n [1. 
/ num_particles for _ in range(num_particles)])\n self.n = num_particles\n self.sense_noise = sense_noise", "def __init__(self,nparticles,size, mass=1, G=1, boundary_periodic = True,early_universe=False, softner=1, position = [], momentum = []):\n self.softner = softner\n self.G = G\n self.boundary_periodic = boundary_periodic\n self.nparticles = nparticles\n self.size = size\n self.mass = np.ones(nparticles)*mass\n #If the boundary condition are not periodic, the grid_size is double but particle kept in the first quadrant so \n #that the particles cannot feel the effect of the particles closed to the opposite boundary when we take the convolution\n if boundary_periodic==True:\n self.grid_size = size\n else:\n self.grid_size = 2*size\n #Initialize the partticle grid\n # if early_universe == True:\n # self.ptclgrid.early_universe_grid(softner)\n # self.mass = self.ptclgrid.mass\n self.ptclgrid = ParticleGrid(nparticles,self.grid_size,self.size, mass=self.mass, soft=softner, early_universe=early_universe)\n #If initial position are givem, place the particle to the right place on the grid\n if len(position) != 0:\n self.ptclgrid.update_position(position, mass)\n\n self.grid = self.ptclgrid.grid\n self.grid_pos = self.ptclgrid.grid_pos\n x0,y0 = self.ptclgrid.position.transpose()\n initial_condition = np.array([x0,y0, self.mass]).transpose()\n #Initialize the Particle list containing the position and momentum of the particles\n self.particles = ParticleList(nparticles, initial_condition)\n #If initial mometa are given, intialize it \n if len(momentum) != 0:\n self.particles.momentum = momentum\n #Computes the green function on the grid\n self.compute_green_function(self.grid_size)\n #Initialize the array with the acceleration of the particles\n self.acc = np.zeros((len(self),2))", "def initialize_system(how):\n \n if how == 'random':\n particles = np.random.rand(MC_par['N_particles'],MC_par['dim'])*MC_par['L_box'] \n elif 'array' in how:\n n = np.power(MC_par['N_particles'],1.0/MC_par['dim'])\n n = int(n) + 1\n n_generated = n**MC_par['dim']\n if MC_par['dim'] == 2:\n X,Y = np.mgrid[0:n,0:n]\n more_particles = np.array([X.flatten(),Y.flatten()]).T\n elif MC_par['dim'] == 3:\n X,Y,Z = np.mgrid[0:n,0:n,0:n]\n more_particles = np.array([X.flatten(),Y.flatten(),Z.flatten()]).T\n n_excess = n_generated - MC_par['N_particles']\n # Remove the particles in excess (randomly)\n to_remove = np.random.permutation(n_generated)[:n_excess]\n particles = np.delete(more_particles, to_remove, axis = 0)\n # normalize\n particles = particles * MC_par['L_box'] / n\n \n if 'noisy' in how:\n noise = (np.random.rand(MC_par['N_particles'],MC_par['dim']) - 0.5) * 0.5 * MC_par['L_box']/n\n particles = particles + noise\n \n if 'charged' in how:\n particles = np.append(particles, np.ones((MC_par['N_particles'],1)), axis = 1) # add a column for charge\n # and flip half charges\n particles[::2,2] *= -1\n \n return particles", "def __init__(self, T_e, n_e, Z=None, particle=\"p\"):\n self.T_e = T_e\n self.n_e = n_e\n self.particle = particle\n self.Z = _grab_charge(particle, Z)\n # extract mass from particle\n self.ionMass = particle_mass(self.particle)", "def __init__(self, mass, x, y,px=0.0,py=0.0):\n self.mass = mass\n self.position = np.array([x,y])\n self.momentum = np.array([px,py])", "def __init__(self, center, radius, material):\n self.center = center\n self.radius = radius\n self.material = material", "def __init__(self, center, radius, material):\n self.center = center\n self.radius = radius\n self.material = 
material", "def initializeParticles(self):\n import itertools\n import random\n #create a list of possible ghost permutations, where each of three ghosts can be on any of the legal positions in the boards.\n permutations = list(itertools.product(self.legalIntentions, repeat=self.numAgents))\n \n random.shuffle(permutations)\n p = len(permutations)\n n = self.numParticles\n self.particles = []\n #create the particles\n while n >= p:\n self.particles += permutations\n n -= p\n #add the remainder\n self.particles += permutations[0: n - 1]", "def __init__(self,E,px,py,pz):\n Particle.__init__(self)\n self.E=float(E)\n self.px=float(px)\n self.py=float(py)\n self.pz=float(pz)\n self.cal_pt()\n self.cal_phi()\n self.cal_eta()\n #self.cal_mass()\n #print self.E,self.px,self.py,self.pz\n #print self.pt,self.phi,self.eta", "def assignPositions(self):\n n = int(math.ceil(self.numAtoms**(1.0/3.0))) # Number of atoms in a direction\n particle = 0 # Particles placed so far\n \n for x in range(0, n):\n for y in range(0, n):\n for z in range(0, n):\n if (particle < self.numAtoms):\n self.atoms[particle].x = x * self.sigma\n self.atoms[particle].y = y * self.sigma \n self.atoms[particle].z = z * self.sigma\n particle += 1", "def __init__(self):\n self.position = Vector2()\n self.velocity = Vector2()\n self.update_parameters()\n self.mass = 0.18 # Mass of Sphero robot in kilograms", "def __init__(self, func, init_pos, n_particles):\n self.func = func\n self.n_particles = n_particles\n self.init_pos = np.array(init_pos)\n self.particle_dim = len(init_pos)\n # Initialize particle positions using a uniform distribution\n self.particles_pos = np.random.uniform(size=(n_particles, self.particle_dim) ) \\\n * self.init_pos\n # Initialize particle velocities using a uniform distribution\n self.velocities = np.random.uniform(size=(n_particles, self.particle_dim))\n\n # Initialize the best positions\n self.g_best = init_pos\n self.p_best = self.particles_pos\n self.phi = 2", "def initialize_particle_cloud(self, xy_theta=None):\n if xy_theta == None:\n xy_theta = convert_pose_to_xy_and_theta(self.odom_pose.pose)\n rad = 1 # meters\n\n self.particle_cloud = []\n self.particle_cloud.append(Particle(xy_theta[0], xy_theta[1], xy_theta[2]))\n for i in range(self.n_particles - 1):\n # initial facing of the particle\n theta = random.random() * 360\n\n # compute params to generate x,y in a circle\n other_theta = random.random() * 360\n radius = random.random() * rad\n # x => straight ahead\n x = radius * math.sin(other_theta) + xy_theta[0]\n y = radius * math.cos(other_theta) + xy_theta[1]\n particle = Particle(x, y, theta)\n self.particle_cloud.append(particle)\n\n self.normalize_particles()\n self.update_robot_pose()", "def __init__(self, number_of_particles, restitution_coefficient, initial_positions, initial_velocities, masses,\n radii, pbc):\n self.N = number_of_particles # amount of particles\n self.restitution_coefficient = restitution_coefficient # coefficient determining the energy lost in collisions\n # initialize variables used in the class\n self.positions = np.zeros((self.N, 3)) # positions of particles\n self.initial_positions = np.zeros((self.N, 3)) # help variable to compute mean square displacement\n self.velocities = np.zeros((self.N, 3)) # velocities of particles\n self.masses = np.zeros(self.N) # mass of each particle\n self.radii = np.zeros(self.N) # radius of each particle\n self.collision_count_particles = np.zeros(self.N) # array keeping track of the number of collisions\n\n # set parameters equal to 
the input to the class. Use .copy() such that the parameters can be used in outer loop\n self.positions = initial_positions.copy()\n self.initial_positions = initial_positions.copy()\n self.velocities = initial_velocities.copy()\n self.masses = masses\n self.radii = radii\n # a priority queue / heap queue of tuples of (time_collision, collision_entities, collision_count when\n # computing the collision, box number of the particles). The collision count at computation is used to\n # ignore non-valid collisions due to the involved particles being in other collisions between computation and\n # collision. Box number is needed for the pbc.\n self.collision_queue = [] # heap queue needs list structure to work\n\n # In order to create 27 copies for pbc in three dimensions one need to known their relation to the original\n # box. These are given by offsets. Offsets is also used to correct positions of particles colliding in\n # different boxes (due to the pbc).\n self.offsets = [(-1, 1, 1), (0, 1, 1), (1, 1, 1), (-1, 0, 1), (0, 0, 1), (1, 0, 1), (-1, -1, 1), (0, -1, 1),\n (1, -1, 1), (-1, 1, 0), (0, 1, 0), (1, 1, 0), (-1, 0, 0), (0, 0, 0), (1, 0, 0), (-1, -1, 0),\n (0, -1, 0), (1, -1, 0), (-1, 1, -1), (0, 1, -1), (1, 1, -1), (-1, 0, -1), (0, 0, -1),\n (1, 0, -1), (-1, -1, -1), (0, -1, -1), (1, -1, -1)]\n # Crossings is used to compute current positions due to the periodic boundary conditions. It essentially get\n # updated every time a particle cross the edge in the x-, y- or z-direction.\n self.crossings = np.zeros((self.N, 3))\n\n self.pbc = pbc # periodic boundary conditions", "def __init__( self, screen, x=0.0, y=0.0, vx=0.0, vy=0.0, r=1.0, m=1.0, color=(255,255,0) ):\n\t\t\n\t\tself.position = Vector( x, y )\n\t\tself.velocity = Vector( vx, vy )\n\t\tself.mass = m\n\t\tself.r = int(r)\n\t\tself.color = color\n\t\tself.screen = screen", "def __init__(self, mass, radius, position, velocity):\n self.mass = mass\n self.radius = radius\n self.position = position\n self.velocity = velocity\n print(self.velocity)\n self.vafter = np.copy(velocity) # temp storage for velocity of next step\n self.delete = False", "def __init__(self,circlePos,circleRad,circleVel):\n self.circlePos=circlePos\n self.circleRad=circleRad\n self.circleVel=circleVel", "def __init__(self,r,x_c,y_c,z_c):\n self.r = r\n self.x_c = x_c\n self.y_c = y_c\n self.z_c = z_c" ]
[ "0.6975022", "0.6936636", "0.683775", "0.6779028", "0.67131174", "0.6454496", "0.64482003", "0.63693637", "0.63533765", "0.63069123", "0.6258959", "0.62416065", "0.6235465", "0.61987317", "0.6110885", "0.60588974", "0.6033223", "0.60294825", "0.60294825", "0.5966522", "0.5964707", "0.5940008", "0.5889009", "0.58715373", "0.58525395", "0.5837976", "0.58251625", "0.58135056", "0.57996863", "0.57979393" ]
0.7580843
0
Create dict for the delete button
def create_delete_buttons_dict(self):
    btns_dict = dict()
    for index, _ in enumerate(self.games):
        btns_dict[index] = {
            "text": "X",
            "on_click": self.delete,
            "on_click_params": [index],
        }
    return btns_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_delete(self, *args, **kw):\n return dict(args=args)", "def create_buttons_dict(self):\n btns_dict = dict()\n for index, game in enumerate(self.games):\n name = game.split(\".\")[0]\n btns_dict[name] = {\n \"text\": name,\n \"on_click\": self.load,\n \"on_click_params\": [index],\n }\n return btns_dict", "def view_delete():\n\n return jsonify(\n get_dict(\"url\", \"args\", \"form\", \"data\", \"origin\", \"headers\", \"files\", \"json\")\n )", "def create_buttons(self):\r\n return []", "def events_delete_btns(self):\n events = pg.event.get()\n for btn in self.delete_btns:\n btn.listen(events)", "def delete_args():\n return {\"id\": fields.UUID(required=True, location=\"view_args\")}", "def test_delete_custom_button(self):\n pass", "def createButtons(self):\r\n buttonsPosition = {\r\n \"BROWSE\": (3, 2),\r\n \"CALCULATE\": (13, 2),\r\n \"SAVE\": (14, 0),\r\n \"CLEAR\": (14, 1),\r\n \"DELETE\": (14, 2),\r\n \"PRINT\": (13, 3),\r\n \"GENERATE BILL\": (14, 3)\r\n }\r\n self.buttons = {}\r\n for widgetName, position in buttonsPosition.items():\r\n self.button = QPushButton(widgetName)\r\n\r\n self.button.setStyleSheet(\"\"\"\r\n QPushButton {\r\n\r\n background-color: #A8DBC5;\r\n font-family: arial;\r\n font-weight: bold;\r\n font-size: 12px;\r\n border-color: white;\r\n }\r\n QPushButton:hover {\r\n background-color: #DAE0E2;\r\n }\r\n \"\"\")\r\n self.grid.addWidget(self.button, position[0], position[1])\r\n self.buttons[widgetName] = self.button\r\n # Setting calendar icon\r\n self.buttons[\"BROWSE\"].setIcon(QIcon(\"calendarr.png\"))\r\n # Buttons Signals\r\n self.buttons[\"CLEAR\"].clicked.connect(self.clearAll)\r\n self.buttons[\"BROWSE\"].clicked.connect(self.calendar)\r\n self.buttons[\"CALCULATE\"].clicked.connect(self.calculate)\r\n self.buttons[\"PRINT\"].clicked.connect(self.printBill)", "def delete_menu():", "def get_actions(self, request):\n actions = super().get_actions(request)\n actions.pop('delete_selected', None)\n\n return actions", "def create_delete_entry(columns: list, win, x: int, y: int):\n global choice, search_delete_enter\n window_for_search = Frame(win)\n window_for_search.place(x=x, y=y)\n search_delete_enter = ttk.Combobox(window_for_search, values=columns, height=3)\n search_delete_enter.set(u'Выбор параметра')\n search_delete_enter.grid(column=0, row=0)\n choice = search_delete_enter\n return choice", "def delete():\n add_book_tk = DeleteBookDialog()\n entries_args = [\n (\"Book ID : \", 0.5),\n ]\n add_book_tk.create_components(entries_args)\n add_book_tk.mainloop()", "def createButtonsWithoutTabs(self, reclassificationDict):\n formLayout = QtGui.QFormLayout()\n self.createWidgetWithoutTabs(formLayout)\n sortedButtonNames = []\n for category in reclassificationDict.keys():\n if category in ['version', 'uiParameterJsonDict']:\n continue\n for edgvClass in reclassificationDict[category].keys():\n for button in reclassificationDict[category][edgvClass].keys():\n sortedButtonNames.append(button)\n sortedButtonNames.sort()\n for button in sortedButtonNames: \n pushButton = self.createButton(button)\n formLayout.addRow(pushButton)", "def button_captions(self, obj):\n return {\n 'BO_SAVE_CAPTION': '',\n 'BO_SAVE_AS_NEW_CAPTION': '',\n 'BO_SAVE_AND_CONT_CAPTION': '',\n 'BO_SAVE_AND_ADD_ANOTHER_CAPTION': '',\n 'BO_DELETE_CAPTION': '',\n }", "def gen_delete_markup() -> ReplyKeyboardRemove:\n return delete_markup", "def __actions__(self, obj):\n value = '<div>'\n clase = 'actions'\n id = str(obj.id_atributos_por_tipo_item)\n id_tipo = 
UrlParser.parse_id(request.url, \"tipositems\")\n \n \n if PoseePermiso('redefinir tipo item',\n id_tipo_item=obj.id_tipo_item).is_met(request.environ):\n value += '<div>' + \\\n '<a href=\"./' + id + '/edit\" ' + \\\n 'class=\"' + clase + '\">Modificar</a>' + \\\n '</div><br />'\n\n if obj.puede_eliminarse():\n if PoseePermiso('redefinir tipo item',\n id_tipo_item=obj.id_tipo_item).is_met(request.environ):\n value += '<div><form method=\"POST\" action=\"' + id + '\" class=\"button-to\">'+\\\n '<input type=\"hidden\" name=\"_method\" value=\"DELETE\" />' +\\\n '<input onclick=\"return confirm(\\'Está seguro?\\');\" value=\"Eliminar\" type=\"submit\" '+\\\n 'style=\"background-color: transparent; float:left; border:0; color: #286571;'+\\\n 'display: inline; margin: 0; padding: 0; margin-left:-3px;\" class=\"' + clase + '\"/>'+\\\n '</form></div><br />'\n value += '</div>'\n return value", "def create_buttons(self):\n\t\t\n\t\tbutton_add = Gtk.ToolButton()\n\t\tbutton_add.set_icon_name(\"gtk-add\")\n\t\tbutton_add.set_sensitive(False)\n\t\tbutton_add.set_tooltip_text(_(\"Create new device\"))\n\t\tself.toolbar.insert(button_add, 0)\n\t\tself.buttons[\"add\"] = button_add\n\t\tbutton_add.connect(\"clicked\", self.on_add_clicked)\n\t\t\n\t\tbutton_delete = Gtk.ToolButton()\n\t\tbutton_delete.set_icon_name(\"gtk-delete\")\n\t\tbutton_delete.set_sensitive(False)\n\t\tbutton_delete.set_tooltip_text(_(\"Delete selected device\"))\t\t\n\t\tself.toolbar.insert(button_delete, 1)\n\t\tself.buttons[\"delete\"] = button_delete\n\t\tbutton_delete.connect(\"clicked\", self.on_delete_clicked)\n\t\t\n\t\tself.toolbar.insert(Gtk.SeparatorToolItem(), 2)\n\t\t\n\t\tbutton_edit = Gtk.ToolButton()\n\t\tbutton_edit.set_icon_name(\"gtk-edit\")\n\t\tbutton_edit.set_sensitive(False)\n\t\tbutton_edit.set_tooltip_text(_(\"Edit or resize device\"))\n\t\tself.toolbar.insert(button_edit, 3)\n\t\tself.buttons[\"edit\"] = button_edit\n\t\tbutton_edit.connect(\"clicked\", self.on_edit_clicked)\n\t\t\n\t\t\n\t\tbutton_umount = Gtk.ToolButton()\n\t\tbutton_umount.set_icon_name(\"emblem-readonly\")\n\t\tbutton_umount.set_sensitive(False)\n\t\tbutton_umount.set_tooltip_text(_(\"Unmount selected device\"))\n\t\tself.toolbar.insert(button_umount, 4)\n\t\tself.buttons[\"umount\"] = button_umount\n\t\tbutton_umount.connect(\"clicked\", self.on_umount_clicked)\n\t\t\n\t\tself.toolbar.insert(Gtk.SeparatorToolItem(), 5)\n\t\t\n\t\tbutton_apply = Gtk.ToolButton()\n\t\tbutton_apply.set_icon_name(\"gtk-apply\")\n\t\tbutton_apply.set_sensitive(False)\n\t\tbutton_apply.set_tooltip_text(_(\"Apply queued actions\"))\n\t\tself.toolbar.insert(button_apply, 6)\n\t\tself.buttons[\"apply\"] = button_apply\n\t\tbutton_apply.connect(\"clicked\", self.on_apply_clicked)\n\t\t\n\t\tbutton_clear = Gtk.ToolButton()\n\t\tbutton_clear.set_icon_name(\"gtk-clear\")\n\t\tbutton_clear.set_sensitive(False)\n\t\tbutton_clear.set_tooltip_text(_(\"Clear queued actions\"))\n\t\tself.toolbar.insert(button_clear, 7)\n\t\tself.buttons[\"clear\"] = button_clear\n\t\tbutton_clear.connect(\"clicked\", self.on_clear_clicked)", "def aboutToDelete(self):\n \n pass", "def aboutToDelete(self):\n \n pass", "def aboutToDelete(self):\n \n pass", "def delete_button(self):\n self.pop_up_del = Toplevel(master)\n self.pop_up_del.geometry(\"500x50\")\n\n self.del_label = Label(self.pop_up_del, text=\"Are you sure you want to delete this item?\", font=\"roboto 12\")\n self.del_label.place(relx=0.5, rely=0.01, anchor=\"n\")\n\n self.del_button = Button(self.pop_up_del, 
text=\"DELETE\", command=self.delete_item)\n self.del_button.place(relx=0.4, rely=0.5, anchor=\"n\")\n\n self.keep_button = Button(self.pop_up_del, text=\"CANCEL\", command=self.close_1)\n self.keep_button.place(relx=0.6, rely=0.5, anchor=\"n\")", "def create_delete_window():\n global data, column_names, output1, win_create_delete_str, place_for_enter\n try:\n column_names = data[0]\n win_create_delete_str = Toplevel(root, relief=SUNKEN, bd=10, bg=\"light sky blue\")\n win_create_delete_str.title(\"Окно выбора данных\")\n win_create_delete_str.minsize(width=500, height=300)\n win_create_delete_str.resizable(height=False, width=False)\n create_delete_entry(column_names, win_create_delete_str, 5, 10)\n enter_data_button = ttk.Button(win_create_delete_str, text=\" Ввод \", command=choice_of_param)\n enter_data_button.place(x=420, y=10)\n place_for_enter = Entry(win_create_delete_str)\n place_for_enter.place(x=150, y=10, width=250)\n output1 = Text(win_create_delete_str, width=40, height=10, font=\"12\", wrap=WORD)\n output1.place(x=10, y=80)\n\n but_delete = ttk.Button(win_create_delete_str, text=\" Удалить \", command=delete_record)\n but_delete.place(x=394, y=85)\n\n but_cancel = ttk.Button(win_create_delete_str, text=\" Отмена \", command=cancel)\n but_cancel.place(x=394, y=135)\n\n but_cancel = ttk.Button(win_create_delete_str, text=\" Обновить \", command=output)\n but_cancel.place(x=390, y=185)\n\n except IndexError:\n mistake_load_table()", "def delete(self):\n ...", "def get_crud_template_dict():\n return CRUD_TEMPLATE_DICT", "def __actions__(self, obj):\n\t\t\tprimary_fields \t= self.__provider__.get_primary_fields(self.__entity__)\n\t\t\tpklist \t\t= '/'.join(map(lambda x: str(getattr(obj, x)), primary_fields))\n\n\t\t\tvalue \t\t= '<div>'\n\t\t\tif has_permission('editar_LB'):\n\t\t\t\tvalue = value + '<div><a class=\"edit_link\" href=\"'+pklist+'/edit\" style=\"text-decoration:none\">edit</a></div>'\n\t\t\tif has_permission('eliminar_LB'):\n\t\t\t\tvalue = value + '<div><form method=\"POST\" action=\"'+pklist+'\" class=\"button-to\"><input type=\"hidden\" name=\"_method\" value=\"DELETE\" /><input class=\"delete-button\" onclick=\"return confirm(\\'Est&aacute; seguro que desea eliminar?\\');\" value=\"delete\" type=\"submit\" style=\"background-color: transparent; float:left; border:0; color: #286571; display: inline; margin: 0; padding: 0;\"/></form></div>'\n\t\t\tvalue = value + '</div>'\n\t\t\treturn value", "def help_delete(self):\n print(DELETE)", "def post_delete(self, *args, **kw):\n id_atributo = int(args[0])\n transaction.begin()\n attr = AtributosPorTipoItem.por_id(id_atributo)\n DBSession.delete(attr)\n transaction.commit()\n flash(\"Atributo Eliminado\")\n redirect(\"./\")", "def delete_command():\n global selected_tuple\n backend.delete(selected_tuple[0])", "def get_delete_confirmation_form(self, data):\n self.add_success(data)\n rv = self.get((data[self.id_field], self.delete_url))\n assert not is_404(rv)\n assert in_response(rv, 'Delete {}'.format(data[self.name_field]))\n return rv", "def on_pushButton_delete_clicked(self):\n row = self.tableWidget.currentIndex().row()\n # 找到对于行的第一项(XX编码项)\n xxbm = unicode(self.tableWidget.takeItem(row, 0).text())\n self.tableWidget.setItem(row, 0, QTableWidgetItem(xxbm))\n content = unicode(self.comboBox.currentText())\n if content == \"职称表\":\n result_signal = self.sql_client.delete_zc_info(xxbm)\n elif content == \"文化表\":\n result_signal = self.sql_client.delete_wh_info(xxbm)\n else:\n result_signal = 
self.sql_client.delete_bm_info(xxbm)\n\n QMessageBox.information(self, 'Message', \"删除成功!\", QMessageBox.Yes)\n self.tableWidget.removeRow(row)" ]
[ "0.6565306", "0.6262134", "0.61290956", "0.61241233", "0.6069019", "0.59521407", "0.59131736", "0.5907664", "0.59048676", "0.5899396", "0.5856733", "0.58457917", "0.58292425", "0.5824716", "0.5797817", "0.5747458", "0.5742509", "0.5717527", "0.5717527", "0.5717527", "0.56944245", "0.56399083", "0.56381655", "0.5629374", "0.5623164", "0.5616365", "0.55961496", "0.55660754", "0.5564112", "0.55573153" ]
0.80937445
0
Create the dict for all buttons
def create_buttons_dict(self):
    btns_dict = dict()
    for index, game in enumerate(self.games):
        name = game.split(".")[0]
        btns_dict[name] = {
            "text": name,
            "on_click": self.load,
            "on_click_params": [index],
        }
    return btns_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def createButtons(self):\r\n buttonsPosition = {\r\n \"BROWSE\": (3, 2),\r\n \"CALCULATE\": (13, 2),\r\n \"SAVE\": (14, 0),\r\n \"CLEAR\": (14, 1),\r\n \"DELETE\": (14, 2),\r\n \"PRINT\": (13, 3),\r\n \"GENERATE BILL\": (14, 3)\r\n }\r\n self.buttons = {}\r\n for widgetName, position in buttonsPosition.items():\r\n self.button = QPushButton(widgetName)\r\n\r\n self.button.setStyleSheet(\"\"\"\r\n QPushButton {\r\n\r\n background-color: #A8DBC5;\r\n font-family: arial;\r\n font-weight: bold;\r\n font-size: 12px;\r\n border-color: white;\r\n }\r\n QPushButton:hover {\r\n background-color: #DAE0E2;\r\n }\r\n \"\"\")\r\n self.grid.addWidget(self.button, position[0], position[1])\r\n self.buttons[widgetName] = self.button\r\n # Setting calendar icon\r\n self.buttons[\"BROWSE\"].setIcon(QIcon(\"calendarr.png\"))\r\n # Buttons Signals\r\n self.buttons[\"CLEAR\"].clicked.connect(self.clearAll)\r\n self.buttons[\"BROWSE\"].clicked.connect(self.calendar)\r\n self.buttons[\"CALCULATE\"].clicked.connect(self.calculate)\r\n self.buttons[\"PRINT\"].clicked.connect(self.printBill)", "def create_delete_buttons_dict(self):\n btns_dict = dict()\n for index, _ in enumerate(self.games):\n btns_dict[index] = {\n \"text\": \"X\",\n \"on_click\": self.delete,\n \"on_click_params\": [index],\n }\n return btns_dict", "def create_buttons(self: object) -> None:\n buttons = {\"BWT\": Button(\n self, text=\"BWT\", command=self.bwt_window, width = 15).grid(row=3,column=0, padx=5, pady=6),\n \"DEBWT\": Button(\n self, text=\"reverse BWT\", command=self.debwt_window,width = 15).grid(\n row=6,column=0, padx=5, pady=6),\n \"Huffcode\": Button(\n self, text=\"Huffman coding\", command=self.huffcode_window, width = 15).grid(\n row=3,column=1, padx=5, pady=6),\n \"Huffdecode\": Button(\n self, text=\"Huffman decoding\", command=self.huffdecode_window, width = 15).grid(\n row=6,column=1, padx=5, pady=6),\n \"fullzip\": Button(\n self, text=\"Full zip\", command=self.fullzip_window, width = 15).grid(\n row=3,column=2, padx=5, pady=6),\n \"fullunzip\": Button(\n self, text=\"Full unzip\", command=self.fullunzip_window, width = 15).grid(\n row=6,column=2, padx=5, pady=6),\n \"generate\": Button(\n self, text=\"Generate\", command=self.generate_random, width = 15).grid(\n row=10,column=1, padx=5, pady=6),\n \"save\": Button(\n self, text=\"Save\", command=self.save_random, width = 15).grid(\n row=11,column=1, padx=5, pady=6)}\n\n self.buttons = buttons", "def create_buttons(self):\r\n return []", "def createButtons (self,buttons):\n \n assert(self.frame)\n self.buttonsFrame = f = Tk.Frame(self.top)\n f.pack(side=\"top\",padx=30)\n \n # Buttons is a list of dictionaries, with an empty dictionary at the end if there is only one entry.\n buttonList = []\n for d in buttons:\n text = d.get(\"text\",\"<missing button name>\")\n isDefault = d.get(\"default\",False)\n underline = d.get(\"underline\",0)\n command = d.get(\"command\",None)\n bd = g.choose(isDefault,4,2)\n \n b = Tk.Button(f,width=6,text=text,bd=bd,underline=underline,command=command)\n b.pack(side=\"left\",padx=5,pady=10)\n buttonList.append(b)\n \n if isDefault and command:\n self.defaultButtonCommand = command\n \n return buttonList", "def save_buttons_values(self):\r\n pass", "def generate_buttons(self):\n raise Exception('Implement me!')", "def get_request_buttons():\r\n return request_buttons", "def generate_buttons(self):\n buttons = []\n for mtime, player in self.mainwindow.data.get_all_players():\n button = OpenByPlayerName.PlayerNameButton(self, player, mtime)\n 
buttons.append((mtime, player.name.lower(), button))\n return buttons", "def _createButtons(self, methods):\n mbutton=Menubutton(self.mainwin, text='Options', width=12,\n borderwidth=2, relief=RIDGE,\n activeforeground='red')\n menu=Menu(mbutton,tearoff=0)\n mbutton['menu']=menu\n mbutton.pack(side=BOTTOM,fill=BOTH)\n for m in methods:\n menu.add_radiobutton(label=self.gui_methods[m[0]], \n indicatoron=0, \n command=m[1])\n b=Button(self.mainwin,text='Create Calculation',command=self.createJobDialog)\n b.pack(side=BOTTOM,fill=BOTH) \n return", "def create_buttons(self, options):\n buttons = []\n for option in options:\n if option['button_type'] == \"postback\":\n buttons.append(\n {\"type\": option['button_type'], \"title\": option['title'], \"payload\": option['payload']}\n )\n if option['button_type'] == \"web_url\":\n buttons.append(\n {\"type\": option['button_type'], \"title\": option['title'], \"url\": option['url']}\n )\n return buttons", "def make_dict(self):\n return self.generate_widgets()", "def getbuttons(self):\n return self.buttons", "def makeButtons(self):\n self.but_run = QtWidgets.QPushButton('Run') \n self.but_status = QtWidgets.QPushButton('Status') \n self.but_brow = QtWidgets.QPushButton('View') \n self.but_remove = QtWidgets.QPushButton('Remove files') \n\n self.hboxB = QtWidgets.QHBoxLayout()\n self.hboxB.addWidget(self.but_run)\n self.hboxB.addWidget(self.but_status)\n self.hboxB.addWidget(self.but_brow)\n self.hboxB.addStretch(1) \n self.hboxB.addWidget(self.but_remove)\n\n self.but_run.clicked.connect(self.onRun)\n self.but_status.clicked.connect(self.onStatus)\n self.but_brow.clicked.connect(self.onBrow)\n self.but_remove.clicked.connect(self.onRemove)", "def buttons_dict(phrase):\n switcher = {\n '처음으로': ['병원 정보', '병원 위치', '병원 운영시간', '병원 프로모션'],\n '병원 정보': ['의료진', '병원 사진', '병원 진료과목', '병원 전화하기'],\n '병원 프로모션': ['프로모션 A', '프로모션 B', '프로모션 C'],\n '의료진': ['홍길동 피부과 전문의', '김제인 마취과 전문의', '김존 피부과 전문의'],\n '병원 사진': ['내부', '건물', '진료실']\n }\n default_buttons = []\n return switcher.get(phrase, default_buttons) + ['처음으로']", "def create_buttons(self):\n buttons = []\n for i in range(9):\n buttons.append(tk.Button(self.game_frame, bg=\"white\", relief=\"ridge\", height=1, width=3,\n command=self.get_button_callback(i), font=(self.FONT, 48)))\n return buttons", "def button_captions(self, obj):\n return {\n 'BO_SAVE_CAPTION': '',\n 'BO_SAVE_AS_NEW_CAPTION': '',\n 'BO_SAVE_AND_CONT_CAPTION': '',\n 'BO_SAVE_AND_ADD_ANOTHER_CAPTION': '',\n 'BO_DELETE_CAPTION': '',\n }", "def make_buttons(self):\n # Color options.\n hovered_color = self.graphics.HIGHLIGHT\n disabled_color = self.graphics.DIM\n\n # Easy Button.\n easy_button = Button(Point(1, 4), \"Easy\")\n easy_button.set_hovered_color(hovered_color)\n easy_button.set_inactive_color(disabled_color)\n easy_button.set_action(self.mk_easy_field)\n self.uielements.append(easy_button)\n\n # Medium Button.\n medium_button = Button(Point(1, 5), \"Medium\")\n medium_button.set_hovered_color(hovered_color)\n medium_button.set_inactive_color(disabled_color)\n medium_button.set_action(self.mk_medium_field)\n self.uielements.append(medium_button)\n\n # Hard Button.\n hard_button = Button(Point(1, 6), \"Hard\")\n hard_button.set_hovered_color(hovered_color)\n hard_button.set_inactive_color(disabled_color)\n hard_button.set_action(self.mk_hard_field)\n self.uielements.append(hard_button)\n\n # Custom Button.\n custom_button = Button(Point(1, 7), \"Custom\")\n custom_button.set_hovered_color(hovered_color)\n 
custom_button.set_inactive_color(disabled_color)\n custom_button.set_action(self.mk_custom_field)\n self.uielements.append(custom_button)\n\n # Keep track of Buttons.\n self.buttons = [easy_button, medium_button, hard_button, custom_button]\n\n self.selected = easy_button\n easy_button.set_hovered(True)", "def create_buttons(self):\r\n # The buttons are created in the center of the screen then offset in the x/y directions by a number of button\r\n # widths. E.g. The \"-1, 0\" for the easy button means to shift the button one button width left of center.\r\n self.easy_button = Button(self.ai_game, \"Easy\", -1, 0)\r\n self.normal_button = Button(self.ai_game, \"Normal\", 0, 0)\r\n self.hard_button = Button(self.ai_game, \"Hard\", 1, 0)\r\n self.quit_button = Button(self.ai_game, \"Quit\", 0, 1)\r\n self.buttons = (self.easy_button, self.normal_button,\r\n self.hard_button, self.quit_button)", "def createButtonsWithTabs(self, reclassificationDict):\n gridLayout = QtGui.QGridLayout()\n tabWidget = QtGui.QTabWidget()\n tabWidget.setTabPosition(QtGui.QTabWidget.West)\n gridLayout.addWidget(tabWidget)\n self.scrollArea.setWidget(tabWidget)\n \n for category in reclassificationDict.keys():\n if category in ['version', 'uiParameterJsonDict']:\n continue\n sortedButtonNames = []\n formLayout = QtGui.QFormLayout()\n scrollArea = self.createWidgetWithTabs(formLayout)\n tabWidget.addTab(scrollArea, category)\n for edgvClass in reclassificationDict[category].keys():\n for button in reclassificationDict[category][edgvClass].keys():\n sortedButtonNames.append(button)\n sortedButtonNames.sort()\n for button in sortedButtonNames: \n pushButton = self.createButton(button)\n formLayout.addRow(pushButton)", "def test_06_CreateJson(self):\n l_buttons = self.m_api.read_all_buttons_xml(self.m_pyhouse_obj, self.m_xml.button_sect, self.m_version)\n # print('ButtonsS: {0:}'.format(l_buttons))\n # print('Button 0: {0:}'.format(vars(l_buttons[0])))\n l_json = json_tools.encode_json(l_buttons)\n # print('JSON: {0:}'.format(l_json))", "def buttons(self):\n return self._buttons", "def buttons(self):\n return self._buttons", "def buttons(self):\n return self._buttons", "def createButtons(self, reclassificationDict, createTabs=False):\n self.buttons = []\n widget = self.scrollArea.takeWidget()\n if createTabs:\n self.createButtonsWithTabs(reclassificationDict)\n else:\n self.createButtonsWithoutTabs(reclassificationDict)\n self.turnButtonsOn(self.newFeatureRadioButton.isChecked())", "def make_play_mode_buttons(self):\r\n play_button_list = []\r\n play_button_1a = Button(self._screen,\"1 Atom Random\", 200, 162, 1)\r\n play_button_list.append(play_button_1a)\r\n play_button_2a = Button(self._screen, \"2 Atoms Random\", 500, 162, 2)\r\n play_button_list.append(play_button_2a)\r\n play_button_3a = Button(self._screen, \"3 Atoms Random\", 200, 350, 3)\r\n play_button_list.append(play_button_3a)\r\n play_button_4a = Button(self._screen, \"4 Atoms Random\", 500, 350, 4)\r\n play_button_list.append(play_button_4a)\r\n play_button_5a = Button(self._screen, \"5 Atoms Random\", 200, 537, 5)\r\n play_button_list.append(play_button_5a)\r\n play_button_6a = Button(self._screen, \"Manual 4 Atoms\", 500, 537,\r\n \"4m\")\r\n play_button_list.append(play_button_6a)\r\n\r\n return play_button_list", "def createButtonsOnInterface(self, dlg):\n #reclassification dictionary made from the field setup file\n self.reclassificationDict = dlg.makeReclassificationDict()\n #button size defined by the user\n self.size = dlg.slider.value()\n #check 
if the button must be grouped by category\n withTabs = dlg.checkBox.isChecked()\n #actual button creation step\n self.createButtons(self.reclassificationDict, withTabs)", "def init_all_buttons(self) -> bool:\n raise NotImplementedError", "def init_buttons(self):\r\n self.btn_encrypt = QtWidgets.QPushButton('Encrypt')\r\n self.btn_encrypt.clicked.connect(self.encrypt)\r\n self.btn_encrypt.setEnabled(False)\r\n\r\n self.btn_decrypt = QtWidgets.QPushButton('Decrypt')\r\n self.btn_decrypt.clicked.connect(self.decrypt)\r\n self.btn_decrypt.setEnabled(False) \r\n\r\n self.layout_buttons = QtWidgets.QGridLayout()\r\n\r\n self.layout_buttons.addWidget(self.btn_encrypt,0,0)\r\n self.layout_buttons.addWidget(self.btn_decrypt,0,1)", "def place_buttons(self):\n tk.Button(self.parent, text='^', command=self.up_callback).grid(row=0, column=1)\n tk.Button(self.parent, text='v', command=self.down_callback).grid(row=2, column=1)\n tk.Button(self.parent, text='>', command=self.right_callback).grid(row=1, column=2)\n tk.Button(self.parent, text='<', command=self.left_callback).grid(row=1, column=0)\n tk.Button(self.parent, text='<-', command=self.back_callback).grid(row=0, column=0)\n tk.Button(self.parent, text='OK', command=self.ok_callback).grid(row=1, column=1)\n tk.Button(self.parent, text='<<', command=self.rewind_callback).grid(row=3, column=0)\n tk.Button(self.parent, text='>||', command=self.pp_callback).grid(row=3, column=1)\n tk.Button(self.parent, text='>>', command=self.pp_callback).grid(row=3, column=2)\n\n tk.Button(self.parent, text='HOME', command=self.home_callback).grid(row=0, column=3)" ]
[ "0.7700739", "0.7666546", "0.75279486", "0.7462629", "0.7061244", "0.68338764", "0.67945945", "0.67594445", "0.664118", "0.662812", "0.66155005", "0.6602516", "0.655123", "0.654498", "0.6542364", "0.6532947", "0.6517129", "0.6507127", "0.6499752", "0.64661855", "0.6426109", "0.6410199", "0.6410199", "0.6410199", "0.6364099", "0.6343821", "0.6307113", "0.6296912", "0.6248818", "0.62402207" ]
0.84233403
0
Delete a file using the num
def delete(self, num):
    file_name = self.games[num]
    file_path = path.join(self.saved_games, file_name)
    if path.exists(file_path):
        os.remove(file_path)
        logger.info("Remove the file %s", file_path)
    else:
        logger.error("The file %s doesn't existe", file_path)
    minimap_types = ["cover", "fog"]
    for _type in minimap_types:
        name = file_name.split('.json')[0]
        name_path = path.join(self.saved_minimap, f"{name}-{_type}.png")
        if path.exists(name_path):
            os.remove(name_path)
            logger.info("Remove the file %s", name_path)
        else:
            logger.error("The file %s doesn't existe", name_path)
    pg.event.wait()
    self.refresh()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete(self, filename):\n pass", "def delete_file(filename):\n\tprint client.file_delete(filename)", "def _cleanup(self, fnum):\n while os.path.exists('%s.%s' % (self.name, fnum)):\n try:\n fname = '%s.%s' % (self.name, fnum)\n os.unlink(fname)\n # self.log.debug(\"Cleaned up file: %s\", fname)\n except:\n pass\n fnum -= 1", "def delete(self, filename):\n raise NotImplementedError", "def delete_file(name):\n subprocess.check_output(cmd_preamble + [\"rm\", name])", "def deleteSingleFile(filename):\n os.popen('rm {}'.format(filename))", "def delete_file(self, name):\n del self.files[name]", "def _delete(filename):\n return os.remove(filename)", "def delete_file(self, lfile):\n raise NotImplementedError('delete_file')", "def delete_line(command):\n try:\n if len(command) > 1:\n my_file.delete_num(int(command[1]))\n else:\n my_file.delete_num()\n except FileNotFoundError:\n print('No file has been read yet')\n except ValueError:\n print('The line number is not an integer')\n except IndexError:\n print('Line number is out of range')", "def delete(self, file_id: str):\n file_path = self._path_to_file(file_id)\n os.remove(file_path)\n del self.index[file_id]", "def delete(self, host, file):", "def delete(self, filename, **kw):\n\n file_path = os.path.join(self.storage_path, filename)\n\n try:\n os.remove(file_path)\n except OSError:\n pass", "def delete_file(file_id):\n file_obj = Data.objects.get(id=file_id)\n print(\"Removing file: \", file_obj.name)\n print(file_obj.file.path)\n file_dir = file_obj.file.path\n os.remove(file_dir)\n print(\"Done.\")", "def delete_file(file):\n for _ in range(10):\n print(\"deleting\", file)\n try:\n if path.isfile(file):\n remove(file)\n except Exception as error:\n sleep(5)\n print('Delete failed, retrying...', error)\n else:\n break", "def delete_file(self, path):\n raise HTTPError(\n 501,\n \"Narrative deletion not implemented here. 
Deletion is handled elsewhere.\",\n )", "def delete_num(self, num):\r\n saved = task2.ListADT()\r\n saved.append(\"d\")\r\n if num == \"\":\r\n saved.append(0)\r\n for line_num in range(len(self.text_lines)):\r\n saved.append(self.text_lines[0])\r\n self.text_lines.delete(0)\r\n else:\r\n num = int(num)\r\n if num == 0:\r\n raise ValueError(\"Zero is not a valid line number\")\r\n elif num > 0:\r\n num -= 1\r\n saved.append(num)\r\n saved.append(self.text_lines[num])\r\n self.text_lines.delete(num)\r\n self.memory.push(saved)", "async def delete_file(location_id: LocationID, file_id: StorageFileID, user_id: UserID):", "def delete_file(input_fn):\r\n if os.path.isfile(input_fn):\r\n os.remove(input_fn)", "def delete_file(filename: str):\n\t\tif filename == \"ALL\":\n\t\t\tfor file in os.listdir(\"data/music/\"):\n\t\t\t\tdeleted = False\n\t\t\t\twhile not deleted:\n\t\t\t\t\ttry:\n\t\t\t\t\t\tos.remove(f\"data/music/{file}\")\n\t\t\t\t\t\tdeleted = True\n\t\t\t\t\texcept Exception as e:\n\t\t\t\t\t\tprint(\"Not removed, waiting 1 second...\")\n\t\t\t\t\t\tasyncio.sleep(1)\n\t\telse:\n\t\t\tprint(\"File--: \", filename)", "def delete_file(self, filename):\n if not filename in self.files:\n raise IOError('File %s Not Found' % filename)\n\n for nodename in self.files[filename]:\n node = self.datanodes[nodename]\n node.delete_file(filename)\n del self.files[filename]\n logging.info('file %s deleted' % filename)", "def delete(self, filename, threadID):\n self.lock.acquire()\n removed_file = self.files_on_disk.pop(filename)\n\n # remove file from the directory\n if not os.path.exists(\".storage/\" + filename):\n self.lock.release()\n return \"ERROR: NO SUCH FILE\\n\"\n else:\n os.remove(\".storage/\" + filename)\n i = 0\n j = 0\n while(i<self.size and j<removed_file.num_blocks):\n if(self.disk_mem[i]==removed_file.letter):\n self.disk_mem[i] = \".\"\n j+=1\n i += 1\n print(\"[thread %d] Deleted %s file '%c' (deallocated %d blocks)\" %\n (threadID, removed_file.name, removed_file.letter, removed_file.num_blocks))\n self.show(threadID)\n self.lock.release()\n return \"ACK\\n\"", "def delete_files(src_files):\n for i, src_file in enumerate(src_files):\n sys.stdout.write(str(i + 1) + ': ' + src_file + '\\n')\n subprocess.call(['rm', src_file])", "def rm_file(file_):\n Path(file_).unlink(missing_ok=True)", "def delete_file(self, key):\n path = os.path.join(self.directory, self.subdirectory, key)\n if os.path.isfile(path):\n os.unlink(path)\n else:\n raise ValueError(f\"No such file: {key}\")", "def delete_file(self, filepath):\n self.ftp.delete(filepath)", "def delete_file(path):\n return files.delete_file(path)", "def deleteDocumentFromPhone(file):\n\tprint \"Removing %s from target device...\" % file\n\tcmd =r\"adb shell rm -r %s\" % file\n\tos.system(cmd)\n\tprint \"Finished removing file from phone.\"", "def Delete_File(self,txn,filename):\n opid = self.new_opid()\n xaction = DeleteFile_Operation(os.path.join(self.home,filename),opid)\n self._add_operation(txn,xaction)", "def delete_file(self, filename: str, directory: str = 'gcodes') -> Dict:\n raise NotImplementedError" ]
[ "0.71110225", "0.6855006", "0.6764861", "0.67404574", "0.65595174", "0.6554298", "0.6536192", "0.65095913", "0.6488798", "0.64819276", "0.6439041", "0.6333991", "0.6290185", "0.62683415", "0.62036484", "0.61600226", "0.61180073", "0.60894287", "0.60656404", "0.60478294", "0.60162336", "0.6007262", "0.6000013", "0.5987962", "0.59865445", "0.596138", "0.59521335", "0.59463954", "0.5926005", "0.5899289" ]
0.72493964
0
Listen for delete btns
def events_delete_btns(self):
    events = pg.event.get()
    for btn in self.delete_btns:
        btn.listen(events)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_IN_DELETE(self, event):", "def delete_callback(self):\n pass", "def process_IN_DELETE_SELF(self, event):", "def on_delete(key):\n pass", "def _notify_delete(self, cuds_object):", "def delete():\n click.echo('delete was called.')", "def link_delete_callback(self):\n pass", "def delete_button_callback(self, button):\n\t\tRPIO.del_interrupt_callback(button)", "def delete():", "def delete_event(self,widget=None):\n self.on_device_dialog_cancel_clicked()\n return True", "def on_delete(self, payload):\n pass", "def delete(self):\n ...", "def keyPressEvent(self, event):\n if (event.key() == QtCore.Qt.Key_Backspace or event.key() == QtCore.Qt.Key_Delete):\n self.deleteBtns()", "def on_press_delete(self):\n\n os.close(self.tempFile[0])\n os.remove(self.tempFile[1])\n self.recordBtn.setEnabled(True)\n self.recordBtn.setIcon(QIcon(r'.\\assets\\record.png'))\n self.deleteBtn.setEnabled(False)\n self.saveBtn.setEnabled(False)\n self.inputDropDown.setEnabled(True)\n self.canvasStack.setCurrentWidget(self.microphoneCanvas)\n self.state = 'Waiting'", "def on_deleteButton_clicked(self):\n itm = self.protocolHandlersList.selectedItems()[0]\n self.__manager.removeProtocolHandler(itm.text(0))\n \n self.protocolHandlersList.takeTopLevelItem(\n self.protocolHandlersList.indexOfTopLevelItem(itm))\n del itm", "def delete_event(self, widget, event, Data=None):\n\t\tself.todolist.save()\n\t\tgtk.main_quit()\n\t\treturn False", "def on_delete():\r\n del win.box[-1] # delete last line\r\n #del win.box[0:-1] # delete all lines \r", "def on_delete(self, status_id, user_id):\n #print \"Delete notice for %s. %s\" % (status_id, user_id)\n return", "def on_delete(self, status_id, user_id):\n log.debug(\"Received status deletion notice: %d\", status_id)", "def beforeDelete(self):", "def Remove(self, event):\n pass", "def after_delete(self, obj, st):\n pass", "def delete(self):\n pass", "def delete(self):\n pass", "def delete(self):\n pass", "def delete(self):\n pass", "def delete(self, *args, **kwargs):\n pass", "def delete(self, *args, **kwargs):\n pass", "def delete(self, *args, **kwargs):\n return 0", "def _callback_delete(self, chat_id, user_id, args, update):\n msg_id = update[\"callback_query\"][\"message\"][\"message_id\"]\n \n if len(args) == 3 and args[1] == str(user_id):\n if args[2] == 'all':\n self.db.delete_sprueche(args[1])\n self.tclient.edit_message_text('Alle gespeicherten Nasensprüche wurden gelöscht', chat_id, msg_id)\n elif args[2] == 'stop':\n self.tclient.edit_message_text('Löschvorgang beendet.', chat_id, msg_id)\n else:\n self.db.delete_spruch(args[1], args[2])\n keyboard = self.build_inline_keyboard_delete(user_id)\n if keyboard == None:\n self.tclient.edit_message_text('Alle gespeicherten Nasensprüche wurden gelöscht', chat_id, msg_id)\n else:\n self.tclient.edit_message_text('Nasenspruch wurde gelöscht.\\nMöchtest du weitere Sprüche löschen?'.format(args[2]), chat_id, msg_id, keyboard)" ]
[ "0.7130073", "0.689843", "0.6717145", "0.6677733", "0.66146654", "0.6503768", "0.6409408", "0.6375576", "0.6330751", "0.6244449", "0.6194749", "0.6190247", "0.6078673", "0.6068437", "0.6011176", "0.6002316", "0.5930668", "0.58698416", "0.5839492", "0.58290255", "0.58205634", "0.5815902", "0.572575", "0.572575", "0.572575", "0.572575", "0.5720504", "0.5720504", "0.57084453", "0.5699989" ]
0.7375204
0
Converts image of type torch.tensor to numpy.ndarray for matplotlib display image
def convert_tensor_to_numpy_img(tensor_img):
    img = tensor_img.to('cpu').clone().detach()
    img = img.numpy().squeeze(0)
    img = img.transpose(1, 2, 0)
    img = img * np.array((0.229, 0.224, 0.225)) + np.array((0.485, 0.456, 0.406))
    img = img.clip(0, 1)
    return img
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def tensor2im_raw(input_image, imtype=np.float64):\n if not isinstance(input_image, np.ndarray):\n if isinstance(input_image, torch.Tensor): # get the data from a variable\n image_tensor = input_image.data\n else:\n return input_image\n image_numpy = image_tensor[0].cpu().float().numpy() # convert it into a numpy array\n # print(image_numpy.shape)\n # image_numpy = np.transpose(image_numpy, (1, 2, 0)) \n \n return image_numpy.astype(imtype)", "def to_tensor(pic):\n if not _is_numpy_image(pic):\n raise TypeError('pic should be ndarray. Got {}'.format(type(pic)))\n # handle numpy array\n img = torch.from_numpy(pic.transpose((2, 0, 1)))\n # backward compatibility\n if isinstance(img, torch.ByteTensor):\n return img.float().div(255)\n else:\n return img", "def image_to_tensor(image):\n image_tensor = image.transpose(2, 0, 1)\n image_tensor = image_tensor.astype(np.float32)\n image_tensor = torch.from_numpy(image_tensor)\n if torch.cuda.is_available():\n image_tensor = image_tensor.cuda()\n return image_tensor", "def im_to_numpy(tensor):\n tensor_reshaped = tensor.expand(3, *tensor.shape[1:]).permute(1, 2, 0)\n return tensor_reshaped.detach().cpu().numpy()", "def tensor_to_image(img):\n t = transforms.ToPILImage()\n return t(img)", "def convert_image_to_tensor(image):\n # image = image.astype(np.float32)\n return transform(image)\n # return transform(image)", "def _torch_to_numpy(tensor):\n return tensor.detach().cpu().numpy()", "def to_numpy_image(pic):\n if not isinstance(pic, torch.Tensor):\n raise TypeError('pic should be tensor. Got {}'.format(type(pic)))\n\n assert pic.dim() == 3\n npimg = pic\n if pic.is_floating_point():\n pic = pic.mul(255).byte()\n npimg = np.transpose(pic.cpu().numpy(), (1, 2, 0))\n return npimg", "def pyplot_to_tensor(pyplot_figure):\n x = pyplot_to_numpy(pyplot_figure=pyplot_figure)\n return x", "def tensor2im_raw_sss(input_image, imtype=np.float64):\n if not isinstance(input_image, np.ndarray):\n if isinstance(input_image, torch.Tensor): # get the data from a variable\n image_tensor = input_image.data\n else:\n return input_image\n image_numpy = image_tensor[0].cpu().float().numpy() # convert it into a numpy array\n # print(image_numpy.shape)\n image_numpy = np.tile(image_numpy, (3, 1, 1))\n image_numpy = np.transpose(image_numpy, (1, 2, 0)) * 255.0\n \n return image_numpy.astype(imtype)", "def to_numpy(tensor):\n raise NotImplementedError", "def __to_tensor__(data: Union[int, float, np.ndarray, List]) -> torch.tensor:\n\n if isinstance(data, int) or len(data.shape) <= 2:\n tensor = torch.tensor(data=data)\n else:\n tensor = ToTensorV2()(image=data)['image']\n\n return tensor", "def to_img(t):\n return torchvision.transforms.ToPILImage()(t)", "def to_tensor(pic):\n if not _is_numpy(pic):\n raise TypeError('pic should be ndarray. Got {}'.format(type(pic)))\n\n if _is_numpy(pic) and not _is_numpy_image(pic):\n raise ValueError('pic should be 2/3 dimensional. 
Got {} dimensions.'.format(pic.ndim))\n\n # handle numpy array\n if pic.ndim == 2:\n pic = pic[:, :, None]\n\n img = torch.from_numpy(pic.transpose((2, 0, 1)))\n # backward compatibility\n if isinstance(img, torch.ByteTensor):\n return img.float().div(255)\n else:\n return img", "def im_convert(tensor):\n \n image = tensor.to(\"cpu\").clone().detach()\n image = image.numpy().squeeze()\n image = image.transpose(1,2,0)\n image = image * np.array((0.229, 0.224, 0.225)) + np.array((0.485, 0.456, 0.406))\n image = image.clip(0, 1)\n\n return image", "def pil_to_tensor(img):\n img = ToTensor()(img)\n return img", "def tensor_to_im(tensor):\n return tensor.reshape(-1, *tensor.shape[2:])[:, None, :, :]", "def convert_image(tensor):\n image = tensor.to('cpu').clone().detach()\n image = image.numpy().squeeze()\n image = image.transpose(1, 2, 0)\n \"\"\" Un-normalize \"\"\"\n image = image * np.array((0.229, 0.224, 0.225)) + np.array((0.485, 0.456, 0.406))\n return image.clip(0, 1)", "def images_to_numpy(tensor):\n\n imgs = tensor.data.cpu().numpy()\n imgs = imgs.transpose(0, 2, 3, 1) # (B, C, H, W) -> (B, H, W, C)\n imgs = np.clip(imgs, -1, 1)\n imgs = (imgs + 1) / 2 * 255\n imgs = imgs.astype('uint8')\n\n return imgs", "def tensor_img_to_npimg(tensor_img):\n if not (torch.is_tensor(tensor_img) and tensor_img.ndimension() == 3):\n raise NotImplementedError(\"Not supported tensor image. Only tensors with dimension CxHxW are supported.\")\n npimg = np.transpose(tensor_img.numpy(), (1, 2, 0))\n npimg = npimg.squeeze()\n assert isinstance(npimg, np.ndarray) and (npimg.ndim in {2, 3})\n return npimg", "def to_image(tensor):\n # Converts to PIL image\n unloader = transforms.ToPILImage()\n # Clone the tensor to CPU\n image = tensor.cpu().clone()\n # Remove fake batch dimension\n image = image.squeeze(0)\n # Convert to PIL image\n image = unloader(image)\n return image", "def create_img_tensor(img):\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n img_tensor = Variable(normalize(torch.from_numpy(np.transpose(img.astype(np.float32) / 255, (2, 0, 1)))))\n img_tensor = img_tensor.cuda()\n img_tensor = img_tensor.unsqueeze_(0)\n\n return img_tensor", "def multi_tensor2im(input_image, imtype=np.float64):\n if not isinstance(input_image, np.ndarray):\n if isinstance(input_image, torch.Tensor): # get the data from a variable\n image_tensor = input_image.data\n else:\n return input_image\n image_numpy = image_tensor[0].cpu().float().numpy() # convert it into a numpy array\n \n print_multi_numpy(image_numpy, val=True, shp=True)# max 255.0 min 0.0\n \n image_numpy = (np.transpose(image_numpy, (0, 2, 3, 1)) + 1.0) / 2.0 * 255.0 # post-processing: tranpose and scaling\n \n else: # if it is a numpy array, do nothing\n image_numpy = input_image\n return image_numpy.astype(imtype)", "def to_tensor(data):\n if np.iscomplexobj(data):\n data = np.stack((data.real, data.imag), axis=-1)\n return torch.from_numpy(data)", "def to_tensor(data):\n if np.iscomplexobj(data):\n data = np.stack((data.real, data.imag), axis=-1)\n return torch.from_numpy(data)", "def to_tensor(data):\n if np.iscomplexobj(data):\n data = np.stack((data.real, data.imag), axis=-1)\n return torch.from_numpy(data)", "def np2tensor(array, device=None):\n tensor = torch.from_numpy(array)\n return tensor", "def arr(img_arr, img_wid, img_hei):\n X = torch.Tensor(img_arr).view(-1, 1, img_wid, img_hei)\n X = X/255.0\n return X", "def convert_image_to_tensor(image_path: str, output_path: str) -> None:\n image = 
np.asarray(Image.open(image_path).resize(\n (224, 224))) / 255.0\n\n if (image.shape == (224, 224, 4)):\n image = image[:, :, 0] * 0.21 + image[:, :, 1] * 0.72 + image[:, :,\n 2] * 0.07\n rgb_batch = torch.tensor(np.expand_dims(\n np.repeat(image[..., np.newaxis], 3, -1), axis=0)).\\\n transpose(1, 3).transpose(2, 3).float()\n features = Alexnet.features(rgb_batch)\n features_tensor = torch.from_numpy(features.detach().numpy())\n torch.save(features_tensor.squeeze(0), output_path)", "def to_tensor(self): \n raise NotImplementedError" ]
[ "0.7048809", "0.6880173", "0.68093735", "0.6792789", "0.6757762", "0.67456317", "0.67265105", "0.6707813", "0.6678656", "0.665678", "0.6621333", "0.66024065", "0.6597689", "0.6589251", "0.65755355", "0.6522425", "0.6516791", "0.651404", "0.6482893", "0.64826477", "0.64555347", "0.64528465", "0.64266413", "0.64057654", "0.64057654", "0.64057654", "0.6372379", "0.6369428", "0.63436145", "0.6309335" ]
0.70222735
1
Displays two images side by side
def display_side_by_side_imgs(left_img, right_img, left_title='Left Image', right_title='Right Image', figsize=(16, 8)):
    # Convert the images to numpy arrays if they aren't already
    if isinstance(left_img, torch.Tensor):
        left_img = convert_tensor_to_numpy_img(left_img)
    if isinstance(right_img, torch.Tensor):
        right_img = convert_tensor_to_numpy_img(right_img)
    # Create plots and set titles
    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=figsize)
    ax1.imshow(left_img)
    ax2.imshow(right_img)
    ax1.title.set_text(left_title)
    ax2.title.set_text(right_title)
    plt.show()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_2gr(img1,img2,title1='',title2=''):\n\tf, (ax1,ax2) = plt.subplots(1,2,figsize=(20,10))\n\tax1.imshow(img1, cmap='gray')\n\tax1.set_title(title1)\n\tax2.imshow(img2, cmap='gray')\n\tax2.set_title(title2)\n\tplt.show()", "def AppendImages(im1, im2):\r\n im1cols, im1rows = im1.size\r\n im2cols, im2rows = im2.size\r\n im3 = Image.new('RGB', (im1cols+im2cols, max(im1rows,im2rows)))\r\n im3.paste(im1,(0,0))\r\n im3.paste(im2,(im1cols,0))\r\n return im3", "def displayImages(self):\n\n plt.figure(figsize=(8,6))\n plt.subplot(1,2,1)\n plt.imshow( self.original_image, cmap=\"gray\")\n plt.title(\"Original Image\")\n plt.subplot(1,2,2)\n plt.imshow( self.blurred_image, cmap=\"gray\")\n plt.title(\"Blurred Image\")", "def show_four_images(img1, img2, img3, img4, title):\n shape = (460, 250)\n # Get all images in same size for better display\n img1 = cv2.resize(img1, shape)\n img2 = cv2.resize(img2, shape)\n img3 = cv2.resize(img3, shape)\n img4 = cv2.resize(img4, shape)\n # combined 2 images horizontally\n numpy_horizontal1 = np.hstack((img1, img2))\n # combined the rest 2 images horizontally\n numpy_horizontal2 = np.hstack((img3, img4))\n # now combined all vertically to 1 image and display\n numpy_vertical = np.vstack((numpy_horizontal1, numpy_horizontal2))\n # final thing - show the output:\n show_image(numpy_vertical, title)", "def main():\n space_ship = SimpleImage(\"images/MillenniumFalcon.png\")\n figure = SimpleImage(\"images/ReyGreenScreen.png\")\n result = combine(space_ship, figure)\n result.show()", "def main():\n fg = SimpleImage('image_contest/me.jpg')\n bg = SimpleImage('image_contest/house.png')\n bg.make_as_big_as(fg)\n combined_img = combine(bg, fg)\n combined_img.show()", "def cli(fig1, fig2, out):\n click.echo('\\n' + '.' * 50)\n\n # open first image\n image1 = Image.open(fig1)\n\n # open second image\n image2 = Image.open(fig2)\n\n # retrieve the image dimensions.\n width, height = image1.size\n width2, height2 = image2.size\n\n if [width, height] != [width2, height2]:\n print(\"Image dimensions do not match! The Two inputs must have equal dimensions\")\n exit(1)\n else:\n print(\"Fig1 dimensions: \", image1.size)\n print(\"Fig2 dimensions: \", image2.size)\n # Create a new image object.\n merged = Image.new('RGB', image1.size)\n\n for i in range(0, width):\n for j in range(0, height):\n ima1 = list(image1.getpixel((i, j)))\n ima2 = list(image2.getpixel((i, j)))\n if ima1 == ima2:\n r, g, b, a = ima1\n elif [ima1[0], ima1[1], ima1[2]] == [0, 0, 0] and [ima2[0], ima2[1], ima2[2]] != [0, 0, 0]:\n r, g, b, a = ima2\n elif [ima1[0], ima1[1], ima1[2]] != [0, 0, 0] and [ima2[0], ima2[1], ima2[2]] == [0, 0, 0]:\n r, g, b, a = ima1\n elif [ima1[0], ima1[1], ima1[2]] != [0, 0, 0] and ima2 == [255, 255, 255, 255]:\n r, g, b, a = ima1\n elif [ima2[0], ima2[1], ima2[2]] != [0, 0, 0] and ima1 == [255, 255, 255, 255]:\n r, g, b, a = ima2\n else:\n # print ima1,ima2\n r = (ima1[0] + ima2[0]) // 2\n g = (ima1[1] + ima2[1]) // 2\n b = (ima1[2] + ima2[2]) // 2\n a = 255\n # print [r,g,b,a]\n\n merged.putpixel((i, j), (r, g, b, a))\n merged.save(out)\n click.echo('\\n' + '.' 
* 50)", "def DisplayMatches(im1, im2, matched_pairs):\r\n im3 = AppendImages(im1,im2)\r\n offset = im1.size[0]\r\n draw = ImageDraw.Draw(im3)\r\n for match in matched_pairs:\r\n draw.line((match[0][1], match[0][0], offset+match[1][1], match[1][0]),fill=\"red\",width=2)\r\n im3.show()\r\n return im3", "def join_images_horizontally(images):\n array = np.concatenate((images[0], images[1]), axis=1)\n return Image.fromarray(np.uint8(array))", "def show_images(images, cols = 1, titles = None):\n params = {'axes.titlesize': 8,\n 'axes.labelsize': 8,\n 'font.size': 8,\n 'legend.fontsize': 8,\n 'xtick.labelsize': 8,\n 'ytick.labelsize': 8,\n 'font.family': 'DejaVu Serif',\n 'font.serif': 'Computer Modern',\n }\n plt.rcParams.update(params)\n assert((titles is None)or (len(images) == len(titles)))\n n_images = len(images)\n if titles is None: titles = ['Image (%d)' % i for i in range(1,n_images + 1)]\n fig = plt.figure()\n plt.title(\"Point Shift Sweeps from -30 to 30\")\n \n for n, (image, title) in enumerate(zip(images, titles)):\n \n a = fig.add_subplot(cols, np.ceil(n_images/float(cols)), n + 1)\n a.get_yaxis().set_visible(False)\n a.get_xaxis().set_visible(False)\n\n if image.ndim == 2:\n plt.gray()\n plt.imshow(image, origin='lower') \n fig.set_size_inches(np.array(fig.get_size_inches()))\n \n\n plt.show()", "def appendimages(im1, im2):\n row1 = im1.shape[0]\n row2 = im2.shape[0]\n\n if row1 < row2:\n im1 = concatenate((im1, zeros((row2 - row1, im1.shape[1]))), axis=0)\n elif row1 > row2:\n im2 = concatenate((im2, zeros((row1 - row2, im2.shape[1]))), axis=0)\n\n return concatenate((im1, im2), axis=1)", "def display_images(filenames):\n for filename in filenames:\n display(Image(filename))", "def im2(data1, data2, xlab='', ylab='', tit='', bar=False, newfig=True, \\\n cl=None, x=[], y=[], fontsize=16):\n from pylab import figure, subplot, colorbar, xlabel, ylabel, title, clim\n from nsdata import imshow\n\n if newfig: figure()\n subplot(211)\n imshow(data1,x=x,y=y); \n if clim<>None: clim(cl)\n if bar: colorbar()\n xlabel(xlab, fontsize=fontsize); ylabel(ylab, fontsize=fontsize)\n title(tit)\n\n subplot(212)\n imshow(data2,x=x,y=y); \n if clim<>None: clim(cl)\n if bar: colorbar()\n xlabel(xlab, fontsize=fontsize); ylabel(ylab, fontsize=fontsize)\n\n return", "def verticalConcat(image1, image2):\n shape1 = image1.shape\n shape2 = image2.shape\n if shape1[1] > shape2[1]:\n resizeMaintainAspectRatio(image2, width=shape1[1])\n elif shape2[1] > shape1[1]:\n resizeMaintainAspectRatio(image1, width=shape2[1])\n\n return np.hstack((image1, image2))", "def displayImg(self):\r\n\r\n\t# If you want to skip n frames, set value to 0 to see all images\r\n\tSKIP = 4500\r\n for idx in range(len(self.centers)):\r\n\t if idx < SKIP:\r\n\t\tcontinue\r\n file_left = self.lefts[idx][5]\r\n file_center = self.centers[idx][5]\r\n file_right = self.rights[idx][5]\r\n\r\n img_left = cv2.imread(os.path.join(self.pathDir, file_left), \\\r\n cv2.IMREAD_COLOR)\r\n img_center = cv2.imread(os.path.join(self.pathDir, file_center), \\\r\n cv2.IMREAD_COLOR)\r\n img_right = cv2.imread(os.path.join(self.pathDir, file_right), \\\r\n cv2.IMREAD_COLOR)\r\n\r\n\t #Resize the image to 50%\r\n img_l = cv2.resize(img_left, None, fx=0.5, fy=0.5, \\\r\n interpolation = cv2.INTER_LINEAR)\r\n img_c = cv2.resize(img_center, None, fx=0.5, fy=0.5, \\\r\n interpolation = cv2.INTER_LINEAR)\r\n img_r = cv2.resize(img_right, None, fx=0.5, fy=0.5, \\\r\n interpolation = cv2.INTER_LINEAR)\r\n \r\n height, width = img_c.shape[:2]\r\n new_img = 
np.zeros((height, width*3, img_c.shape[2]),\r\n np.uint8)\r\n\r\n #Adding sequence numbers and Time\r\n\t #Left\r\n strTime = self.timestampToStr(self.lefts[idx][1])\r\n\t self.putTextToImg(img_l, self.lefts[idx][0], strTime, height)\r\n\t #Center\r\n\t strTime = self.timestampToStr(self.centers[idx][1])\r\n\t self.putTextToImg(img_c, self.centers[idx][0], strTime, height)\r\n\t #Right\r\n\t strTime = self.timestampToStr(self.rights[idx][1])\r\n\t self.putTextToImg(img_r, self.rights[idx][0], strTime, height)\r\n\t \r\n\t angle = float(self.angles_at_timestamps[idx])\r\n\t speed = float(self.speed_at_timestamps[idx])\r\n\r\n\t print \"speed: %f - angle: %f\" % (speed, angle)\r\n\r\n\t self.draw_path_on(img_c, speed, angle)\r\n\r\n\t #Generate the new image\r\n for i in range(height):\r\n new_img[i] = np.concatenate((img_l[i, : ], img_c[i, : ], \\\r\n img_r[i, : ]))\r\n \r\n\r\n cv2.imshow('Udacity Challenge 2 - Viewer', new_img)\r\n key = cv2.waitKey(30)\r\n\r\n # Press q to exit\r\n if key == ord('q'):\r\n break\r\n\r\n cv2.destroyAllWindows()", "def transform_images(img1,img2):", "def overlay_image(image_to_show, image_to_add, directory=None):\n \n direcx = \"/Documents/1.4.5 Images/\"#put a condition pls\n if directory == None:\n directory = os.getcwd()\n print directory\n new_image = PIL.Image.open(directory + direcx + image_to_show)\n new_imager = PIL.Image.open(directory + direcx + image_to_show)\n new_imager.show()\n new_image2 = PIL.Image.open(directory + direcx + image_to_add)\n new_image2.show()\n \n width, height = new_imager.size\n print str(width) + \" \" + str(height)\n \n new_image22 = new_image2.resize((width,height), PIL.Image.ANTIALIAS)\n new_image22.show()\n\n new_imager.paste(new_image22, (0,0), new_image22)\n new_imager.show()\n #cohrt = PIL.Image.blend(new_imager, new_image22, alpha=0.5)\n #cohrt.show() #broked", "def hflip(self):\n self.leftimg, self.rightimg = self.rightimg, self.leftimg", "def combine_pictures(images):\n widths, heights = zip(*(i.size for i in images))\n\n total_width = sum(widths)\n max_height = max(heights)\n\n new_im = Image.new('RGB', (total_width, max_height))\n\n x_offset = 0\n for im in images:\n new_im.paste(im, (x_offset, 0))\n x_offset += im.size[0]\n\n new_im.save('test.jpg')\n\n return True", "def display_similar_for_img(img_dir, img_name,img_map, indices, disp_num):\n images = get_similar_imgs(img_name, img_map, indices, disp_num, img_dir)\n for img in images:\n display(Image(img))", "def combine(imA, imB):\n # check image sizes\n if imA.size != imB.size:\n raise Exception(\"cannot combine two images with different sizes\")\n \n height, width = imA.size\n total_width = width * 2\n imC = Image.new('RGB', (total_width, height))\n x_offset = 0\n for im in [imA, imB]:\n imC.paste(im, (x_offset, 0))\n x_offset += im.size[1]\n return imC", "def concatenate_vert(images, deltaW, offsetW, offsetH):\n \n images = map(Image.open, images)\n W = max(img.size[0] for img in images)\n H = sum(img.size[1] for img in images)\n\n result = Image.new(\"RGBA\", (W, H))\n\n result.paste(images[0], (0, 0))\n \n # re-sizing \n new_width = images[0].size[0]-deltaW\n ratio = new_width/float(images[1].size[0])\n new_height = int(images[1].size[1]*ratio)\n \n img = images[1].resize((new_width, new_height), Image.ANTIALIAS)\n result.paste(img, (offsetW, images[0].size[1]-offsetH))\n result.save('result.png')", "def _crossing_over(self, img_ext_1, img_ext_2) -> ExtendedImage:\n # Copy first extended image\n new_member = img_ext_1.img.copy()\n height = 
img_ext_2.get_height()\n\n # Add the right half of the 2nd image to copy of the 1st image\n new_member[0:, (height // 2):, :3] = img_ext_2.img[0:, (height // 2):, :3]\n return ExtendedImage(new_member)", "def overlay_image(image_to_show, image_to_add, directory=None):\n \n direcx = \"/Documents/1.4.5 Images/\"#put a condition pls\n if directory == None:\n directory = os.getcwd()\n print directory\n new_image = PIL.Image.open(directory + direcx + image_to_show)\n new_imager = PIL.Image.open(directory + direcx + image_to_show)\n new_imager.show()\n new_image2 = PIL.Image.open(directory + direcx + image_to_add)\n new_image2.show()\n width, height = new_imager.size\n print str(width) + \" \" + str(height)\n #print new_image\n #print new_image2\n #if image_to_show == \"\":\n # print_directory_list2()\n # return \"Use one of these\"\n new_image22 = new_image2.resize((width,height), PIL.Image.ANTIALIAS)\n new_image22.show()\n\n new_imager.paste(new_image22, (width, height), new_image22)\n new_imager.show()\n #cohrt = PIL.Image.blend(new_imager, new_image22, alpha=0.5)\n #cohrt.show()", "def image_comparison(unaligned_image_ccd_lst,aligned_image_ccd_lst,stacked_img_ccd,outputs_path,obsdate):\n source_hdu = CCDData(unaligned_image_ccd_lst[0],unit='adu')\n source_image_hdr = source_hdu.header\n run_filename = source_image_hdr['RUN'].strip(' ')\n target_name = source_image_hdr['FIELD'].strip(' ')\n exptime = source_image_hdr['EXPTIME']\n chip_num = source_image_hdr['CHIP']\n \n # compare unaligned vs aligned images\n for i, unaligned_img in enumerate(unaligned_image_ccd_lst[1:]):\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10), tight_layout=True)\n \n # source_hdu = CCDData(unaligned_image_ccd_lst[0],unit='adu')\n image_hdr = unaligned_img.header\n run_filename = image_hdr['RUN'].strip(' ')\n target_name = image_hdr['FIELD'].strip(' ')\n exptime = image_hdr['EXPTIME']\n chip_num = image_hdr['CHIP']\n \n show_image(unaligned_img, cmap='gray', ax=ax1, fig=fig, percl=90)\n ax1.set_title('Unaligned Image for {}-{}-{}-{}s ({})'.format(run_filename,target_name,chip_num,exptime,obsdate))\n \n show_image(aligned_image_ccd_lst[i], cmap='gray', ax=ax2, fig=fig, percl=90)\n ax2.set_title('Aligned Image for {}-{}-{}-{}s ({})'.format(run_filename,target_name,chip_num,exptime,obsdate))\n \n plt.savefig(outputs_path/\"unaligned_vs_aligned_{}-{}-{}-{}.jpg\".format(run_filename,target_name,chip_num,exptime),dpi=900)\n plt.show()\n \n # compare source image to stacked image\n fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10), tight_layout=True)\n \n show_image(unaligned_image_ccd_lst[0], cmap='gray', ax=ax1, fig=fig, percl=90)\n ax1.set_title('Unaligned Source Image for {}-{}-{}s ({})'.format(target_name,chip_num,exptime,obsdate))\n \n show_image(stacked_img_ccd, cmap='gray', ax=ax2, fig=fig, percl=90)\n ax2.set_title('Aligned Stacked Image for {}-{}-{}s ({})'.format(target_name,chip_num,exptime,obsdate))\n \n plt.savefig(outputs_path/\"source_vs_stacked_{}-{}-{}.jpg\".format(target_name,chip_num,exptime),dpi=900)\n plt.show()", "def main():\n me = SimpleImage(\"images/me.JPG\")\n dinosaur = SimpleImage(\"images/dinosaur.jpg\")\n\n dinosaur.make_as_big_as(me)\n combine = magic(me, dinosaur)\n combine.show()", "def display(self):\n nrow = 2\n ncol = len(self.views) + 1\n rows = [(self.views[0].original, len(self.views)),\n (self.views[0].image, len(self.views) + 1)]\n fig, axes = plt.subplots(nrows=nrow, ncols=ncol,\n figsize=self._figsize(rows),\n squeeze=True)\n originals = [(v.position.id, v.original) 
for v in self.views] + [\n ('combined', np.median(np.stack([v.original for v in self.views]), axis=0))]\n warped = [(v.position.id, v.image) for v in self.views] + [\n ('combined', self.image)]\n for ax, (title, img) in zip(axes.ravel(), originals + warped):\n ax.imshow(img)\n ax.axis('off')\n ax.xaxis.set_visible(False)\n ax.yaxis.set_visible(False)\n ax.set(title=title)\n fig.tight_layout()\n fig.canvas.draw()\n img_array = np.array(fig.canvas.renderer._renderer)\n plt.close('all')\n return img_array", "def concat_images_horiz(imga, imgb):\n ha,wa = imga.shape[:2]\n hb,wb = imgb.shape[:2]\n max_height = np.max([ha, hb])\n total_width = wa+wb\n new_img = np.zeros(shape=(max_height, total_width, 3), dtype=np.uint8)\n new_img[:ha,:wa]=imga\n new_img[:hb,wa:wa+wb]=imgb\n return new_img", "def _execute_with_array_two_images(self, image1, image2):\n\t\tpil_image1 = [PIL.Image.fromarray((image1*255.0).astype('uint8'))]\n\t\tpil_image2 = [PIL.Image.fromarray((image2*255.0).astype('uint8'))]\n\t\tprint(pil_image1)\n\t\tprint(pil_image2)\n\t\tfor operation in self.operations:\n\t\t\tr = np.round(random.uniform(0, 1), 1)\n\t\t\tif r <= operation.probability:\n\t\t\t\tnew_seed = random.random()\n\t\t\t\trandom.seed(new_seed)\n\t\t\t\tpil_image1 = operation.perform_operation(pil_image1)\n\t\t\t\trandom.seed(new_seed)\n\t\t\t\tpil_image2 = operation.perform_operation(pil_image2)\n\n\t\t# numpy_array1 = np.asarray(pil_image1).astype('float32')/255.0\n\t\t# numpy_array2 = np.asarray(pil_image2).astype('float32')/255.0\n\t\tnumpy_array1 = np.array(pil_image1[0]).astype(np.float32)\n\t\tnumpy_array2 = np.array(pil_image2[0]).astype(np.float32)\n\n\t\treturn numpy_array1,numpy_array2", "def plot_image_comparison(name, img_arr):\n\n plt.clf()\n fig = plt.figure()\n\n # divide the images into rows and columns\n num_imgs = len(img_arr)\n columns = num_imgs // 2\n rows = math.ceil(num_imgs / columns)\n\n for i, vals in enumerate(img_arr):\n fig.add_subplot(rows, columns, i+1)\n plt.imshow(vals[\"img\"], vmin=0, vmax=255)\n plt.axis(\"off\")\n plt.title(vals[\"title\"], fontsize=8)\n\n plt.savefig(f\"{name}/comparison.jpeg\")" ]
[ "0.6887343", "0.6828774", "0.67714393", "0.66529375", "0.6530886", "0.6474752", "0.64064336", "0.64017236", "0.6242224", "0.6232678", "0.621642", "0.62152064", "0.61897266", "0.60866994", "0.60815114", "0.60736537", "0.60567325", "0.60536975", "0.6027479", "0.6007779", "0.59986746", "0.5997857", "0.59757984", "0.59716177", "0.5964933", "0.5951021", "0.59364897", "0.59339494", "0.5933624", "0.5931756" ]
0.77245486
0
Loads the content and style images and resizes them if appropriate
def load_content_and_style_images(content_dir, style_dir, max_allowable_size=400, resize_shape=None):
    content_img = Image.open(content_dir).convert('RGB')
    style_img = Image.open(style_dir).convert('RGB')
    if max(content_img.size) > max_allowable_size:
        size = max_allowable_size
    else:
        size = max(content_img.size)
    if resize_shape is not None:
        size = resize_shape
    content_img_transforms = transforms.Compose([transforms.Resize(size),\
        transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
    content_img = content_img_transforms(content_img)[:3,:,:].unsqueeze(0)
    style_img_transforms = transforms.Compose([transforms.Resize(content_img.shape[-2:]),\
        transforms.ToTensor(), transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225))])
    style_img = style_img_transforms(style_img)[:3,:,:].unsqueeze(0)
    return content_img, style_img
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initImages(self):\n pass", "def initImages(self):\n pass", "def initImages(self):\n pass", "def load(cls):\n\n cls.images[\"Wall\"] = pygame.image.load(\n \"ressources/images/wall.png\").convert()\n cls.images[\"MacGyver\"] = pygame.image.load(\n \"ressources/images/Mac.png\").convert()\n cls.images[\"Guardian\"] = pygame.image.load(\n \"ressources/images/Guardian.png\").convert()\n cls.images[\"Path\"] = pygame.image.load(\n \"ressources/images/path.png\").convert()\n cls.images[\"Tube\"] = pygame.image.load(\n \"ressources/images/tube.png\").convert()\n cls.images[\"Ether\"] = pygame.image.load(\n \"ressources/images/ether.png\").convert()\n cls.images[\"Needle\"] = pygame.image.load(\n \"ressources/images/needle.png\").convert()\n cls.images[\"gr\"] = pygame.image.load(\n \"ressources/images/but_du_jeu.png\").convert()", "def process_images():\n image_path = os.path.join(settings.BASE_DIR, 'themes/CMESH/assets/img/')\n static_images = os.path.join(settings.BASE_DIR, 'static/CMESH/img/')\n\n copy_files(image_path, static_images)", "def _load(self):\r\n\t\t\r\n\t\tself.image.blit(self.sheet.sheet, (0,0), (self.x, self.y, self.size, self.size))", "def preprocess(self):\n print(\"processing content images...\")\n for dir_item in self.selectedContent:\n join_path = Path(self.content_image_dir,dir_item.replace('/','_'))\n if join_path.exists():\n print(\"processing %s\"%dir_item,end='\\r')\n images = join_path.glob('*.%s'%(self.subffix))\n for item in images:\n self.content_dataset.append(item)\n else:\n print(\"%s dir does not exist!\"%dir_item,end='\\r')\n label_index = 0\n print(\"processing style images...\")\n for class_item in self.selectedStyle:\n images = Path(self.style_image_dir).glob('%s/*.%s'%(class_item, self.subffix))\n for item in images:\n self.art_dataset.append([item, label_index])\n label_index += 1\n random.seed(self.random_seed)\n random.shuffle(self.content_dataset)\n random.shuffle(self.art_dataset)\n # self.dataset = images\n print('Finished preprocessing the Art Works dataset, total image number: %d...'%len(self.art_dataset))\n print('Finished preprocessing the Content dataset, total image number: %d...'%len(self.content_dataset))", "def __init__(self, content_image_dir,style_image_dir,\n selectedContent,selectedStyle,\n content_transform,style_transform,\n subffix='jpg', random_seed=1234):\n self.content_image_dir= content_image_dir\n self.style_image_dir = style_image_dir\n self.content_transform= content_transform\n self.style_transform = style_transform\n self.selectedContent = selectedContent\n self.selectedStyle = selectedStyle\n self.subffix = subffix\n self.content_dataset = []\n self.art_dataset = []\n self.random_seed= random_seed\n self.preprocess()\n self.num_images = len(self.content_dataset)\n self.art_num = len(self.art_dataset)", "def load_images(self):\n for image in self.gltf.images:\n self.images.append(image.load(self.path.parent))", "def loadImagesTag(self): \n dictionary = {}\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(GENDER_FRONT)\n dictionary[\"gender\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(SKIN_BACK)\n dictionary[\"skin\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(HEAD_BACK)\n dictionary[\"head\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(BODY_BACK)\n dictionary[\"body\"] = 
guiobjects.OcempImageButtonTransparent(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(MASK_BACK)\n dictionary[\"mask\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(HAIR_BACK)\n dictionary[\"hair\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n if self.avatarConfiguration[\"gender\"] == \"boy\":\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(SHIRT_BACK)\n dictionary[\"shirt\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(TROUSERS_BACK)\n dictionary[\"trousers\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(SKIRT_BACK)\n dictionary[\"skirt\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n else:\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(SHIRT_DISABLED)\n dictionary[\"shirt\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(TROUSERS_DISABLED)\n dictionary[\"trousers\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(SKIRT_BACK)\n dictionary[\"skirt\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n imgPath = GG.genteguada.GenteGuada.getInstance().getDataPath(SHOES_BACK)\n dictionary[\"shoes\"] = guiobjects.OcempImageButtonTransparent(imgPath)\n return dictionary", "def load_images():\n print(\"[+] UPDATE - Begin loading images\")\n\n colors = [\"w\", \"b\"]\n piece_types = [\"p\", \"R\", \"N\", \"B\", \"K\", \"Q\"]\n for color in colors:\n for type in piece_types:\n piece = color + type\n IMAGES[piece] = p.transform.scale(p.image.load(\"images/\" + piece + \".png\"), (SQ_SIZE, SQ_SIZE))\n\n print(\"[+] UPDATE - Images loaded\")", "def load_image(self, **kwargs):\n ...", "def load(self):\n\n # get files in folder\n files = [f for f in listdir(self.data_path)]\n print(\"loading images from folder: %s\" % self.data_path)\n\n images = []\n image_targets = []\n for f in files:\n filepath = path.join(self.data_path, f)\n images.append(io.imread(filepath, as_grey=True))\n image_targets.append(self.target)\n\n # define new size and resize images\n new_size = (2 ** self.size_exponent, 2 ** self.size_exponent)\n for i in range(0, len(images)):\n # images[i] = transform.resize(images[i], new_size)\n images[i] = misc.imresize(images[i], new_size) / 16\n\n self.images = images\n self.targets = image_targets", "def assets():", "def _load_disk(self):\r\n s = self.file_string + ' '\r\n im = Image.open(self.file_string)\r\n\r\n self.ix, self.iy = im.size\r\n s += '(%s)' % im.mode\r\n self.alpha = (im.mode == 'RGBA' or im.mode == 'LA')\r\n\r\n if self.mipmap:\r\n resize_type = Image.BICUBIC\r\n else:\r\n resize_type = Image.NEAREST\r\n\r\n # work out if sizes > MAX_SIZE or coerce to golden values in WIDTHS\r\n if self.iy > self.ix and self.iy > MAX_SIZE: # fairly rare circumstance\r\n im = im.resize((int((MAX_SIZE * self.ix) / self.iy), MAX_SIZE))\r\n self.ix, self.iy = im.size\r\n n = len(WIDTHS)\r\n for i in xrange(n-1, 0, -1):\r\n if self.ix == WIDTHS[i]:\r\n break # no need to resize as already a golden size\r\n if self.ix > WIDTHS[i]:\r\n im = im.resize((WIDTHS[i], int((WIDTHS[i] * self.iy) / self.ix)),\r\n resize_type)\r\n self.ix, self.iy = im.size\r\n break\r\n\r\n if VERBOSE:\r\n print('Loading ...{}'.format(s))\r\n\r\n if self.flip:\r\n im = im.transpose(Image.FLIP_TOP_BOTTOM)\r\n\r\n RGBs = 
'RGBA' if self.alpha else 'RGB'\r\n self.image = im.convert(RGBs).tostring('raw', RGBs)\r\n self._tex = ctypes.c_int()\r\n if 'fonts/' in self.file_string:\r\n self.im = im", "def refreshImages(self):\n fileName1 = \"DECK/\" + str(self.card1) + \".gif\"\n fileName2 = \"DECK/\" + str(self.card2) + \".gif\"\n fileName3 = \"DECK/\" + str('b') + \".gif\"\n self.image1 = PhotoImage(file = fileName1)\n self.cardLabel1[\"image\"] = self.image1\n self.image2 = PhotoImage(file = fileName2)\n self.cardLabel2[\"image\"] = self.image2\n self.image3 = PhotoImage(file = fileName3)\n self.cardLabel3[\"image\"] = self.image3", "def build_assets(self):\n theme = self.theme\n \n # ~ self.assets_dir = cwd + \"/CenterSide_Themes/\" + theme + \"/\"\n \n \n \n \n \n \n # ~ self.blank_langmssg = QPixmap(\"blank_langmssg.svg\")\n # ~ self.blank_thememssg = QPixmap(\"blank_thememssg.svg\")\n \n \n \n \n \n # ~ self.icon_info = QIcon(\"Icons/info.svg\")\n # ~ self.icon_intructions = QIcon(\"Icons/instructions.svg\")\n # ~ self.icon_internet = QIcon(\"Icons/internet.svg\")\n # ~ self.icon_invite = QIcon(\"Icons/invite.svg\")\n # ~ self.icon_languages = QIcon(\"Icons/languages.svg\")\n # ~ self.icon_local = QIcon(\"Icons/local.svg\")\n # ~ self.icon_message = QIcon(\"Icons/message.svg\")\n # ~ self.icon_name = QIcon(\"Icons/name.svg\")\n # ~ self.icon_options = QIcon(\"Icons/options.svg\")\n # ~ self.icon_palettes = QIcon(\"Icons/palettes.svg\")\n \n # ~ self.icon_quit = QIcon(\"Icons/quit.svg\")\n # ~ self.icon_refresh = QIcon(\"Icons/refresh.svg\")\n # ~ self.icon_shop = QIcon(\"Icons/shop.svg\")\n # ~ self.icon_soundon = QIcon(\"Icons/soundon.svg\")\n # ~ self.icon_soundoff = QIcon(\"Icons/soundoff.svg\")\n # ~ self.icon_vsAI = QIcon(\"Icons/vsAI.svg\")", "def build_filler_images(self):", "def load(self):\n # Frame\n self.frame.grid_configure(row=0, column=0, columnspan=4, padx=PAD, pady=(TINY_PAD, PAD), sticky=tk.W+tk.E)\n self.frame.columnconfigure(0, weight=1)\n # Crossword title\n self.title_label.config(**settings.get(\"style:title\"))\n self.title_label.grid(row=0, column=0, pady=(0, PAD), sticky=tk.W)\n # Crossword author\n self.author_label.config(**settings.get(\"style:author\"))\n self.author_label.grid(row=0, column=0, padx=TINY_PAD, pady=(0, PAD), sticky=tk.E)\n # Separator\n self.separator.config(height=SEPARATOR_HEIGHT, bg=SEPARATOR_COLOR)\n self.separator.grid(row=1, padx=TINY_PAD, sticky=tk.W+tk.E)", "def handle_images(lyx_path, blog_dir, assets_rel_dir, front_matters,\n update=True):\n\n our_fm = front_matters.our_fm\n\n assets_rel_dir = os.path.normpath(assets_rel_dir)\n\n # The images are created in a directory of the form\n # blog_path/assets_rel_dir/date-html_fname\n # so that images of different articles are in separate directories.\n date_html_fname = front_matters.get_date_html_fname()\n rel_dest_dir = os.path.join(assets_rel_dir, date_html_fname)\n dest_dir = os.path.join(blog_dir, rel_dest_dir)\n\n image_info = []\n name_to_num = {}\n\n image_num = 1\n image_http_path = None\n image_label = None\n\n # NOTE:\n # - In LyX files, '\\' can only appear in commands, so searching for, say,\n # '\\begin_inset' is safe.\n with open(lyx_path, encoding='utf-8') as f:\n # in_* remember the nesting level; -1 = not inside\n in_graphics = -1\n in_float_figure = -1\n in_label = -1\n\n nesting = 0\n\n for line in f:\n if line.startswith(r'\\begin_inset Float figure'):\n in_float_figure = nesting # we're in\n if line.startswith(r'\\begin_inset Graphics'):\n in_graphics = nesting # we're in\n if 
(line.startswith(r'\\begin_inset CommandInset label') and\n in_float_figure != -1): # only if in float figure\n in_label = nesting # we're in\n\n we_were_in = (in_graphics != -1 or\n in_float_figure != -1 or\n in_label != -1)\n\n # We handle the nesting of begin_ and end_inset.\n if line.startswith(r'\\begin_inset'):\n nesting += 1\n elif line.startswith(r'\\end_inset'):\n nesting -= 1\n if in_graphics == nesting:\n in_graphics = -1 # we're out\n if in_float_figure == nesting:\n in_float_figure = -1 # we're out\n if in_label == nesting:\n in_label = -1 # we're out\n\n we_are_in = (in_graphics != -1 or\n in_float_figure != -1 or\n in_label != -1)\n\n if we_were_in and not we_are_in: # we exited\n # We write the data collected so far.\n if image_http_path is None:\n raise Exception(\"LyX file: couldn't get image http path!\")\n image_info.append(image_http_path)\n if image_label:\n name_to_num[image_label] = str(image_num)\n image_num += 1\n\n # reset\n image_http_path = None\n image_label = None\n\n if in_graphics != -1:\n # format:\n # filename discrete fgfg.svg\n m = re.match(r'\\s*filename\\s+(.+)$', line)\n if m:\n src_path = m[1]\n base_name = os.path.basename(src_path)\n dest_path = os.path.join(dest_dir, base_name)\n if not update and os.path.exists(dest_path):\n raise Exception('Already exists: ' + dest_path)\n\n # Create the directory and copy the file\n os.makedirs(dest_dir, exist_ok=True)\n shutil.copy(src_path, dest_path)\n\n # Return the blog-relative path of the copied image\n image_http_path = ('/' + assets_rel_dir + '/' +\n date_html_fname + '/' + base_name)\n\n if in_float_figure != -1 and in_label != -1:\n # format:\n # name \"fig:label_per_figure\"\n m = re.match(r'\\s*name\\s+\"([^\"]+)\"$', line)\n if m:\n image_label = m[1]\n\n return image_info, name_to_num", "def __init__(self, path):\n folder, name = os.path.split(path)\n file = open(path, 'rb')\n content_file = ContentFile(file.read(), name=name)\n super(OnDiscPlaceholderImage, self).__init__(content_file, name)", "def load_images(self):\n\n self.scrabble_board = pygame.image.load(os.path.join(\"Images\", \"scrabble_board.png\"));\n self.scrabble_board = pygame.transform.scale(self.scrabble_board, (self.BOARD_SIZE, self.BOARD_SIZE));\n\n for i in range(26):\n img_letter_name = \"letter_\" + chr(65+i) + \".png\"\n img_letter_path = os.path.join(\"Images\", \"Letters\", img_letter_name);\n\n img_letter = pygame.image.load(img_letter_path);\n\n case_size = int((self.BOARD_SIZE-self.BOARD_PADDING*2)/15);\n\n self.l_img_letter.append(img_letter);\n\n img_joker_path = os.path.join(\"Images\", \"Letters\", \"joker.png\");\n img_joker = pygame.image.load(img_joker_path);\n\n self.l_img_letter.append(img_joker);\n\n self.img_loop = pygame.image.load(os.path.join(\"Images\", \"loop.png\"));\n self.img_background_save = pygame.image.load(os.path.join(\"Images\", \"background_save.png\"));", "def RefreshThumbnail(self):\n if not self.property:\n self.bmp = None\n return\n\n path = self.property.DoGetValue()\n\n if not os.path.isfile(path):\n self.bmp = None\n return\n\n image = wx.Image(path)\n image.Rescale(64, 64)\n self.bmp = wx.BitmapFromImage(image)", "def load_all_resources():\n\n # Load the fonts\n ResourcesManager._load_font(\"Munro.ttf\")\n\n # Load images\n ResourcesManager.HIBER_NATION_IMG = ResourcesManager._load_image(\"hiber_nation.png\")\n ResourcesManager.SHIP_IMG = ResourcesManager._load_image(\"ship.png\")\n ResourcesManager.MISSILE_IMG = ResourcesManager._load_image(\"missile.png\")\n\n # Load sounds\n # 
ResourcesManager.MENU_MUSIC = ResourcesManager._load_sound(\"menu.ogg\")", "def _update_layout(self):\n for i in self._MODES:\n self._parts[i].set_texture(self._prefix + i, \"skin\", resize=True)\n\n # Top and Left\n self._parts[\"Top\"].width = \"100%\"\n self._parts[\"Top\"].margin = (0, self._parts[\"TR\"].width, 0, self._parts[\"TL\"].width)\n\n self._parts[\"Left\"].height = \"100%\"\n self._parts[\"Left\"].margin = (self._parts[\"TL\"].height, 0, self._parts[\"BL\"].height, 0)\n\n # Mid\n self._parts[\"Mid\"].set_size(\"100%\", \"100%\")\n self._parts[\"Mid\"].margin = (self._parts[\"Top\"].height, self._parts[\"Right\"].width,\n self._parts[\"Bottom\"].height, self._parts[\"Left\"].width)\n\n # Bottom and Right\n self._parts[\"Bottom\"].width = \"100%\"\n self._parts[\"Bottom\"].margin = (0, self._parts[\"BR\"].width, 0, self._parts[\"BL\"].width)\n self._parts[\"Bottom\"].bottom = 0\n\n self._parts[\"Right\"].height = \"100%\"\n self._parts[\"Right\"].margin = (self._parts[\"TR\"].height, 0, self._parts[\"BR\"].width, 0)\n self._parts[\"Right\"].right = 0\n\n # Corners\n self._parts[\"TL\"].top_left = 0, 0\n self._parts[\"TR\"].top_right = 0, 0\n self._parts[\"BL\"].bottom_left = 0, 0\n self._parts[\"BR\"].bottom_right = 0, 0", "def load(self):\r\n self.create_effect_classes()\r\n\r\n self._add_resource_descriptions_to_pools(self.create_external_resources())\r\n self._add_resource_descriptions_to_pools(self.create_resources())\r\n\r\n for meta, resource in resources.textures.load_pool():\r\n self._textures[meta.label] = resource\r\n\r\n for meta, resource in resources.programs.load_pool():\r\n self._programs[meta.label] = resource\r\n\r\n for meta, resource in resources.scenes.load_pool():\r\n self._scenes[meta.label] = resource\r\n\r\n for meta, resource in resources.data.load_pool():\r\n self._data[meta.label] = resource\r\n\r\n self.create_effect_instances()\r\n self.post_load()", "def _load_image(path):\r\n image = Image.open(path)\r\n size = image.size\r\n \r\n image = image.resize((550,550), Image.ANTIALIAS)\r\n# image = image.thumbnail((200,200), Image.ANTIALIAS)\r\n return image", "def load_images(self):\r\n self.standing_frame = [load_image(\"cat1.png\")]\r\n self.walk_frames_r = [load_image(\"cat2.png\"), load_image(\"cat3.png\"),\r\n load_image(\"cat4.png\")]", "def process_images(self):\n self.processed_content_image = tf.keras.applications.vgg19.preprocess_input(\n self.content_image)\n self.processed_style_image = tf.keras.applications.vgg19.preprocess_input(\n self.style_image)", "def back_image(path_file):\n\n path = os.getcwd()+path_file\n dirs = os.listdir(path)\n CEToolkit.contador_wraps -= 1\n if CEToolkit.contador_wraps < 0:\n CEToolkit.contador_wraps = len(dirs) - 1\n path_image = path + dirs[CEToolkit.contador_wraps]\n parent.ui.label_design_image.setPixmap(QtGui.QPixmap(path_image))" ]
[ "0.6116834", "0.6116834", "0.6116834", "0.6100597", "0.58552134", "0.5844597", "0.57763934", "0.5733679", "0.5689809", "0.56692624", "0.56571907", "0.5629311", "0.5606127", "0.5604215", "0.54896915", "0.5474187", "0.5467254", "0.5459076", "0.5442688", "0.54306614", "0.54275334", "0.54168016", "0.5416031", "0.5409938", "0.54062724", "0.54047024", "0.5387935", "0.53819233", "0.53547853", "0.53420544" ]
0.6643331
0
Check if item is a container (list, tuple, dict, set).
def is_container(item):
    if isinstance(item, str):
        return False
    elif hasattr(item, "__iter__"):
        return True
    return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_is_container(self):\n # verify ----------------------\n try:\n 1 in self.collection\n except TypeError:\n msg = \"'Collection' object is not container\"\n self.fail(msg)", "def is_container(value: object) -> TypeGuard[AnyContainer]:\n if isinstance(value, Container):\n return True\n if hasattr(value, \"__pt_container__\"):\n return is_container(cast(\"MagicContainer\", value).__pt_container__())\n return False", "def is_container_type_correct(self, container):\r\n return container in self.containers", "def is_iterable_container(value):\n # strings are iterable too so we have to treat them as a special case\n return not isinstance(value, str) and isinstance(value, collections.Iterable)", "def is_collection(var):\n return isinstance(var, Iterable) and not isinstance(var, str)", "def _is_iterable(items):\n return isinstance(items, (list, tuple, set, np.ndarray))", "def _is_dict(item):\n return isinstance(item, dict)", "def is_item_iterable(item):\n try:\n _ = [_ for _ in item]\n except TypeError:\n return False\n return True", "def _is_list(item):\n return isinstance(item, list)", "def is_collection(obj):\n return type(obj) in COLLECTIONS_SET", "def isList(self, item):\n\t retval = False\n\t if type(item) in (ListType, TupleType) :\n\t retval = True", "def is_container(self):\n return (self.__type & NODE_TAG) and self.children", "def is_collection_subclass(obj):\n #TODO add UserDict\n return ((issubclass(obj.__class__, COLLECTIONS) or\n is_list_like(obj)) and\n not is_collection(obj))", "def is_iterable(element):\n return isinstance(element, (set, list, tuple))", "def isSequence(obj):\n # type: (Any) -> bool\n return isinstance(obj, Sequence)", "def list_check(lst):\n for item in lst:\n if type(item) != list:\n return False\n return True", "def check_container_contains_item(context, container, item):\n assert_true(context.uuid[item] in get_container(context, container)[f\"{item}s\"])", "def is_sequence_of_dict(items):\n return all(isinstance(item, dict) for item in items)", "def is_scalar(value):\n return not isinstance(value, (list, tuple, dict))", "def check_if_nested(data):\n if isinstance(data, dict):\n for k in data:\n if isinstance(data[k], (list, dict)):\n return True\n elif isinstance(data, list):\n for i in data:\n if isinstance(i, (list, dict)):\n return True\n return False", "def isItem(obType,iType):\n if iType == 'sword':\n return isinstance(obType,Sword)\n elif iType == 'axe':\n return isinstance(obType,Axe)\n elif iType == 'shield':\n return isinstance(obType,Shield)\n elif iType == 'helmet':\n return isinstance(obType,Helmet)\n else:\n pass\n # raise SystemError('Bad item type {} in isItem'.format(iType))", "def is_quantity(x):\n return isinstance(x, Quantity)", "def is_string(item: Any) -> bool:\n if isinstance(item, (bytes, bytearray, str)):\n return True\n elif (isinstance(item, (tuple, list)) and all(is_string(x) for x in item)):\n return True\n elif (isinstance(item, np.ndarray) and # binary or unicode\n (item.dtype.kind in (\"U\", \"S\") or item.dtype == object)):\n return True\n return False", "def __contains__(self, item: Any) -> bool:\n try:\n return item in self.contents\n except TypeError:\n try:\n return item is self.contents\n except TypeError:\n return item == self.contents # type: ignore", "def type_check(typ, obj):\n type_s = type_str(typ) # convert to string if necessary\n\n nest_depth = type_s.count(\"List\")\n assert type_s.count(\"[\") == nest_depth, \"type_check only supports List for now, no Sets, Dicts, Tuples, ...\"\n\n assert 
type_s.startswith(\"List[\" * nest_depth) and type_s.endswith(\"]\" * nest_depth)\n base_type = {\"bool\": bool, \"int\": int, \"float\": float, \"str\": str}[type_s[5 * nest_depth:len(type_s) - nest_depth]]\n\n def helper(depth, o):\n if depth == 0:\n return type(o) is base_type\n else:\n return type(o) is list and all(helper(depth - 1, i) for i in o)\n\n return helper(nest_depth, obj)", "def is_sequence_of_list(items):\n return all(isinstance(item, list) for item in items)", "def is_sequence(item):\n return (not hasattr(item, \"strip\") and\n (hasattr(item, \"__getitem__\") or hasattr(item, \"__iter__\")))", "def check_type(content):\n return (isinstance(content, Elem) or type(content) == Text or\n (type(content) == list and all([type(elem) == Text or\n isinstance(elem, Elem)\n for elem in content])))", "def is_sequence_of_iterable(items):\n return all(is_item_iterable(item) for item in items)", "def is_str_or_coll(value):\n return bool(is_str(value)) or bool(is_tuple_or_list(value))" ]
[ "0.7344212", "0.71971273", "0.7153404", "0.6666673", "0.6665677", "0.6573487", "0.6531076", "0.65197587", "0.6327977", "0.6293779", "0.62678146", "0.61821836", "0.61464983", "0.6091407", "0.5965194", "0.5935714", "0.59311116", "0.58759165", "0.58688784", "0.5863264", "0.580105", "0.5781967", "0.5756845", "0.574117", "0.57362574", "0.5730782", "0.57256496", "0.5686696", "0.5644728", "0.562349" ]
0.8535093
0
Copies all "filesets" found within the nested value (e.g. dict, list,...) into the destination directory. If no nested filesets are found then the original value is returned. Note that multiple nested filesets (e.g. a list) will to have unique names names (i.e. not differentiated by parent directories) otherwise there will be a path clash in the destination directory.
def copy_nested_files(
    value: ty.Any,
    dest_dir: os.PathLike,
    supported_modes: FileSet.CopyMode = FileSet.CopyMode.any,
    **kwargs,
) -> ty.Any:
    from ..utils.typing import TypeParser  # noqa

    cache: ty.Dict[FileSet, FileSet] = {}

    def copy_fileset(fileset: FileSet):
        try:
            return cache[fileset]
        except KeyError:
            pass
        supported = supported_modes
        if any(MountIndentifier.on_cifs(p) for p in fileset.fspaths):
            supported -= FileSet.CopyMode.symlink
        if not all(
            MountIndentifier.on_same_mount(p, dest_dir) for p in fileset.fspaths
        ):
            supported -= FileSet.CopyMode.hardlink
        copied = fileset.copy(dest_dir=dest_dir, supported_modes=supported, **kwargs)
        cache[fileset] = copied
        return copied

    return TypeParser.apply_to_instances(FileSet, copy_fileset, value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_tree(self, src, dst):\n srcset_fmt = self.kw['image_srcset_format']\n srcset_sizes_all = self.kw['image_srcset_sizes']\n base_len = len(src.split(os.sep))\n for root, dirs, files in os.walk(src, followlinks=True):\n root_parts = root.split(os.sep)\n dst_dir = os.path.join(dst, *root_parts[base_len:])\n utils.makedirs(dst_dir)\n for src_name in files:\n if (not src_name.lower().endswith(tuple(self.image_ext_list)) and not src_name.upper().endswith(tuple(self.image_ext_list))):\n continue\n dst_file = os.path.join(dst_dir, src_name)\n src_file = os.path.join(root, src_name)\n srcset_name, srcset_ext = os.path.splitext(src_name)\n\n # Find out the width of the image so we only resize up to that size\n try:\n src_width = Image.open(src_file).size[0]\n except UnidentifiedImageError:\n # e.g. for SVGs: we don't need srcsets\n src_width = 1\n # then trim our list of sizes to only those below the image width:\n srcset_sizes = [ size for size in srcset_sizes_all if (size < src_width) ]\n \n # Create the list of filenames, starting with the \"max_sized\" version that bears the same name as the original file:\n dsts = [dst_file]\n\n # Now add all the other filenames, based on their size:\n for srcset_size in srcset_sizes:\n srcset_size_file = os.path.join(dst_dir, srcset_fmt.format(\n name = srcset_name,\n size = srcset_size,\n ext = srcset_ext,\n ))\n dsts.append(srcset_size_file)\n\n # If we have extra output formats for images, we need to add them to the list as well:\n for extra_format in self.kw['extra_image_extensions']:\n # First the largest / default image:\n dsts.append(os.path.join(dst_dir, srcset_name + extra_format))\n \n # Then the smaller ones:\n for srcset_size in srcset_sizes:\n srcset_size_file = os.path.join(dst_dir, srcset_fmt.format(\n name = srcset_name,\n size = srcset_size,\n ext = extra_format,\n ))\n dsts.append(srcset_size_file)\n \n yield {\n 'name': dst_file,\n 'file_dep': [src_file],\n 'targets': dsts,\n 'actions': [(self.process_image, (src_file, dsts, srcset_sizes))],\n 'clean': True,\n }", "def copy(self, destination):\n destination = Path(destination)\n src_base = str(self.directory)\n if self.flatten:\n dst_base = destination\n else:\n dst_base = Path(destination.joinpath(self.directory.stem))\n\n for src in self.locations_to_copy:\n if src.is_dir():\n for dir_path, dir_names, file_names in os.walk(str(src)):\n if self.flatten:\n dst_dir = dst_base\n else:\n dst_dir = Path(dir_path.replace(src_base, str(dst_base)))\n if not dst_dir.exists():\n dst_dir.mkdir(parents=True)\n for file in file_names:\n shutil.copy2(os.path.join(dir_path, file), str(dst_dir))\n else:\n if self.flatten:\n dst_dir = dst_base\n else:\n dst_dir = Path(str(src.parent).replace(src_base, str(dst_base)))\n if not dst_dir.exists():\n dst_dir.mkdir(parents=True)\n shutil.copy2(str(src), str(dst_dir))", "def copy_subtree(src, dst):\n for src_f in os.listdir(src):\n src_path = os.path.join(src, src_f)\n if os.path.isdir(src_path):\n dst_path = os.path.join(dst, src_f)\n if not os.path.exists(dst_path):\n shutil.copytree(src_path, dst_path)\n else:\n ProcessJson.copy_subtree(src_path, dst_path)\n elif os.path.isfile(src_path):\n dst_path = os.path.join(dst, src_f)\n if not os.path.exists(dst_path):\n shutil.copy(src_path, dst_path)", "def create_meta_df_valSet(srcFolderPath, labelsPath, destFolderPath, stop='all'):\n # iterate over file in subfolders in labelsPath and retrieve the key values from each\n sfp_lst = os.listdir(labelsPath)\n infiles, outfiles, nFrames = [], [], []\n 
traversed_tot = 0\n for sf in sfp_lst:\n traversed = 0\n sub_lab_path = os.path.join(labelsPath, sf)\n #sub_src_path = os.path.join(srcFolderPath, sf)\n sub_dest_path = os.path.join(destFolderPath, sf)\n if os.path.isdir(sub_lab_path):\n # create destination path to store the npy file\n if not os.path.exists(sub_dest_path):\n os.makedirs(sub_dest_path)\n \n labfiles = os.listdir(sub_lab_path)\n for lfile in labfiles:\n # if lfile is a json file, then get the subpath key and append\n if os.path.isfile(os.path.join(sub_lab_path, lfile)) and \\\n lfile.rsplit('.', 1)[1] in {'json', 'csv'}:\n lfilepath = os.path.join(sub_lab_path, lfile)\n with open(lfilepath, 'r') as fp:\n label = json.load(fp)\n if label is not None and len(label.keys()) > 0:\n src_file = os.path.join(srcFolderPath, label.keys()[0])\n dest_file = os.path.join(destFolderPath, \\\n label.keys()[0].rsplit('.', 1)[0]+\".npy\")\n if os.path.isfile(dest_file):\n print(\"Feats already present : {}\".format(dest_file))\n continue\n infiles.append(src_file)\n outfiles.append(dest_file)\n nFrames.append(getTotalFramesVid(src_file))\n traversed += 1\n \n if stop != 'all' and traversed == stop:\n break\n traversed_tot += traversed\n \n print \"No. of files to be written to destination : \"+str(traversed_tot)\n if traversed_tot == 0:\n print \"Check the structure of the dataset folders !!\"\n return traversed_tot\n\n ###########################################################################\n #### Form the pandas Dataframe and parallelize over the files.\n filenames_df = pd.DataFrame({\"infiles\":infiles, \"outfiles\": outfiles, \"nframes\": nFrames})\n filenames_df = filenames_df.sort_values([\"nframes\"], ascending=[True])\n filenames_df = filenames_df.reset_index(drop=True)\n \n return filenames_df", "def copy_structure(self, other_directory):\n pass", "def _copy_metadata_deep(value, old_value):\n if value is None or old_value is None or value is old_value: return\n\n if isinstance(value, dict):\n for k, v in value.iteritems():\n _copy_metadata_deep(v, old_value[k])\n elif isinstance(value, list):\n for v, old_v in zip(value, old_value):\n _copy_metadata_deep(v, old_v)\n else:\n try:\n value.__dict__.update(old_value.__dict__)\n except AttributeError:\n pass", "def get_filesets(self):\n\n\t\tif None != self.__filesets:\n\t\t\treturn self.__filesets.get_filesets()\n\t\telse:\n\t\t\treturn None", "def copy_cityscapes(src_dir: Path, datasets: Dict[str, Path], subsets: List[str], dst_dir: Path):\n assert 'l_img' in datasets\n\n print(\"Copying\", datasets.keys(), \"from\", src_dir, \"to\", dst_dir)\n\n regex_map = {\n 'l_img' : ['leftImg8bit', 'leftImg8bit'],\n 'r_img' : ['leftImg8bit', 'rightImg8bit'],\n 'seg' : ['leftImg8bit', 'gtFine_labelIds'],\n 'inst' : ['leftImg8bit', 'gtFine_instanceIds'],\n 'bbox' : ['leftImg8bit.png', 'gtFine_bbox.json'],\n 'disp' : ['leftImg8bit', 'disparity'],\n 'l_seq' : ['leftImg8bit', 'leftImg8bit'],\n 'r_seq' : ['leftImg8bit', 'rightImg8bit'],\n 'cam' : ['leftImg8bit.png', 'camera.json'],\n 'pose' : ['leftImg8bit.png', 'vehicle.json']\n }\n\n for subset in subsets:\n for dirpath, _, filenames in os.walk(os.path.join(src_dir, datasets['l_img'], subset)):\n for filename in filenames:\n if not filename.endswith(IMG_EXT):\n continue\n\n l_imgpath = os.path.join(dirpath, filename)\n foldername = os.path.basename(os.path.dirname(l_imgpath))\n\n for datatype, directory in datasets.items():\n if datatype in ['l_seq', 'r_seq']:\n frame_n = int(re.split(\"_\", filename)[2])\n img_name = filename.replace(\n 
str(frame_n).zfill(6)+\"_\"+regex_map[datatype][0],\n str(frame_n+1).zfill(6)+\"_\"+regex_map[datatype][1])\n else:\n img_name = filename.replace(\n regex_map[datatype][0], regex_map[datatype][1])\n\n src_path = os.path.join(src_dir, directory, subset, foldername, img_name)\n dst_path = os.path.join(dst_dir, directory, subset, foldername, img_name)\n\n if not os.path.isfile(src_path):\n print(\"Error finding corresponding data to \", l_imgpath)\n continue\n if os.path.isfile(dst_path):\n continue\n\n os.makedirs(os.path.dirname(dst_path), exist_ok=True)\n copy(src_path, dst_path)\n\n print(\"success copying: \", datasets.keys())", "def _recursive_put_files(self, is_subdirectory=False, sub_directory_name=None):\n current_path = os.path.basename(os.getcwd())\n LOG.info(f\"Copying files from the directory '{current_path}'\")\n for path_ in os.listdir():\n # Skip dotfiles and __pycache__\n if path_.startswith('.') or path_.startswith('__'):\n continue\n if os.path.isdir(path_):\n if sub_directory_name is not None:\n dir_name = os.path.join(sub_directory_name, path_)\n else:\n dir_name = path_\n try:\n self._file_explorer.md(dir_name)\n except Exception as e:\n print(e)\n os.chdir(dir_name.split(os.path.sep)[-1])\n self._recursive_put_files(\n is_subdirectory=True,\n sub_directory_name=dir_name,\n )\n else:\n try:\n if sub_directory_name is not None:\n self._file_explorer.put(path_, os.path.join(sub_directory_name, path_))\n else:\n self._file_explorer.put(path_)\n except RemoteIOError as e:\n print(path_, e)\n if is_subdirectory:\n os.chdir(UP_ONE_DIRECTORY)", "def nestedXcopy(namePatterns, sourceDir, targetDir, renameTo=None, flags=None):\n\tfor aDir in dirR.listNestedDirContainsOneOfFilesM(sourceDir, namePatterns, flags):\n\t\txcopy(namePatterns, aDir, os.path.join(targetDir, dirR._relativePathString(sourceDir, aDir)), renameTo, flags)", "def copytree(src, dest):\n shutil.copytree(src, dest)\n restorecon(dest, recursive=True)", "def _clean_datafile_set(self):\n file_list = self._meta['sets']['data file']['items']\n for item in file_list[:]:\n collection = item.split('@')[0]\n variable = item.split('@')[1]\n if not variable in self:\n file_list.remove(item)\n elif collection == 'masks':\n for s in self._get_source_ref(variable):\n while s in file_list:\n file_list.remove(s)\n elif self._is_array_item(variable):\n parent = self.parents(variable)[0]\n if not parent in file_list:\n idx = file_list.index(item)\n file_list[idx] = parent\n while item in file_list:\n file_list.remove(item)\n f_list = []\n for item in file_list:\n if not item in f_list: f_list.append(item)\n self._meta['sets']['data file']['items'] = f_list\n return None", "def copy_tree ( self,\n source_root, dest_root, overwrite=True, followlinks=False\n ):\n dodir = self.dodir\n copy_file = self.copy_file\n\n if overwrite:\n for source, dest, relpath, dirs, files, dirnames in walk_copy_tree (\n source_root, dest_root, followlinks=followlinks\n ):\n for ( source_dir, source_stat ), ( dest_dir, dest_stat ) in dirs:\n dodir ( dest_dir )\n\n for ( source_file, source_stat ), ( dest_file, dest_stat ) in files:\n if followlinks and stat.S_ISLINK ( source_stat ):\n dodir ( dest_file )\n else:\n copy_file ( source_file, dest_file )\n else:\n for source, dest, relpath, dirs, files, dirnames in walk_copy_tree (\n source_root, dest_root, followlinks=followlinks\n ):\n for ( source_dir, source_stat ), ( dest_dir, dest_stat ) in dirs:\n if dest_stat is None:\n dodir ( dest_dir )\n\n for ( source_file, source_stat ), ( dest_file, dest_stat ) in 
files:\n if dest_stat is None:\n if followlinks and stat.S_ISLINK ( source_stat ):\n dodir ( dest_file )\n else:\n copy_file ( source_file, dest_file )", "def makedir_iterable_structure(structure: Union[dict, list], current_base):\n if structure:\n for top_level_key in structure:\n recur_mkdir(structure, top_level_key, current_base)", "def _clone_defaults(self, source, dest, context):\n\n for base, dirs, files in os.walk(source):\n relative = os.path.relpath(base, source)\n\n for d in dirs:\n os.makedirs(os.path.join(dest, relative, d))\n\n for filename in files:\n\n if not filename.endswith(self.valid_extensions):\n continue\n\n with open(os.path.join(base, filename), 'r') as f:\n data = f.read()\n\n with open(os.path.join(dest, relative, filename), 'w') as f:\n data = jinja2.Template(data).render(**context)\n f.write(data)", "def set_by_path(root, path, value):\n \n sub_data = root\n for key in path[:-1]:\n sub_data = sub_data[key]\n sub_data[path[-1]] = value", "def get_flat_fileset(self):\n return self.config.get_flat_fileset()", "def copy(self, src, dest):\n\n src = os.path.join(os.path.dirname(__file__), \"collections\", \"kitchensink\", src)\n dest = os.path.join(self.checkout, dest)\n if os.path.isdir(src):\n shutil.copytree(src, dest)\n else:\n shutil.copy(src, dest)\n return dest", "def copy_json():\n sourcePath = 'contents/external/'\n targetPath = 'build/external/'\n for base,subdirs,files in os.walk(sourcePath):\n for file in files:\n orig = os.path.join(base, file)\n if os.path.isfile(orig) and file[-5:] == '.json':\n targetBase = os.path.join(targetPath, base[len(sourcePath):])\n dest = os.path.join(targetBase, file)\n puts(\"Checking diretory %s\" % targetBase)\n if not os.path.exists(targetBase):\n puts(yellow(\"Not found! Creating...\"))\n os.makedirs(targetBase)\n puts(\"Copying from %s to %s\" % (orig, dest))\n copyfile(orig, dest)", "def test_nested_settings_files(tmpdir):\n subfolder = tmpdir.mkdir(\"sub\")\n p = subfolder.join(\"settings.json\")\n nested_1_p = subfolder.join(\"nested_1.json\")\n nested_2_p = subfolder.join(\"nested_2.json\")\n\n nested_2_p.write(json.dumps({\"foo\": 1, \"bar\": 2}))\n nested_1_p.write(json.dumps({\"level_2_from_file\": str(nested_2_p)}))\n p.write(\n json.dumps(\n {\n \"level_1_from_file\": str(\n nested_1_p\n ), # nested_1_p references nested_2_p internally.\n \"spam\": \"parrot\",\n \"list\": [\n \"random\",\n {\n \"this_from_file\": str(\n nested_2_p\n ) # dictionaries in lists should be expanded as well.\n },\n ],\n }\n )\n )\n\n climate = core.Climate(prefix=\"TEST_STUFF\", settings_files=[str(p)])\n assert dict(climate.settings) == {\n \"spam\": \"parrot\",\n \"level_1\": {\"level_2\": {\"foo\": 1, \"bar\": 2}},\n \"list\": [\"random\", {\"this\": {\"foo\": 1, \"bar\": 2}}],\n }", "def copy_files(root: ET.Element) -> None:\r\n\r\n if type(root) != ET.Element:\r\n raise TypeError(\"Pass the parameter incorrectly, pass - xml.etree.ElementTree.Element\")\r\n\r\n for child in root:\r\n source_path: str = os.path.join(child.attrib.get('source_path'), child.attrib.get('file_name'))\r\n source_path = os.path.normpath(source_path)\r\n destination_path: str = child.get('destination_path')\r\n\r\n try:\r\n sh.copyfile(source_path, destination_path)\r\n except IOError as error:\r\n print(error)\r\n else:\r\n print(f\"[SUCCESS] - File - {source_path} copied successfully!\\n\")", "def copytree(source, filesfilter=[\"*\"], dirs=[\"web\", \"cache\"]):\n\n destiny = hashlib.md5(flatname(source).encode(\"utf-8\")).hexdigest()\n destiny_path = 
os.path.join(HOME, *dirs, destiny)\n\n if os.path.exists(destiny_path):\n shutil.rmtree(destiny_path)\n shutil.copytree(source, destiny_path, ignore=allow_patterns(*filesfilter))\n\n return destiny_path", "def make_structure(file_name):\n for i in os.walk(f'{tmp_path}/{file_name}'):\n fld = i[0].split(file_name)[-1]\n if fld:\n loc = f\"{output_path}{fld}\"\n if 'ppt' not in loc and (file_name not in loc):\n shutil.rmtree(f'{output_path}/{fld}')\n shutil.copytree(f'{tmp_path}/{file_name}/{i[0].split(file_name)[-1]}', f'{output_path}/{fld}')\n return", "def add(self, *filesets):\r\n for fileset in filesets:\r\n paths = fileset() if isinstance(fileset, Fileset) \\\r\n else fileset if hasattr(fileset, '__iter__') \\\r\n else [fileset]\r\n for path in paths:\r\n abspath = path\r\n if not os.path.isabs(abspath):\r\n abspath = os.path.join(self._base, path)\r\n if not os.path.exists(abspath):\r\n raise ValueError('Given path: %s with absolute path: %s which does not exist'\r\n % (path, abspath))\r\n self.filemap[abspath] = self.mapper(abspath)\r\n return self", "def files(self):\n all_files = set()\n for label in self.filesets:\n all_files.update(self.filesets[label])\n return all_files", "def __copyFiles(self):\n if os.path.isdir(self.__sourcePath):\n shutil.copytree(self.__sourcePath, self.__targetPath)\n else:\n shutil.copy2(self.__sourcePath, self.__targetPath)", "def deep_merge(source, dest):\n for key, value in source.iteritems():\n if key in dest:\n if isinstance(value, dict) and isinstance(dest[key], dict):\n deep_merge(value, dest[key])\n continue\n elif isinstance(value, list) and isinstance(dest[key], list):\n for item in value:\n if item not in dest[key]:\n dest[key].append(item)\n continue\n dest[key] = value", "def files(self):\r\n all_files = set()\r\n for label in self.filesets:\r\n all_files.update(self.filesets[label])\r\n return all_files", "def files(self):\r\n all_files = set()\r\n for label in self.filesets:\r\n all_files.update(self.filesets[label])\r\n return all_files", "def copy_tree_with_hidden(src_path: pathlib.Path, dest_path: pathlib.Path) -> None:\n if not dest_path.parent.exists():\n dest_path.parent.mkdir(parents=True)\n shutil.copytree(str(src_path), str(dest_path), copy_function=shutil.copy2)\n make_dot_files_in_tree_hidden(dest_path)" ]
[ "0.51228863", "0.5028856", "0.5017779", "0.49732214", "0.4929427", "0.48665708", "0.48508525", "0.48083517", "0.4803307", "0.47982416", "0.47842366", "0.4758702", "0.47544804", "0.47480837", "0.46908888", "0.46712145", "0.46549702", "0.46467698", "0.46445325", "0.46301386", "0.46236047", "0.45950335", "0.45943967", "0.45941868", "0.4568781", "0.45656294", "0.4546672", "0.4530432", "0.4530432", "0.45206854" ]
0.67138475
0
Update all templates that are present in the input spec. It should be run when all inputs used in the templates are already set.
def template_update(inputs, output_dir, state_ind=None, map_copyfiles=None): inputs_dict_st = attr.asdict(inputs, recurse=False) if map_copyfiles is not None: inputs_dict_st.update(map_copyfiles) if state_ind is not None: for k, v in state_ind.items(): k = k.split(".")[1] inputs_dict_st[k] = inputs_dict_st[k][v] from .specs import attr_fields # Collect templated inputs for which all requirements are satisfied. fields_templ = [ field for field in attr_fields(inputs) if field.metadata.get("output_file_template") and all( getattr(inputs, required_field) is not attr.NOTHING for required_field in field.metadata.get("requires", ()) ) ] dict_mod = {} for fld in fields_templ: dict_mod[fld.name] = template_update_single( field=fld, inputs=inputs, inputs_dict_st=inputs_dict_st, output_dir=output_dir, ) # adding elements from map_copyfiles to fields with templates if map_copyfiles: dict_mod.update(map_copyfiles) return dict_mod
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_templates(self):\n\n params = self.chose_param_value(\"--temp\")\n self._check_path_availability([\"get_template_dir\", \"get_template_dir_to\"])\n if self._check_whether_has_params(params):\n self.updater.update_files(\n self.analizer.get_template_dir(),\n self.analizer.get_template_dir_to(),\n params\n )\n return self.write_debug_message(\"Temp files upgrade is done!\\n\")\n return self.write_error_message(\"You haven't passed any params about template files\")", "def update():\n if Project.use_templates:\n defaults = _project_defaults()\n\n template = Template()\n\n for template_dir in [os.path.abspath(os.path.join(herringlib, 'herringlib', 'templates'))\n for herringlib in HerringFile.herringlib_paths]:\n\n info(\"template directory: %s\" % template_dir)\n # noinspection PyArgumentEqualDefault\n template.generate(template_dir, defaults, overwrite=False)", "def __fill_all_templates__(self,configs):\n template_dir = configs['system'].get('Common_directories','template')\n sample_template = os.path.join(template_dir,configs['pipeline'].get('Template_files','sample'))\n system_template = os.path.join(template_dir,configs['pipeline'].get('Template_files','system'))\n qsub_template = os.path.join(template_dir,configs['pipeline'].get('Template_files','bcbio'))\n self.__fill_template__(sample_template,self.sample_file)\n self.__fill_template__(system_template,self.systems_file)\n self.__fill_template__(qsub_template,self.qsub_file)", "def template_update_single(\n field, inputs, inputs_dict_st=None, output_dir=None, spec_type=\"input\"\n):\n # if input_dict_st with state specific value is not available,\n # the dictionary will be created from inputs object\n from ..utils.typing import TypeParser # noqa\n from pydra.engine.specs import LazyField\n\n VALID_TYPES = (str, ty.Union[str, bool], Path, ty.Union[Path, bool], LazyField)\n\n if inputs_dict_st is None:\n inputs_dict_st = attr.asdict(inputs, recurse=False)\n\n if spec_type == \"input\":\n inp_val_set = inputs_dict_st[field.name]\n if inp_val_set is not attr.NOTHING and not TypeParser.is_instance(\n inp_val_set, VALID_TYPES\n ):\n raise TypeError(\n f\"'{field.name}' field has to be a Path instance or a bool, but {inp_val_set} set\"\n )\n if isinstance(inp_val_set, bool) and field.type in (Path, str):\n raise TypeError(\n f\"type of '{field.name}' is Path, consider using Union[Path, bool]\"\n )\n elif spec_type == \"output\":\n if not TypeParser.contains_type(FileSet, field.type):\n raise TypeError(\n f\"output {field.name} should be file-system object, but {field.type} \"\n \"set as the type\"\n )\n else:\n raise TypeError(f\"spec_type can be input or output, but {spec_type} provided\")\n # for inputs that the value is set (so the template is ignored)\n if spec_type == \"input\" and isinstance(inputs_dict_st[field.name], (str, Path)):\n return inputs_dict_st[field.name]\n elif spec_type == \"input\" and inputs_dict_st[field.name] is False:\n # if input fld is set to False, the fld shouldn't be used (setting NOTHING)\n return attr.NOTHING\n else: # inputs_dict[field.name] is True or spec_type is output\n value = _template_formatting(field, inputs, inputs_dict_st)\n # changing path so it is in the output_dir\n if output_dir and value is not attr.NOTHING:\n # should be converted to str, it is also used for input fields that should be str\n if type(value) is list:\n return [str(output_dir / Path(val).name) for val in value]\n else:\n return str(output_dir / Path(value).name)\n else:\n return attr.NOTHING", "def 
updateWidgets(self):\n super(AdminRulesForm, self).updateWidgets()\n available_templates = getUtility(IVocabularyFactory, name='collective.imagetags.templates')(self.context)\n skins = getToolByName(self.context, 'portal_skins')\n path = skins.getSkinPath(skins.getDefaultSkin())\n paths = [i.strip() for i in path.split(',')]\n include = False\n improved_templates = []\n for template in available_templates.by_value:\n # If template directory is available and (is before 'plone_content' or 'plone_content' isn't available)...\n include = (template in paths and 'plone_content' in paths and paths.index(template)<paths.index('plone_content')) or \\\n (template in paths and not 'plone_conent' in paths)\n \n # ... then check it\n if include:\n term = available_templates.getTerm(template)\n improved_templates.append(term.token)\n\n for template in self.widgets['improved_templates'].items:\n template['checked'] = template['value'] in improved_templates", "def update_template(template, trial):\n assert isinstance(template, dict) or isinstance(template, list)\n items = template.items() if isinstance(template, dict) else enumerate(template)\n\n for key, value in items:\n if isinstance(value, str):\n if value in trial:\n template[key] = trial[value]\n elif isinstance(value, dict) or isinstance(value, list):\n template[key] = ConfigGenerator.update_template(template[key], trial)\n\n return template", "def write_inp_all(self, type, template):\n \n for struct in self.structs:\n try:\n state = self.gEs.loc[struct,'Ground State']\n if state in self.states:\n self.assemble_inp(struct, template, state, type)\n except KeyError:\n print(\"Ground state missing for %s. Rerun whaler gs.\" % struct)", "def update_service_template(\n templates: Dict[str, Any], source_data: str,\n namespace: Optional[str], service: Optional[str], color: Optional[str], purpose: str,\n) -> None:\n service_templates = templates['service-templates']\n assert isinstance(service_templates, list)\n for service_template in service_templates:\n if (\n service_template.get('namespace') == namespace\n and service_template.get('service') == service\n and service_template.get('color') == color\n and service_template.get('purpose') == purpose\n ):\n service_template['template'] = source_data\n return\n service_templates.append({\n 'namespace': namespace,\n 'service': service,\n 'color': color,\n 'purpose': purpose,\n 'template': source_data,\n })", "def template_spec(self, template_spec):\n\n self._template_spec = template_spec", "def updateTemplatesCov(self, templates, cov):\n\t\t\n\t\t# Update templates\n\t\tself.specTemplates = np.copy(templates)\n\t\tself.nTemplates = self.specTemplates.shape[1]\n\t\t\n\t\t# Update covariance\n\t\tself.cov = cov\n\t\tself.covInv = 1.0 / self.cov\n\t\t\n\t\t# Update specTempT, matrix B and determinant covariance matrix\n\t\tself.specTempT = self.__createSpecTempT()\n\t\tself.B = self.specTempT.dot(self.specTemplates)\n\t\tself.detCov = np.sum(np.log(self.cov))", "def pull_templates(self):\n try:\n backend_templates = self.client.list_all_templates()\n except VMwareError as e:\n raise VMwareBackendError(e)\n\n if is_basic_mode():\n # If basic mode is enabled, we should filter out templates which have more than 1 NIC\n backend_templates = [\n template\n for template in backend_templates\n if len(template['template']['nics']) == 1\n ]\n\n backend_templates_map = {\n item['library_item']['id']: item for item in backend_templates\n }\n\n frontend_templates_map = {\n p.backend_id: p\n for p in 
models.Template.objects.filter(settings=self.settings)\n }\n\n stale_ids = set(frontend_templates_map.keys()) - set(\n backend_templates_map.keys()\n )\n new_ids = set(backend_templates_map.keys()) - set(frontend_templates_map.keys())\n common_ids = set(backend_templates_map.keys()) & set(\n frontend_templates_map.keys()\n )\n\n for library_item_id in new_ids:\n template = self._backend_template_to_template(\n backend_templates_map[library_item_id]\n )\n template.save()\n\n for library_item_id in common_ids:\n backend_template = self._backend_template_to_template(\n backend_templates_map[library_item_id]\n )\n frontend_template = frontend_templates_map[library_item_id]\n fields = (\n 'cores',\n 'cores_per_socket',\n 'ram',\n 'disk',\n 'guest_os',\n 'modified',\n 'description',\n )\n update_pulled_fields(frontend_template, backend_template, fields)\n\n models.Template.objects.filter(\n settings=self.settings, backend_id__in=stale_ids\n ).delete()", "def update_gateway_template(\n templates: Dict[str, Any], source_data: str,\n namespace: Optional[str], purpose: str,\n) -> None:\n gateway_templates = templates['gateway-templates']\n assert isinstance(gateway_templates, list)\n for gateway_template in gateway_templates:\n if (\n gateway_template.get('namespace') == namespace\n and gateway_template.get('purpose') == purpose\n ):\n gateway_template['template'] = source_data\n return\n gateway_templates.append({\n 'namespace': namespace,\n 'purpose': purpose,\n 'template': source_data,\n })", "def put(self):\n try:\n _import_templates(force=True)\n return self.make_response('Imported templates')\n except:\n self.log.exception('Failed importing templates')\n return self.make_response('Failed importing templates', HTTP.SERVER_ERROR)", "def fit_to_template(self, templates: EquivDict) -> None:\n label_values = set(seginfo.label_value for seginfo in self.infos.values())\n for segment in self.infos.values():\n other_label_values = label_values - set((segment.label_value,))\n for equivalents, updated_attrs in templates.items():\n if segment.name in equivalents:\n # update the SegmentInfo instance describing the\n # semantic class\n assert 'name' in updated_attrs, 'Requiring name to identify segment!'\n segment.name = updated_attrs['name']\n # We have to dispatch relabel or swaplabel if label values are changed\n # to ensure that the numerical data member (numpy.ndarray) and the\n # describing SegmentInfo instances are synchronized \n for attr_name, new_attr_val in updated_attrs.items():\n if new_attr_val is None:\n continue\n elif attr_name == 'label_value':\n if new_attr_val == segment.label_value:\n # no change of label_value as value is identical\n continue\n elif new_attr_val in other_label_values:\n # use swaplabel to avoid label value clash\n self.swaplabel(segment.label_value, new_attr_val)\n else:\n # easy relabel as new label_value is not pre-existing\n self.relabel(segment.label_value, new_attr_val)\n else:\n setattr(segment, attr_name, new_attr_val)\n break\n # propagate state changes\n self._update_state_from_infos()", "def _update_files():\n configuration_settings = get_configuration()\n\n # Need to find all of the files that are stored in the input_files directories in order to start building the\n # reports that will be used to generate the static log files.\n for input_path in configuration_settings.processing.inputs:\n search_path = pathlib.Path(input_path)\n\n # Currently going to make the assumption that everyone is using the path naming convention that I'm dictating\n # which is 
YYYY/MM/DD/file.ext\n for file_component in search_path.glob('*/*/*/*'):\n # Store all of the files into a dictionary containing the keys and a list of the files that are associated\n # with that day\n updaters.update_files(search_path, file_component)", "def update(self, inputs): # pragma: no cover\n return inputs", "def run():\r\n template_locations = settings.MAKO_TEMPLATES\r\n for namespace, directories in template_locations.items():\r\n clear_lookups(namespace)\r\n for directory in directories:\r\n add_lookup(namespace, directory)", "def reattach_template(self, api, templates_iter):\n def get_template_input(template_id):\n uuid_list = [uuid for uuid, _ in DeviceTemplateAttached.get_raise(api, template_id)]\n values = DeviceTemplateValues(api.post(DeviceTemplateValues.api_params(template_id, uuid_list),\n DeviceTemplateValues.api_path.post))\n return values.input_list()\n\n def is_template_cli(template_id):\n return DeviceTemplate.get_raise(api, template_id).is_type_cli\n\n template_input_list = [\n (template_name, template_id, get_template_input(template_id), is_template_cli(template_id))\n for template_name, template_id in templates_iter\n ]\n return self._place_requests(api, template_input_list, is_edited=True)", "def test_update_template_registration(self):\n pass", "def test_update(self):\n # creating a new sample template\n st = SampleTemplate.create(self.metadata, self.new_study)\n # updating the sample template\n st.update(self.metadata_dict_updated)\n\n # validating values\n exp = self.metadata_dict_updated_dict['Sample1'].values()\n obs = st.get('2.Sample1').values()\n self.assertItemsEqual(obs, exp)\n\n exp = self.metadata_dict_updated_dict['Sample2'].values()\n obs = st.get('2.Sample2').values()\n self.assertItemsEqual(obs, exp)\n\n exp = self.metadata_dict_updated_dict['Sample3'].values()\n obs = st.get('2.Sample3').values()\n self.assertItemsEqual(obs, exp)\n\n # checking errors\n with self.assertRaises(QiitaDBError):\n st.update(self.metadata_dict_updated_sample_error)\n with self.assertRaises(QiitaDBError):\n st.update(self.metadata_dict_updated_column_error)", "def set_inputs(self, inputs):\n for name, config in _iteritems(inputs):\n self.add_input(name, config[\"file\"], config[\"type\"] if \"type\" in config else None)", "def update(self, *inputs):\n raise NotImplementedError", "def _update_template(template_path):\n template_definition = template_path\n\n # template output directory is output/templates, so need to create that location before pulling out the templates\n template_location = template_utilities.get_template_directory()\n\n # Install the template and get the path to the template directory for updating the configuration file.\n templates_path = template_utilities.install_template(template_location, template_definition)\n\n if templates_path:\n # Now need to find the templates definition of that zip file and locate it in the file system so that it can be\n settings = get_configuration()\n\n # Override the configuration details with the new template path. 
This should probably be handled by the\n # publishing plugin, but for now this will work\n settings.publishing.templates = str(templates_path.relative_to(get_configuration_root()))\n configuration_file_path = get_configuration_root() / 'config.yaml'\n\n dump_configuration(configuration_file_path, settings)", "def _template_formatting(field, inputs, inputs_dict_st):\n from .specs import MultiInputObj, MultiOutputFile\n\n # if a template is a function it has to be run first with the inputs as the only arg\n template = field.metadata[\"output_file_template\"]\n if callable(template):\n template = template(inputs)\n\n # as default, we assume that keep_extension is True\n keep_extension = field.metadata.get(\"keep_extension\", True)\n\n inp_fields = re.findall(r\"{\\w+}\", template)\n inp_fields_fl = re.findall(r\"{\\w+:[0-9.]+f}\", template)\n inp_fields += [re.sub(\":[0-9.]+f\", \"\", el) for el in inp_fields_fl]\n if len(inp_fields) == 0:\n return template\n\n val_dict = {}\n file_template = None\n\n for fld in inp_fields:\n fld_name = fld[1:-1] # extracting the name form {field_name}\n if fld_name not in inputs_dict_st:\n raise AttributeError(f\"{fld_name} is not provided in the input\")\n fld_value = inputs_dict_st[fld_name]\n if fld_value is attr.NOTHING:\n # if value is NOTHING, nothing should be added to the command\n return attr.NOTHING\n else:\n # checking for fields that can be treated as a file:\n # have type File, or value that is path like (including str with extensions)\n if isinstance(fld_value, os.PathLike) or (\n isinstance(fld_value, str) and \".\" in fld_value\n ):\n if file_template:\n raise Exception(\n f\"can't have multiple paths in {field.name} template,\"\n f\" but {template} provided\"\n )\n else:\n file_template = (fld_name, fld_value)\n else:\n val_dict[fld_name] = fld_value\n\n # if field is MultiOutputFile and some elements from val_dict are lists,\n # each element of the list should be used separately in the template\n # and return a list with formatted values\n if field.type is MultiOutputFile and any(\n [isinstance(el, (list, MultiInputObj)) for el in val_dict.values()]\n ):\n # all fields that are lists\n keys_list = [\n k for k, el in val_dict.items() if isinstance(el, (list, MultiInputObj))\n ]\n if any(\n [len(val_dict[key]) != len(val_dict[keys_list[0]]) for key in keys_list[1:]]\n ):\n raise Exception(\n f\"all fields used in {field.name} template have to have the same length\"\n f\" or be a single value\"\n )\n formatted_value = []\n for ii in range(len(val_dict[keys_list[0]])):\n val_dict_el = copy(val_dict)\n # updating values to a single element from the list\n for key in keys_list:\n val_dict_el[key] = val_dict[key][ii]\n\n formatted_value.append(\n _element_formatting(\n template, val_dict_el, file_template, keep_extension=keep_extension\n )\n )\n else:\n formatted_value = _element_formatting(\n template, val_dict, file_template, keep_extension=keep_extension\n )\n return formatted_value", "def test_update_device_template(self):\n pass", "def update_from_resources(self, resources):\n for resource in resources:\n cmpt_name = resource['component']\n cmpt = self.get_component(cmpt_name)\n cmpt.update_from_index(resource)", "def _set_templates(spm_dir=SPM_DIR):\n global EPI_TEMPLATE, T1_TEMPLATE, GM_TEMPLATE, WM_TEMPLATE, CSF_TEMPLATE\n\n spm_version = _get_version_spm(SPM_DIR)\n\n # Set the tpm and template paths according to SPM version\n if spm_version == 'spm12':\n template_path = 'toolbox/OldNorm'\n tpm_path = 'toolbox/OldSeg'\n else:\n 
template_path = 'templates'\n tpm_path = 'tpm'\n\n # configure template images\n EPI_TEMPLATE = os.path.join(SPM_DIR, template_path, 'EPI.nii')\n SPM_T1_TEMPLATE = os.path.join(SPM_DIR, template_path, 'T1.nii')\n T1_TEMPLATE = \"/usr/share/data/fsl-mni152-templates/avg152T1.nii\"\n if not os.path.isfile(T1_TEMPLATE):\n T1_TEMPLATE += '.gz'\n if not os.path.exists(T1_TEMPLATE):\n T1_TEMPLATE = SPM_T1_TEMPLATE\n GM_TEMPLATE = os.path.join(SPM_DIR, tpm_path, 'grey.nii')\n WM_TEMPLATE = os.path.join(SPM_DIR, tpm_path, 'white.nii')\n CSF_TEMPLATE = os.path.join(SPM_DIR, tpm_path, 'csf.nii')", "def update_input_model(input_model, heat_template):\n for server in input_model['servers']:\n heat_server = list(filter(lambda s: server['id'] == s['name'],\n heat_template['servers']))\n if not heat_server:\n # Skip servers that have been filtered out\n # by the heat template generator\n continue\n server['nic-mapping'] = \\\n \"HEAT-{}\".format(heat_server[0]['interface_model'])\n\n for interface_model in itervalues(heat_template['interface_models']):\n mapping_name = \"HEAT-{}\".format(interface_model['name'])\n physical_ports = []\n nic_mapping = {\n 'name': mapping_name,\n 'physical-ports': physical_ports\n }\n for port_idx, port in enumerate(interface_model['ports']):\n physical_ports.append({\n 'logical-name': port['name'],\n 'type': 'simple-port',\n 'bus-address': \"0000:00:{:02x}.0\".format(port_idx + 3)\n })\n\n # Overwrite the mapping, if it's already defined\n existing_mapping = list(filter(lambda mapping:\n mapping[1]['name'] == mapping_name,\n enumerate(input_model['nic-mappings'])))\n if existing_mapping:\n input_model['nic-mappings'][existing_mapping[0][0]] = nic_mapping\n else:\n input_model['nic-mappings'].append(nic_mapping)\n\n return input_model", "def update_values(self):\n for key in self.inputs.keys():\n value = self.inputs[key]['entry'].get()\n self.inputs[key]['value'] = value", "def _fill_template(\n self,\n template: Dict[Text, Any],\n template_vars: Dict[Text, Any]\n ) -> Dict[Text, Any]:\n\n line_object_keys = [\"quickReply\", \"items\", \"action\", \"template\", \"actions\"]\n\n if type(template) == list:\n for item in template:\n self._fill_template(item, template_vars)\n else:\n self._fill_template_text(template, template_vars)\n for key in line_object_keys:\n if key in template:\n self._fill_template(template[key], template_vars)\n\n return template" ]
[ "0.6427744", "0.6378328", "0.6020482", "0.5955375", "0.57841974", "0.56510425", "0.5642654", "0.56077933", "0.5507823", "0.5482123", "0.5453561", "0.54000205", "0.53835785", "0.5374027", "0.5366222", "0.5319401", "0.529433", "0.5287437", "0.52484876", "0.52362716", "0.5217521", "0.521424", "0.52128106", "0.52058566", "0.520359", "0.5203486", "0.51936746", "0.5192019", "0.5178563", "0.51727754" ]
0.66228896
0
Update a single template from the input_spec or output_spec based on the value from inputs_dict (checking the types of the fields that have "output_file_template")
def template_update_single( field, inputs, inputs_dict_st=None, output_dir=None, spec_type="input" ): # if input_dict_st with state specific value is not available, # the dictionary will be created from inputs object from ..utils.typing import TypeParser # noqa from pydra.engine.specs import LazyField VALID_TYPES = (str, ty.Union[str, bool], Path, ty.Union[Path, bool], LazyField) if inputs_dict_st is None: inputs_dict_st = attr.asdict(inputs, recurse=False) if spec_type == "input": inp_val_set = inputs_dict_st[field.name] if inp_val_set is not attr.NOTHING and not TypeParser.is_instance( inp_val_set, VALID_TYPES ): raise TypeError( f"'{field.name}' field has to be a Path instance or a bool, but {inp_val_set} set" ) if isinstance(inp_val_set, bool) and field.type in (Path, str): raise TypeError( f"type of '{field.name}' is Path, consider using Union[Path, bool]" ) elif spec_type == "output": if not TypeParser.contains_type(FileSet, field.type): raise TypeError( f"output {field.name} should be file-system object, but {field.type} " "set as the type" ) else: raise TypeError(f"spec_type can be input or output, but {spec_type} provided") # for inputs that the value is set (so the template is ignored) if spec_type == "input" and isinstance(inputs_dict_st[field.name], (str, Path)): return inputs_dict_st[field.name] elif spec_type == "input" and inputs_dict_st[field.name] is False: # if input fld is set to False, the fld shouldn't be used (setting NOTHING) return attr.NOTHING else: # inputs_dict[field.name] is True or spec_type is output value = _template_formatting(field, inputs, inputs_dict_st) # changing path so it is in the output_dir if output_dir and value is not attr.NOTHING: # should be converted to str, it is also used for input fields that should be str if type(value) is list: return [str(output_dir / Path(val).name) for val in value] else: return str(output_dir / Path(value).name) else: return attr.NOTHING
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def template_update(inputs, output_dir, state_ind=None, map_copyfiles=None):\n\n inputs_dict_st = attr.asdict(inputs, recurse=False)\n if map_copyfiles is not None:\n inputs_dict_st.update(map_copyfiles)\n\n if state_ind is not None:\n for k, v in state_ind.items():\n k = k.split(\".\")[1]\n inputs_dict_st[k] = inputs_dict_st[k][v]\n\n from .specs import attr_fields\n\n # Collect templated inputs for which all requirements are satisfied.\n fields_templ = [\n field\n for field in attr_fields(inputs)\n if field.metadata.get(\"output_file_template\")\n and all(\n getattr(inputs, required_field) is not attr.NOTHING\n for required_field in field.metadata.get(\"requires\", ())\n )\n ]\n\n dict_mod = {}\n for fld in fields_templ:\n dict_mod[fld.name] = template_update_single(\n field=fld,\n inputs=inputs,\n inputs_dict_st=inputs_dict_st,\n output_dir=output_dir,\n )\n # adding elements from map_copyfiles to fields with templates\n if map_copyfiles:\n dict_mod.update(map_copyfiles)\n return dict_mod", "def _template_formatting(field, inputs, inputs_dict_st):\n from .specs import MultiInputObj, MultiOutputFile\n\n # if a template is a function it has to be run first with the inputs as the only arg\n template = field.metadata[\"output_file_template\"]\n if callable(template):\n template = template(inputs)\n\n # as default, we assume that keep_extension is True\n keep_extension = field.metadata.get(\"keep_extension\", True)\n\n inp_fields = re.findall(r\"{\\w+}\", template)\n inp_fields_fl = re.findall(r\"{\\w+:[0-9.]+f}\", template)\n inp_fields += [re.sub(\":[0-9.]+f\", \"\", el) for el in inp_fields_fl]\n if len(inp_fields) == 0:\n return template\n\n val_dict = {}\n file_template = None\n\n for fld in inp_fields:\n fld_name = fld[1:-1] # extracting the name form {field_name}\n if fld_name not in inputs_dict_st:\n raise AttributeError(f\"{fld_name} is not provided in the input\")\n fld_value = inputs_dict_st[fld_name]\n if fld_value is attr.NOTHING:\n # if value is NOTHING, nothing should be added to the command\n return attr.NOTHING\n else:\n # checking for fields that can be treated as a file:\n # have type File, or value that is path like (including str with extensions)\n if isinstance(fld_value, os.PathLike) or (\n isinstance(fld_value, str) and \".\" in fld_value\n ):\n if file_template:\n raise Exception(\n f\"can't have multiple paths in {field.name} template,\"\n f\" but {template} provided\"\n )\n else:\n file_template = (fld_name, fld_value)\n else:\n val_dict[fld_name] = fld_value\n\n # if field is MultiOutputFile and some elements from val_dict are lists,\n # each element of the list should be used separately in the template\n # and return a list with formatted values\n if field.type is MultiOutputFile and any(\n [isinstance(el, (list, MultiInputObj)) for el in val_dict.values()]\n ):\n # all fields that are lists\n keys_list = [\n k for k, el in val_dict.items() if isinstance(el, (list, MultiInputObj))\n ]\n if any(\n [len(val_dict[key]) != len(val_dict[keys_list[0]]) for key in keys_list[1:]]\n ):\n raise Exception(\n f\"all fields used in {field.name} template have to have the same length\"\n f\" or be a single value\"\n )\n formatted_value = []\n for ii in range(len(val_dict[keys_list[0]])):\n val_dict_el = copy(val_dict)\n # updating values to a single element from the list\n for key in keys_list:\n val_dict_el[key] = val_dict[key][ii]\n\n formatted_value.append(\n _element_formatting(\n template, val_dict_el, file_template, keep_extension=keep_extension\n )\n )\n else:\n 
formatted_value = _element_formatting(\n template, val_dict, file_template, keep_extension=keep_extension\n )\n return formatted_value", "def __fill_template__(self,template_file,output_fname):\n dictionary = {}\n for k,v in self.__dict__.iteritems():\n if k == 'sample_key':\n try:\n int(v)\n new_sample_key = \"Sample_\" + str(v)\n dictionary.update({k:new_sample_key})\n continue\n except ValueError:\n pass\n dictionary.update({k:str(v)})\n dictionary.update({'restats_tail': self.restats_file + '.tail'})\n with open(output_fname,'w') as f:\n string = fill_template(template_file,dictionary)\n f.write(string)", "def update_template(template, trial):\n assert isinstance(template, dict) or isinstance(template, list)\n items = template.items() if isinstance(template, dict) else enumerate(template)\n\n for key, value in items:\n if isinstance(value, str):\n if value in trial:\n template[key] = trial[value]\n elif isinstance(value, dict) or isinstance(value, list):\n template[key] = ConfigGenerator.update_template(template[key], trial)\n\n return template", "def map_template(template: dict, input_: dict) -> None:\n for k, v in template.items():\n config_val = input_.get(k)\n\n if isinstance(v, dict) and k != 'NullHandler':\n map_template(v, input_)\n\n if config_val:\n template[k] = config_val.upper() if k == 'level' else config_val", "def create_file_from_template(template_file, output_file, substitution_dict):\n with open(template_file) as fl:\n template = fl.read()\n template = Template(template).safe_substitute(**substitution_dict)\n\n with open(output_file, 'w') as output_handle:\n output_handle.write(template)", "def template_replace(template, replace_map, result):\n # Read content of source file.\n with open(template) as fp:\n lines = fp.readlines()\n # Replace placeholders.\n for key, value in list(replace_map.items()):\n for i, line in enumerate(lines):\n # Ignore VHDL comments\n if not line.strip().startswith('--'):\n lines[i] = line.replace(key, value)\n # Write content to destination file.\n with open(result, 'w') as fp:\n fp.write(''.join(lines))", "def _modify_template_nics_according_to_input(template_nics, input_nics, cmd, client,\n resource_group_name, vm_name,\n location, private_cloud):\n # Populating the nic names of vm-template in a dictionary,\n # and mapping them to their index in template_nics list\n vm_template_nic_names = {}\n for (i, nic) in enumerate(template_nics):\n vm_template_nic_names[nic.virtual_nic_name] = i\n\n from .vendored_sdks.models import VirtualNic\n from .vendored_sdks.models import VirtualNetwork\n from ._validators import virtual_network_name_or_id_validator\n\n # Check if nics entered by a user exist in vm-template,\n # then override the properties specified. 
Else create a new nic.\n for nic in input_nics:\n if nic['name'] in vm_template_nic_names.keys():\n index = vm_template_nic_names[nic['name']]\n if 'virtual-network' in nic.keys():\n template_nics[index].network.id = nic['virtual-network']\n if 'adapter' in nic.keys():\n template_nics[index].nic_type = nic['adapter']\n if 'power-on-boot' in nic.keys():\n template_nics[index].power_on_boot = nic['power-on-boot']\n template_nics[index].virtual_nic_id = None\n\n else:\n nic_name = nic['name']\n if 'virtual-network' in nic.keys():\n vnet = nic['virtual-network']\n else:\n raise CLIError('virtual-network parameter not specified for nic ' +\n nic_name + \".\")\n if 'adapter' in nic.keys():\n adapter = nic['adapter']\n else:\n raise CLIError('adapter parameter not specified for nic ' +\n nic_name + \".\")\n if 'power-on-boot' in nic.keys():\n power_on_boot = nic['power-on-boot']\n else:\n raise CLIError('power-on-boot parameter not specified for nic ' +\n nic_name + \".\")\n\n vnet = virtual_network_name_or_id_validator(cmd, client, vnet,\n resource_group_name, vm_name,\n location, private_cloud)\n network = VirtualNetwork(id=vnet)\n nic_object = VirtualNic(network=network,\n nic_type=adapter,\n power_on_boot=power_on_boot)\n template_nics.append(nic_object)\n return template_nics", "def update_service_template(\n templates: Dict[str, Any], source_data: str,\n namespace: Optional[str], service: Optional[str], color: Optional[str], purpose: str,\n) -> None:\n service_templates = templates['service-templates']\n assert isinstance(service_templates, list)\n for service_template in service_templates:\n if (\n service_template.get('namespace') == namespace\n and service_template.get('service') == service\n and service_template.get('color') == color\n and service_template.get('purpose') == purpose\n ):\n service_template['template'] = source_data\n return\n service_templates.append({\n 'namespace': namespace,\n 'service': service,\n 'color': color,\n 'purpose': purpose,\n 'template': source_data,\n })", "def input_template(template, fields):\n editor = os.environ.get('EDITOR', '/usr/bin/vim')\n with tempfile.NamedTemporaryFile('w+t') as ofile:\n ofile.write(template % fields)\n ofile.flush()\n user_command = '%s %s' % (editor, ofile.name)\n if os.system(user_command) != 0:\n raise Error('Error acquiring user input (command was %r).' 
% user_command)\n with open(ofile.name, 'r') as ifile:\n filled_template = ifile.read()\n\n fields = dict(parse_template(filled_template))\n return fields", "def update_input_model(input_model, heat_template):\n for server in input_model['servers']:\n heat_server = list(filter(lambda s: server['id'] == s['name'],\n heat_template['servers']))\n if not heat_server:\n # Skip servers that have been filtered out\n # by the heat template generator\n continue\n server['nic-mapping'] = \\\n \"HEAT-{}\".format(heat_server[0]['interface_model'])\n\n for interface_model in itervalues(heat_template['interface_models']):\n mapping_name = \"HEAT-{}\".format(interface_model['name'])\n physical_ports = []\n nic_mapping = {\n 'name': mapping_name,\n 'physical-ports': physical_ports\n }\n for port_idx, port in enumerate(interface_model['ports']):\n physical_ports.append({\n 'logical-name': port['name'],\n 'type': 'simple-port',\n 'bus-address': \"0000:00:{:02x}.0\".format(port_idx + 3)\n })\n\n # Overwrite the mapping, if it's already defined\n existing_mapping = list(filter(lambda mapping:\n mapping[1]['name'] == mapping_name,\n enumerate(input_model['nic-mappings'])))\n if existing_mapping:\n input_model['nic-mappings'][existing_mapping[0][0]] = nic_mapping\n else:\n input_model['nic-mappings'].append(nic_mapping)\n\n return input_model", "def update_gateway_template(\n templates: Dict[str, Any], source_data: str,\n namespace: Optional[str], purpose: str,\n) -> None:\n gateway_templates = templates['gateway-templates']\n assert isinstance(gateway_templates, list)\n for gateway_template in gateway_templates:\n if (\n gateway_template.get('namespace') == namespace\n and gateway_template.get('purpose') == purpose\n ):\n gateway_template['template'] = source_data\n return\n gateway_templates.append({\n 'namespace': namespace,\n 'purpose': purpose,\n 'template': source_data,\n })", "def post_service_template_update(self, resource_id, resource_dict):\n pass", "def _modify_template_disks_according_to_input(template_disks, input_disks):\n\n # Populating the disk names of vm-template in a dictionary,\n # and mapping them to their index in template_disks list\n vm_template_disk_names = {}\n for (i, disk) in enumerate(template_disks):\n vm_template_disk_names[disk.virtual_disk_name] = i\n\n from .vendored_sdks.models import VirtualDisk\n\n # Check if disks entered by the user exist in vm-template,\n # then override the properties specified. 
Else create a new disk.\n for disk in input_disks:\n if disk['name'] in vm_template_disk_names.keys():\n index = vm_template_disk_names[disk['name']]\n if 'controller' in disk.keys():\n template_disks[index].controller_id = disk['controller']\n if 'mode' in disk.keys():\n template_disks[index].independence_mode = disk['mode']\n if 'size' in disk.keys():\n template_disks[index].total_size = disk['size']\n\n else:\n disk_name = disk['name']\n if 'controller' in disk.keys():\n controller = disk['controller']\n else:\n raise CLIError('controller parameter not specified for disk ' + disk_name + \".\")\n if 'mode' in disk.keys():\n mode = disk['mode']\n else:\n raise CLIError('mode parameter not specified for disk ' + disk_name + \".\")\n if 'size' in disk.keys():\n size = disk['size']\n else:\n raise CLIError('size parameter not specified for disk ' + disk_name + \".\")\n\n disk_object = VirtualDisk(controller_id=controller,\n independence_mode=mode,\n total_size=size)\n template_disks.append(disk_object)\n return template_disks", "def apply_pkginfo_template(override, pkginfo_template):\n # Need to \"convert\" Objc object to dict.\n override[\"Input\"][\"pkginfo\"].update(dict(pkginfo_template))\n print \"\\tApplied pkginfo template.\"", "def generate_input_file(temp_type, out_file):\r\n\r\n file_path = os.path.realpath(__file__)\r\n dir_path = os.sep.join(file_path.split(os.sep)[:-1])\r\n\r\n if temp_type == 0:\r\n template = 'Template00_CompleteParameters.py'\r\n elif temp_type == 1:\r\n template = 'Template01_SingleRowCylindricalRollerBearing.py'\r\n elif temp_type == 3:\r\n template = 'Template03_CylindricalRollerThustBearing.py'\r\n elif temp_type == 4:\r\n template = 'Template04_BallOnDisk.py'\r\n elif temp_type == 5:\r\n template = 'Template05_PinOnDisk.py'\r\n elif temp_type == 6:\r\n template = 'Template06_4Ball.py'\r\n elif temp_type == 7:\r\n template = 'Template07_BallOn3Plates.py'\r\n elif temp_type == 8:\r\n template = 'Template08_RingOnRing.py'\r\n else:\r\n raise ValueError(\"temp_type value '{}' undefined\".format(temp_type))\r\n\r\n shutil.copy(os.sep.join([dir_path, 'UserInputTemplates', template]),\r\n out_file)\r\n return out_file", "def update_templates(self):\n\n params = self.chose_param_value(\"--temp\")\n self._check_path_availability([\"get_template_dir\", \"get_template_dir_to\"])\n if self._check_whether_has_params(params):\n self.updater.update_files(\n self.analizer.get_template_dir(),\n self.analizer.get_template_dir_to(),\n params\n )\n return self.write_debug_message(\"Temp files upgrade is done!\\n\")\n return self.write_error_message(\"You haven't passed any params about template files\")", "def generate_input_files(elevation_folder_path, template_input_file_path):\n import pathlib\n json_dict = get_inputs_from_file(template_input_file_path)\n\n path_to_match = pathlib.Path(elevation_folder_path)\n\n for heightfile in path_to_match.glob(\"*.npy\"):\n dot_index = str(heightfile).rfind('.')\n filename_base = str(heightfile)[:dot_index]\n opt_output_filename = filename_base + \".out\"\n opt_input_filename = filename_base + \".json\"\n\n localdict = json_dict.copy()\n\n localdict[\"output_file\"] = opt_output_filename\n localdict[\"elevation_file\"] = str(heightfile)\n\n dump_json_dict(out_dict=localdict, filename=opt_input_filename)", "def _prepare_for_submission(self, tempfolder, inputdict):\n input_validator = self._get_input_validator(inputdict=inputdict)\n local_input_folder = input_validator(\n name='local_input_folder', valid_types=FolderData, 
required=False\n )\n remote_input_folder = input_validator(\n name='remote_input_folder', valid_types=RemoteData, required=False\n )\n\n parameters = input_validator(\n name='parameters', valid_types=ParameterData\n )\n param_dict = self._get_validated_parameters_dict(parameters)\n\n projections = input_validator(\n name='projections',\n valid_types=(OrbitalData, List),\n required=False\n )\n kpoints = input_validator(name='kpoints', valid_types=KpointsData)\n kpoint_path = input_validator(\n name='kpoint_path', valid_types=ParameterData, required=False\n )\n structure = input_validator(\n name='structure', valid_types=StructureData\n )\n\n settings = input_validator(\n name='settings', valid_types=ParameterData, required=False\n )\n if settings is None:\n settings_dict = {}\n else:\n settings_dict_raw = settings.get_dict()\n settings_dict = {\n key.lower(): val\n for key, val in settings_dict_raw.items()\n }\n if len(settings_dict_raw) != len(settings_dict):\n raise InputValidationError(\n 'Input settings contain duplicate keys.'\n )\n pp_setup = settings_dict.pop('postproc_setup', False)\n if pp_setup:\n param_dict.update({'postproc_setup': True})\n\n if local_input_folder is None and remote_input_folder is None and pp_setup is False:\n raise InputValidationError(\n 'Either local_input_folder or remote_input_folder must be set.'\n )\n\n code = input_validator(name='code', valid_types=Code)\n\n ############################################################\n # End basic check on inputs\n ############################################################\n random_projections = settings_dict.pop('random_projections', False)\n\n write_win(\n filename=tempfolder.get_abs_path(self._INPUT_FILE),\n parameters=param_dict,\n structure=structure,\n kpoints=kpoints,\n kpoint_path=kpoint_path,\n projections=projections,\n random_projections=random_projections,\n )\n\n if remote_input_folder is not None:\n remote_input_folder_uuid = remote_input_folder.get_computer().uuid\n remote_input_folder_path = remote_input_folder.get_remote_path()\n\n t_dest = get_authinfo(\n computer=remote_input_folder.get_computer(),\n aiidauser=remote_input_folder.get_user()\n ).get_transport()\n with t_dest:\n remote_folder_content = t_dest.listdir(\n path=remote_input_folder_path\n )\n\n if local_input_folder is not None:\n local_folder_content = local_input_folder.get_folder_list()\n if pp_setup:\n required_files = []\n else:\n required_files = [\n self._SEEDNAME + suffix for suffix in ['.mmn', '.amn']\n ]\n optional_files = [\n self._SEEDNAME + suffix for suffix in ['.eig', '.chk', '.spn']\n ]\n input_files = required_files + optional_files\n wavefunctions_files = ['UNK*']\n\n def files_finder(file_list, exact_patterns, glob_patterns):\n result = [f for f in exact_patterns if (f in file_list)]\n import fnmatch\n for glob_p in glob_patterns:\n result += fnmatch.filter(file_list, glob_p)\n return result\n\n # Local FolderData has precedence over RemoteData\n if local_input_folder is not None:\n found_in_local = files_finder(\n local_folder_content, input_files, wavefunctions_files\n )\n else:\n found_in_local = []\n if remote_input_folder is not None:\n found_in_remote = files_finder(\n remote_folder_content, input_files, wavefunctions_files\n )\n found_in_remote = [\n f for f in found_in_remote if f not in found_in_local\n ]\n else:\n found_in_remote = []\n\n not_found = [\n f for f in required_files\n if f not in found_in_remote + found_in_local\n ]\n if len(not_found) != 0:\n raise InputValidationError(\n \"{} necessary input 
files were not found: {} \".format(\n len(not_found), ', '.join(str(nf) for nf in not_found)\n )\n )\n\n remote_copy_list = []\n remote_symlink_list = []\n local_copy_list = []\n #Here we enforce that everything except checkpoints are symlinked\n #because in W90 you never modify input files on the run\n ALWAYS_COPY_FILES = [self._CHK_FILE]\n for f in found_in_remote:\n file_info = (\n remote_input_folder_uuid,\n os.path.join(remote_input_folder_path, f), os.path.basename(f)\n )\n if f in ALWAYS_COPY_FILES:\n remote_copy_list.append(file_info)\n else:\n remote_symlink_list.append(file_info)\n for f in found_in_local:\n local_copy_list.append(\n (local_input_folder.get_abs_path(f), os.path.basename(f))\n )\n\n # Add any custom copy/sym links\n remote_symlink_list += settings_dict.pop(\n \"additional_remote_symlink_list\", []\n )\n remote_copy_list += settings_dict.pop(\n \"additional_remote_copy_list\", []\n )\n local_copy_list += settings_dict.pop(\"additional_local_copy_list\", [])\n\n #######################################################################\n\n calcinfo = CalcInfo()\n calcinfo.uuid = self.uuid\n calcinfo.local_copy_list = local_copy_list\n calcinfo.remote_copy_list = remote_copy_list\n calcinfo.remote_symlink_list = remote_symlink_list\n\n codeinfo = CodeInfo()\n codeinfo.code_uuid = code.uuid\n #codeinfo.withmpi = True # Current version of W90 can be run in parallel\n codeinfo.cmdline_params = [self._INPUT_FILE]\n\n calcinfo.codes_info = [codeinfo]\n calcinfo.codes_run_mode = code_run_modes.SERIAL\n\n # Retrieve files\n calcinfo.retrieve_list = []\n calcinfo.retrieve_list.append(self._OUTPUT_FILE)\n calcinfo.retrieve_list.append(self._ERROR_FILE)\n if pp_setup:\n calcinfo.retrieve_list.append(self._NNKP_FILE)\n calcinfo.retrieve_singlefile_list = [\n ('output_nnkp', 'singlefile', self._NNKP_FILE)\n ]\n\n calcinfo.retrieve_list += [\n '{}_band.dat'.format(self._SEEDNAME),\n '{}_band.kpt'.format(self._SEEDNAME)\n ]\n\n if settings_dict.pop('retrieve_hoppings', False):\n calcinfo.retrieve_list += [\n '{}_wsvec.dat'.format(self._SEEDNAME),\n '{}_hr.dat'.format(self._SEEDNAME),\n '{}_centres.xyz'.format(self._SEEDNAME),\n ]\n\n # Retrieves bands automatically, if they are calculated\n\n calcinfo.retrieve_list += settings_dict.pop(\n \"additional_retrieve_list\", []\n )\n\n # pop input keys not used here\n settings_dict.pop('seedname', None)\n if settings_dict:\n raise InputValidationError(\n \"The following keys in settings are unrecognized: {}\".format(\n settings_dict.keys()\n )\n )\n\n return calcinfo", "def test_format_map():\n template_filelist = listdir(RTEMPLATE_PATH)\n\n R_files = []\n json_files = []\n for file in template_filelist:\n if '.r' in file:\n file = file.replace('.r', '')\n R_files.append(file)\n elif '.json' in file:\n file = file.replace('.json', '')\n json_files.append(file)\n\n\n for template in R_files:\n template_filepath = path.join(RTEMPLATE_PATH, template + '.r')\n metadata_filepath = path.join(RTEMPLATE_PATH, template + '.json')\n\n with open(template_filepath, 'r') as t_fp:\n r_text = t_fp.read()\n\n try:\n with open(metadata_filepath, 'r') as m_fp:\n metadata = json.load(m_fp)\n except Exception as err:\n print(metadata_filepath)\n raise err\n\n format_dict = {}\n for key in metadata['required_args']:\n format_dict[key] = 'TEST'\n\n ## Actual test: apply `format_map` to r_text#\n try:\n r_text = r_text.format_map(format_dict)\n except Exception as err:\n print(template_filepath)\n raise err", "def test_update(self):\n # creating a new sample 
template\n st = SampleTemplate.create(self.metadata, self.new_study)\n # updating the sample template\n st.update(self.metadata_dict_updated)\n\n # validating values\n exp = self.metadata_dict_updated_dict['Sample1'].values()\n obs = st.get('2.Sample1').values()\n self.assertItemsEqual(obs, exp)\n\n exp = self.metadata_dict_updated_dict['Sample2'].values()\n obs = st.get('2.Sample2').values()\n self.assertItemsEqual(obs, exp)\n\n exp = self.metadata_dict_updated_dict['Sample3'].values()\n obs = st.get('2.Sample3').values()\n self.assertItemsEqual(obs, exp)\n\n # checking errors\n with self.assertRaises(QiitaDBError):\n st.update(self.metadata_dict_updated_sample_error)\n with self.assertRaises(QiitaDBError):\n st.update(self.metadata_dict_updated_column_error)", "def _update_template(template_path):\n template_definition = template_path\n\n # template output directory is output/templates, so need to create that location before pulling out the templates\n template_location = template_utilities.get_template_directory()\n\n # Install the template and get the path to the template directory for updating the configuration file.\n templates_path = template_utilities.install_template(template_location, template_definition)\n\n if templates_path:\n # Now need to find the templates definition of that zip file and locate it in the file system so that it can be\n settings = get_configuration()\n\n # Override the configuration details with the new template path. This should probably be handled by the\n # publishing plugin, but for now this will work\n settings.publishing.templates = str(templates_path.relative_to(get_configuration_root()))\n configuration_file_path = get_configuration_root() / 'config.yaml'\n\n dump_configuration(configuration_file_path, settings)", "def calc_template(template_def, config):\n template = Template(**template_def)\n #print \"template_def:\", template_def, \"config:\", config\n try:\n retvals = process_template(template, config, target=(None, None))\n except Exception:\n print(\"==== template ====\"); pprint(template_def)\n print(\"==== config ====\"); pprint(config)\n #traceback.print_exc()\n raise\n output = {}\n for rkey, rv in retvals.items():\n module_id, terminal_id = rkey\n module_key = str(module_id)\n output.setdefault(module_key, {})\n output[module_key][terminal_id] = rv.todict()\n return output", "def update_with_template_args(args, list_args=None):\r\n if not args.get('--template'):\r\n return\r\n\r\n list_args = list_args or []\r\n\r\n template_path = args.pop('--template')\r\n if not os.path.exists(template_path):\r\n raise ArgumentError(\r\n 'File does not exist [-t | --template] = %s'\r\n % template_path)\r\n\r\n config = configparser.ConfigParser()\r\n ini_str = '[settings]\\n' + open(\r\n os.path.expanduser(template_path), 'r').read()\r\n ini_fp = StringIO(ini_str)\r\n config.readfp(ini_fp)\r\n\r\n # Merge template options with the options passed in\r\n for key, value in config.items('settings'):\r\n option_key = '--%s' % key\r\n if option_key in list_args:\r\n value = value.split(',')\r\n if not args.get(option_key):\r\n args[option_key] = value", "def updateFieldsForInput(self, input_name, fields_dict, input_field_name=\"Name\"):\r\n \r\n where_clause = \"%s = \\'%s\\'\" % (input_field_name, input_name)\r\n self.arc_table_utils.updateFields(self.fullpath, fields_dict, {'where_clause':where_clause})", "def op_replace(template_data, field_hierarchy_list, mappings):\n op_trace = []\n\n def replace(json_obj, search_list):\n if len(search_list) == 0:\n 
return\n\n if isinstance(json_obj, dict):\n for key, value in json_obj.items():\n if key == search_list[0]:\n if len(search_list) > 1:\n replace(value, search_list[1:])\n elif isinstance(value, Hashable):\n new_val = mappings.get(value)\n if new_val is not None:\n op_trace.append('Template {name}, updated {path}: '\n '{from_val} -> {to_val}'.format(name=template_data['templateName'],\n path='/'.join(field_hierarchy_list),\n from_val=value,\n to_val=new_val))\n json_obj[key] = new_val\n else:\n replace(value, search_list)\n\n elif isinstance(json_obj, list):\n for elem in json_obj:\n replace(elem, search_list)\n\n replace(template_data, field_hierarchy_list)\n\n return op_trace", "def apply_replicator(self, replicator, template_values, template_index=-1,\n template_value=None, update_input_references=False):\n return {}", "def cheetah_template(self, pre=False):\n if self.is_req_output:\n cht_tmpl = self.req_out_chth\n return cht_tmpl.substitute(self.xml_out)\n elif self.is_output:\n xml_out = self.xml_out\n xml_out['out_sel_name'] = self.out_sel_name\n cht_tmpl = self.file_chth\n return cht_tmpl.substitute(self.xml_out)\n elif self.is_input and not pre:\n if self.pname in self.gen_in_fmt:\n if self.gen_in_fmt[self.pname] == 'vcf,vcf_bgzip':\n cht_tmpl = self.vcf_choose\n else:\n cht_tmpl = PercentTemplate(self.reg_arg)\n elif self.pname in self.tool_data[self.tool_name]['input_fmt']:\n cht_tmpl = self.req_out_chth\n return cht_tmpl.substitute(self.xml_out)\n elif self.is_input and pre:\n cht_tmpl = self.vcf_tabix\n return cht_tmpl.substitute(self.xml_out)\n else:\n if self.xml_out['section'] not in ['required']:\n template_string = self.ext_arg\n else:\n template_string = self.reg_arg\n if self.xml_out['type'] == 'boolean':\n cht_tmpl = PercentTemplate(template_string.replace('%argument ', ''))\n else:\n cht_tmpl = PercentTemplate(template_string)\n return cht_tmpl.substitute(self.xml_out)", "def run_template(template, config):\n all_results = {}\n for nodenum, wires in template:\n # Find the modules\n node = template.modules[nodenum]\n module_id = node['module'] #template.modules[node]\n module = lookup_module(module_id)\n inputs = _map_inputs(module, wires)\n \n # substitute values for inputs\n kwargs = dict((k, _lookup_results(all_results, v)) \n for k, v in inputs.items())\n \n # Include configuration information\n kwargs.update(node.get('config', {}))\n kwargs.update(config[nodenum])\n result = module.action(**kwargs)\n# print result\n all_results[nodenum] = result\n \n# return all_results\n# FIXXXXXXXXXXXXXXXXXXXXXX ***********************\n from .offspecular.instruments import convert_to_plottable\n return [convert_to_plottable(value['output']) if 'output' in value else {} for key, value in all_results.items()]", "def render_template(template_path, output_path, values):\n with open(template_path) as template_file:\n template = template_file.read()\n\n rendered = template.format(**values)\n with open(output_path, 'w') as output_file:\n output_file.write(rendered)" ]
[ "0.7439741", "0.72382534", "0.60887206", "0.59132934", "0.5788574", "0.56543916", "0.56279784", "0.5626978", "0.56007934", "0.5551346", "0.5531838", "0.54618365", "0.54196817", "0.5366506", "0.5351712", "0.5350561", "0.5340694", "0.5319279", "0.5318077", "0.53120196", "0.53017884", "0.5294785", "0.5286759", "0.52751625", "0.5268135", "0.5214826", "0.52070135", "0.5178805", "0.51627004", "0.5151686" ]
0.7494106
0
Formatting the field template based on the values from inputs. Taking into account that the field with a template can be a MultiOutputFile and the field values needed in the template can be a list, returning a list of formatted templates in that case. Allowing for multiple input values used in the template as long as there is no more than one file (i.e. File, PathLike or string with extensions)
def _template_formatting(field, inputs, inputs_dict_st): from .specs import MultiInputObj, MultiOutputFile # if a template is a function it has to be run first with the inputs as the only arg template = field.metadata["output_file_template"] if callable(template): template = template(inputs) # as default, we assume that keep_extension is True keep_extension = field.metadata.get("keep_extension", True) inp_fields = re.findall(r"{\w+}", template) inp_fields_fl = re.findall(r"{\w+:[0-9.]+f}", template) inp_fields += [re.sub(":[0-9.]+f", "", el) for el in inp_fields_fl] if len(inp_fields) == 0: return template val_dict = {} file_template = None for fld in inp_fields: fld_name = fld[1:-1] # extracting the name form {field_name} if fld_name not in inputs_dict_st: raise AttributeError(f"{fld_name} is not provided in the input") fld_value = inputs_dict_st[fld_name] if fld_value is attr.NOTHING: # if value is NOTHING, nothing should be added to the command return attr.NOTHING else: # checking for fields that can be treated as a file: # have type File, or value that is path like (including str with extensions) if isinstance(fld_value, os.PathLike) or ( isinstance(fld_value, str) and "." in fld_value ): if file_template: raise Exception( f"can't have multiple paths in {field.name} template," f" but {template} provided" ) else: file_template = (fld_name, fld_value) else: val_dict[fld_name] = fld_value # if field is MultiOutputFile and some elements from val_dict are lists, # each element of the list should be used separately in the template # and return a list with formatted values if field.type is MultiOutputFile and any( [isinstance(el, (list, MultiInputObj)) for el in val_dict.values()] ): # all fields that are lists keys_list = [ k for k, el in val_dict.items() if isinstance(el, (list, MultiInputObj)) ] if any( [len(val_dict[key]) != len(val_dict[keys_list[0]]) for key in keys_list[1:]] ): raise Exception( f"all fields used in {field.name} template have to have the same length" f" or be a single value" ) formatted_value = [] for ii in range(len(val_dict[keys_list[0]])): val_dict_el = copy(val_dict) # updating values to a single element from the list for key in keys_list: val_dict_el[key] = val_dict[key][ii] formatted_value.append( _element_formatting( template, val_dict_el, file_template, keep_extension=keep_extension ) ) else: formatted_value = _element_formatting( template, val_dict, file_template, keep_extension=keep_extension ) return formatted_value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _element_formatting(template, values_template_dict, file_template, keep_extension):\n if file_template:\n fld_name_file, fld_value_file = file_template\n # splitting the filename for name and extension,\n # the final value used for formatting depends on the template and keep_extension flag\n name, *ext = Path(fld_value_file).name.split(\".\", maxsplit=1)\n filename = str(Path(fld_value_file).parent / name)\n # updating values_template_dic with the name of file\n values_template_dict[fld_name_file] = filename\n # if keep_extension is False, the extensions are removed\n if keep_extension is False:\n ext = []\n else:\n ext = []\n\n # if file_template is at the end of the template, the simplest formatting should work\n if file_template and template.endswith(f\"{{{fld_name_file}}}\"):\n # recreating fld_value with the updated extension\n values_template_dict[fld_name_file] = \".\".join([filename] + ext)\n formatted_value = template.format(**values_template_dict)\n # file_template provided, but the template doesn't have its own extension\n elif file_template and \".\" not in template:\n # if the fld_value_file has extension, it will be moved to the end\n formatted_value = \".\".join([template.format(**values_template_dict)] + ext)\n # template has its own extension or no file_template provided\n # the simplest formatting, if file_template is provided it's used without the extension\n else:\n formatted_value = template.format(**values_template_dict)\n return formatted_value", "def parse_template(template):\n field_name = None\n field_value = []\n\n for line in template.strip().split('\\n') + ['end:']:\n if line.startswith('#'):\n continue\n match = RE_TEMPLATE_FIELD_LINE.match(line)\n if match:\n if field_name is not None:\n yield (field_name, '\\n'.join(field_value).strip())\n elif len(field_value) > 0:\n logging.warning('Ignoring lines: %r', field_value)\n\n field_name = match.group(1)\n field_value = [match.group(2)]\n else:\n field_value.append(line)", "def render_fields(form, args):\n output = \"\"\n fields = get_fields(form, args)\n for field in fields:\n output += render_field(field)\n return mark_safe(output)", "def valueformat(value, format_list):\n\n # print(\"\\n\", format_list, value)\n concat_key = format_list.split('.')\n # Pass in either the key of the field\n # or pass in resource.key to enable a resource lookup.\n key = \"\"\n resource = \"\"\n member_id = \"\"\n key_sequence = [key, resource, member_id]\n count = 0\n for r in reversed(concat_key):\n key_sequence[count] = r\n count += 1\n\n # print(\"Concat_key:\", concat_key)\n key = key_sequence[0]\n resource = key_sequence[1]\n member_id = key_sequence[2]\n\n # print(\"Key:\", key)\n\n if key:\n if key.lower() == \"address\":\n return dt_address(value)\n\n elif key.lower() == \"telecom\":\n return dt_telecom(value)\n\n elif key.lower() == \"name\":\n return dt_name(value)\n elif key.lower() == 'dosage':\n return dt_dosage(value)\n elif key.lower() == 'medicationreference':\n # print(\"Working on\", key, \": \", value)\n # f_value = value\n # lookup field_formats\n # concat_key should have a resource name\n # print(\"\\n\\nRESOURCE:\", resource)\n # print(\"calling dt_medicationreference with Resource:\", resource, \", value:\", value)\n return dt_medicationreference(value, member_id, resource)\n elif key.lower() == 'dataabsentreason':\n if isinstance(value, dict):\n return value['coding'][0]['display']\n else:\n return value\n elif key.lower() == 'valuequantity':\n # return str(value['value']) + \" \" + value['unit']\n 
return dt_valuequantity(value)\n elif key.lower() == 'valuestring':\n return value\n elif key.lower() == 'interpretation':\n return value['coding'][0]['display']\n elif key.lower() == 'referencerange':\n return dt_referencerange(value)\n elif key.lower() == 'requester':\n if 'display' in value['agent']:\n return dt_reference(value['agent'], member_id)\n elif key.lower() == 'practitioner':\n if 'display' in value:\n return dt_reference(value, member_id)\n elif key.lower() == 'organization':\n if 'display' in value:\n return dt_reference(value, member_id)\n # elif key.lower() == \"result\":\n # return dt_reference(value[0], member_id)\n elif key.lower() == 'practitioner':\n if 'display' in value:\n return dt_reference(value, member_id)\n elif key.lower() == 'organization':\n if 'display' in value:\n return dt_reference(value, member_id)\n elif key.lower() == 'participant':\n if 'display' in value[0]['individual']:\n return dt_reference(value[0]['individual'], member_id)\n elif key.lower() == 'location':\n if 'display' in value[0]['location']:\n return dt_reference(value[0]['location'], member_id)\n elif key.lower() == 'communication':\n return dt_communication(value)\n else:\n # print(\"value:\", value, \" type:\", type(value), \" for: \", key)\n return value", "def string_factory(list_of_dicts):\n result = []\n for item in range(len(list_of_dicts)):\n result.append(template.format(**list_of_dicts[item]))\n return result", "def parse_templated_fields(metadata):\n parse_dict = {}\n for field in metadata:\n if \"configurations\" not in field:\n parse_dict.update({field: metadata[field]})\n else:\n parse_dict.update(get_config(metadata, field))\n\n # looping over config sections:\n for config_sec, configs in metadata.items():\n # looping over each field in the current config section\n for cur_key, cur_val in configs.items():\n if cur_val[\"type\"] not in [\"string\", \"array\"]:\n continue # not string fields, template does not support\n \n if cur_val[\"type\"] == \"string\" and \"{\" in cur_val and \"}\" in cur_val[\"value\"]:\n cur_val[\"value\"] = cur_val[\"value\"].format(**parse_dict)\n else: # array\n for index, s in enumerate(cur_val[\"value\"]):\n cur_val[\"value\"][index] = s.format(**parse_dict)\n \n metadata[config_sec][cur_key][\"value\"] = cur_val[\"value\"]\n \n return metadata", "def get_template(self):\n return self.sep.join([self.htmls[html] for html in self.lang]).format(**self.fields)", "def format_template(template, *args):\n return textwrap.dedent(template % args).strip()", "def get_template(self, format):\n for pattern, converter in self._patterns:\n if converter.format == format:\n template = pattern.generate('{name}')\n if template:\n return template\n return '{name}' f'.{format}'", "def format_fields(field_data, include_empty=True):\n max_label = 0\n for (label, value) in field_data:\n label_length = len(label)\n if label_length > max_label:\n max_label = label_length\n\n fields = []\n for (label, value) in field_data:\n empty = str(value).strip() in ['', 'None']\n if not empty or include_empty:\n label_length = len(label.strip())\n extra_spaces = ' ' * (max_label - label_length)\n label_sep = ':' + extra_spaces + ' '\n joined_field = label_sep.join([label, str(value)])\n fields.append(joined_field)\n\n return '\\n'.join(fields)", "def format_field(self, value, spec):\n cache = Cache()\n if spec == \"co\":\n # if cache(re.match(\"(.*)co$\", spec)):\n value = co_join(value)\n spec = \"s\"\n # cache.output.group(1) + \"s\"\n elif cache(re.match(r\"^sub(\\d?)_?(.*)$\", 
spec)):\n depth = (1 if cache.output.group(1) == \"\" else\n int(cache.output.group(1)))\n value = \"\\n\".join([\n \"{0}{1} = {2}\".format(depth * \" \", key, val)\n for key, val in value.items()])\n if cache.output.group(2) != \"\":\n value = (\n depth * \"[\" + cache.output.group(2) + depth * \"]\" + \"\\n\" +\n value)\n spec = \"s\"\n return super(Format, self).format_field(value, spec)", "def glob_fmt(pattern: str, *templates: Strings) -> List[str]:\n results: List[str] = []\n for wildcards in glob_extract(pattern):\n for template in each_string(*templates):\n results.append(copy_annotations(template, template.format(**wildcards)))\n return results", "def format_input(self, args):\n\n new_list = []\n if args[1].find('{') != -1:\n new_list = self.format_dicti(args)\n return new_list\n else:\n new_list = []\n new_list.append(args[0])\n new_str = args[1][ args[1].find('(') + 2 : args[1].find(',') - 1]\n new_str += args[1][ args[1].find(',') : args[1].find(')') - 0]\n new_list.append(\" \".join(new_str.split(\", \") ) )\n\n return \" \".join(i for i in new_list)", "def format_substitutions(subs: Union[SubstituteTerm, List[SubstituteTerm]]):\n text = \"\"\n if isinstance(subs, SubstituteTerm):\n term_str = str(subs)\n for line in term_str.split('\\n'):\n text += Markup.escape(line) + Markup('<br />')\n text += Markup('<br />')\n return text\n for term in subs:\n term_str = str(term)\n for line in term_str.split('\\n'):\n text += Markup.escape(line) + Markup('<br />')\n text += Markup('<br />')\n return text", "def get_format_opts(cls, format_=\"value\", fields=[]):\n return \" -f {0} {1}\".format(format_, \" \".join([\"-c \" + it for it in fields]))", "def _template(inlist):\n from collections import OrderedDict\n if isinstance(inlist, str):\n inlist = [inlist]\n\n templates = []\n for item in reversed(inlist):\n templates.append(output_space(item))\n\n return OrderedDict(reversed(OrderedDict(templates).items()))", "def cheetah_template(self, pre=False):\n if self.is_req_output:\n cht_tmpl = self.req_out_chth\n return cht_tmpl.substitute(self.xml_out)\n elif self.is_output:\n xml_out = self.xml_out\n xml_out['out_sel_name'] = self.out_sel_name\n cht_tmpl = self.file_chth\n return cht_tmpl.substitute(self.xml_out)\n elif self.is_input and not pre:\n if self.pname in self.gen_in_fmt:\n if self.gen_in_fmt[self.pname] == 'vcf,vcf_bgzip':\n cht_tmpl = self.vcf_choose\n else:\n cht_tmpl = PercentTemplate(self.reg_arg)\n elif self.pname in self.tool_data[self.tool_name]['input_fmt']:\n cht_tmpl = self.req_out_chth\n return cht_tmpl.substitute(self.xml_out)\n elif self.is_input and pre:\n cht_tmpl = self.vcf_tabix\n return cht_tmpl.substitute(self.xml_out)\n else:\n if self.xml_out['section'] not in ['required']:\n template_string = self.ext_arg\n else:\n template_string = self.reg_arg\n if self.xml_out['type'] == 'boolean':\n cht_tmpl = PercentTemplate(template_string.replace('%argument ', ''))\n else:\n cht_tmpl = PercentTemplate(template_string)\n return cht_tmpl.substitute(self.xml_out)", "def get_formatted_messages(formats, label, context):\r\n format_templates = {}\r\n for format in formats:\r\n # conditionally turn off autoescaping for .txt extensions in format\r\n if format.endswith(\".txt\"):\r\n context.autoescape = False\r\n format_templates[format] = render_to_string((\r\n 'notification/%s/%s' % (label, format),\r\n 'notification/%s' % format), context_instance=context)\r\n return format_templates", "def output_format(result):\n if 'value' in result and isinstance(result['value'], 
list):\n result = result['value']\n obj_list = result if isinstance(result, list) else [result]\n return [_format_group(item) for item in obj_list]", "def check_template_fields(self, model: Dict[str, Any], collection: str) -> bool:\n errors = False\n for template_field in self.get_fields(collection):\n if not isinstance(template_field, BaseTemplateField):\n continue\n field_error = False\n replacements = model.get(template_field.get_template_field_name())\n\n if replacements is None:\n replacements = []\n\n if not isinstance(replacements, list):\n self.errors.append(\n f\"{collection}/{model['id']}/{template_field.get_own_field_name()}: Replacements for the template field must be a list\"\n )\n field_error = True\n continue\n for replacement in replacements:\n if not isinstance(replacement, str):\n self.errors.append(\n f\"{collection}/{model['id']}/{template_field.get_own_field_name()}: Each replacement for the template field must be a string\"\n )\n field_error = True\n if field_error:\n errors = True\n continue\n replacement_collection = None\n if template_field.replacement_collection:\n replacement_collection = (\n template_field.replacement_collection.collection\n )\n\n for replacement in replacements:\n structured_field = self.make_structured(template_field, replacement)\n if structured_field not in model:\n self.errors.append(\n f\"{collection}/{model['id']}/{template_field.get_own_field_name()}: Missing {structured_field} since it is given as a replacement\"\n )\n errors = True\n\n if replacement_collection:\n try:\n as_id = int(replacement)\n except (TypeError, ValueError):\n self.errors.append(\n f\"{collection}/{model['id']}/{template_field.get_own_field_name()}: Replacement {replacement} is not an integer\"\n )\n if not self.find_model(replacement_collection, as_id):\n self.errors.append(\n f\"{collection}/{model['id']}/{template_field.get_own_field_name()}: Replacement {replacement} does not exist as a model of collection {replacement_collection}\"\n )\n\n for field in model.keys():\n if self.is_structured_field(field):\n try:\n _template_field, _replacement = self.to_template_field(\n collection, field\n )\n if (\n template_field.get_own_field_name() == _template_field\n and _replacement\n not in model[template_field.get_own_field_name()]\n ):\n self.errors.append(\n f\"{collection}/{model['id']}/{field}: Invalid structured field. 
Missing replacement {_replacement} in {template_field.get_own_field_name()}\"\n )\n errors = True\n except CheckException as e:\n self.errors.append(\n f\"{collection}/{model['id']}/{field} error: \" + str(e)\n )\n errors = True\n\n return errors", "def template(self) -> str:\n arguments = []\n for arg in self.arg_list:\n flag = arg._flag\n arg = _flag_to_arg(flag)\n placeholder = _arg_to_empty(arg)\n arguments.append(placeholder)\n return ' '.join(arguments)", "def _generate_template(dictionary):\n task_dict = dict(dictionary)\n lines = []\n for key in sorted(TaskInfo._READ_ONLY_FIELDS):\n if key not in task_dict:\n continue\n\n value = TaskInfo._dpop(task_dict, key)\n lines.extend([\n \"# {}:\".format(key),\n \"# {}\".format(\"\\n#\".join(value.splitlines())),\n \"\",\n ])\n\n for key in sorted(task_dict.keys()):\n lines.extend([\n \"{}:\".format(key),\n str(task_dict[key]),\n \"\",\n ])\n\n return \"\\n\".join(lines)", "def output_format(times_list):\n formatted_free_times = []\n for i in times_list:\n fmt_str = \"{} to {}.\".format(\n i[0].format('ddd, MMM D, h:mm a'),\n i[1].format('ddd, MMM D, h:mm a'))\n formatted_free_times.append(fmt_str)\n return formatted_free_times", "def decorate_template(mlist, template, extradict=None):\n # Create a dictionary which includes the default set of interpolation\n # variables allowed in headers and footers. These will be augmented by\n # any key/value pairs in the extradict.\n substitutions = {\n key: getattr(mlist, key)\n for key in ('fqdn_listname',\n 'list_name',\n 'mail_host',\n 'display_name',\n 'request_address',\n 'description',\n 'info',\n )\n }\n if extradict is not None:\n substitutions.update(extradict)\n text = expand(template, mlist, substitutions)\n # Turn any \\r\\n line endings into just \\n\n return re.sub(r' *\\r?\\n', r'\\n', text)", "def expand(*templates: Strings, **kwargs: Strings) -> List[str]:\n formats = flatten(*templates)\n results: List[str] = []\n data: Dict[str, Any] = {}\n\n def _collect(items: List[Tuple[str, Strings]]) -> None:\n if len(items) == 0:\n for template in formats:\n results.append(template.format(**data))\n else:\n name, values = items[0]\n for value in flatten(values):\n data[name] = value\n _collect(items[1:])\n\n _collect(list(kwargs.items()))\n\n return results", "def htmlValue(self, REQUEST):\n\n value = REQUEST.form.get(self.__name__, 'No Input')\n\n header = ''\n for col in self.columnDefs:\n header += \"<th>%s</th>\" % col['columnTitle']\n\n res = '<table class=\"listing\"><thead><tr>%s</tr></thead><tbody>' % header\n for adict in value:\n if adict.get('orderindex_', '') != 'template_row_marker':\n res += \"<tr>\"\n for col in self.columnDefs:\n akey = col['columnId']\n if col['columnType'] == \"File\":\n file = adict[akey]\n file.seek(0)\n fdata = file.read()\n filename = file.filename\n mimetype, enc = guess_content_type(filename, fdata, None)\n out = \"%s %s: %s bytes\" % (filename, mimetype, len(fdata))\n res = \"%s\\n<td>%s</td>\" % (res, cgi.escape(out))\n else:\n res = \"%s\\n<td>%s</td>\" % (res, cgi.escape(adict[akey]))\n res += \"</tr>\"\n\n return \"%s</tbody></table>\" % res", "def _fill_template(\n self,\n template: Dict[Text, Any],\n template_vars: Dict[Text, Any]\n ) -> Dict[Text, Any]:\n\n line_object_keys = [\"quickReply\", \"items\", \"action\", \"template\", \"actions\"]\n\n if type(template) == list:\n for item in template:\n self._fill_template(item, template_vars)\n else:\n self._fill_template_text(template, template_vars)\n for key in line_object_keys:\n if key in 
template:\n self._fill_template(template[key], template_vars)\n\n return template", "def list_template(self, **kwargs):\n\n all_params = ['pretty', 'label_selector', 'field_selector', 'watch', 'resource_version', 'timeout_seconds']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method list_template\" % key\n )\n params[key] = val\n del params['kwargs']\n\n\n resource_path = '/oapi/v1/templates'.replace('{format}', 'json')\n path_params = {}\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n if 'label_selector' in params:\n query_params['labelSelector'] = params['label_selector']\n if 'field_selector' in params:\n query_params['fieldSelector'] = params['field_selector']\n if 'watch' in params:\n query_params['watch'] = params['watch']\n if 'resource_version' in params:\n query_params['resourceVersion'] = params['resource_version']\n if 'timeout_seconds' in params:\n query_params['timeoutSeconds'] = params['timeout_seconds']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'GET',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1TemplateList',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def get_default_template(env):\n return env.from_string(\n \"\"\"\\\n{% if record.standard_information and record.filename_information %}\n0|{{ prefix }}{{ record.path }}|{{ record.inode }}|0|{{ record.standard_information.owner_id }}|0|{{ record.size }}|{{ record.standard_information.accessed|unixtimestampformat }}|{{ record.standard_information.modified|unixtimestampformat }}|{{ record.standard_information.changed|unixtimestampformat }}|{{ record.standard_information.created|unixtimestampformat }}\n{% endif %}\n{% if record.standard_information and record.filename_information %}\n0|{{ prefix }}{{ record.path }} (filename)|{{ record.inode }}|0|{{ record.standard_information.owner_id }}|0|{{ record.size }}|{{ record.filename_information.accessed|unixtimestampformat }}|{{ record.filename_information.modified|unixtimestampformat }}|{{ record.filename_information.changed|unixtimestampformat }}|{{ record.filename_information.created|unixtimestampformat }}\n{% endif %}\n{% for e in record.indx_entries %}\n0|{{ prefix }}{{ record.path }}\\\\{{ e.name }} (INDX)|{{ e.inode }}|0|0|0|{{ e.logical_size }}|{{ e.accessed|unixtimestampformat }}|{{ e.modified|unixtimestampformat }}|{{ e.changed|unixtimestampformat }}|{{ e.created|unixtimestampformat }}\n{% endfor %}\n{% for e in record.slack_indx_entries %}\n0|{{ prefix }}{{ record.path }}\\\\{{ e.name }} (slack-INDX)|{{ e.inode }}|0|0|0|{{ e.logical_size }}|{{ e.accessed|unixtimestampformat }}|{{ e.modified|unixtimestampformat }}|{{ e.changed|unixtimestampformat }}|{{ e.created|unixtimestampformat }}\n{% endfor %}\n\"\"\"\n )", "def edit_template(self, template):\n macro = []\n for line 
in template:\n search_result = re.search(line_to_edit, line)\n if search_result != None:\n line = line[(search_result.start()+1):]\n if line.find(\"/rat/procset\") >= 0:\n parts = line.split(\" \")\n new_line = parts[0] + \" \" + parts[1] + \" \\\"\"\n new_line += self._path\n new_line += \"\\\"\\n\"\n line = new_line\n if line.find(\"/generator/add\") >= 0:\n parts = line.split(\" \")\n new_line = parts[0] + \" \" + parts[1] + \" \"\n parts2 = parts[2].split(\":\")\n new_line += self._generator.get_generator() + \":\" + parts2[1]\n new_line += \"\\n\"\n line = new_line\n if line.find(\"/generator/vtx/set\") >= 0:\n parts = line.split(\" \")\n new_line = parts[0] + \" \"\n new_line += self._generator.get_type() + \" \"\n new_line += self._generator.get_isotope().get_name() + \" \"\n new_line += str(int(self._generator.get_level())) + \" \"\n new_line += str(int(self._generator.get_mode())) + \" \"\n new_line += str(self._generator.get_e_lo()) + \" \"\n new_line += str(self._generator.get_e_hi())\n new_line += \"\\n\"\n line = new_line\n try:\n assert(re.match\\\n (r\"^.*\\s[0-9]{1,2}?\\s[0-9]{1,2}?\\s\"\n \"[0-9]+\\.[0-9]+\\s[0-9]+\\.[0-9]+$\", line)\\\n != None),\\\n \"/generator/vtx/set line has incorrect format\"\n except AssertionError as detail:\n print \"generate_spectrum.edit_template: error,\", detail\n sys.exit(1)\n if line.find(\"/rat/run/start\") >= 0:\n parts = line.split(\" \")\n new_line = parts[0] + \" \" + str(self._n_events)\n new_line += \"\\n\"\n line = new_line\n macro.append(line)\n return macro" ]
[ "0.653768", "0.5750648", "0.56986177", "0.53870606", "0.53678", "0.5362524", "0.5332941", "0.52641904", "0.52122486", "0.51070577", "0.50978017", "0.50938904", "0.50394", "0.5031101", "0.5029822", "0.50264037", "0.50131285", "0.49867523", "0.4916825", "0.49085638", "0.4908232", "0.4893599", "0.48861998", "0.48844856", "0.48800114", "0.4856569", "0.4838214", "0.48380226", "0.4837804", "0.4835788" ]
0.8142701
0
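
Editorial aside on the record above: the behaviour described in the query (filling an output_file_template, with list-valued fields producing one name per element) can be illustrated with a small standalone sketch. This is not the pydra code shown in the document field; the names format_output_template and _fill are hypothetical, and only the rules stated in the query are assumed.

import re
from pathlib import Path

def format_output_template(template: str, inputs: dict):
    """Minimal sketch: fill an output_file_template from input values.

    A single path-like value keeps its stem; list-valued fields produce
    one formatted name per element (mirroring the MultiOutputFile case)."""
    field_names = [f[1:-1] for f in re.findall(r"{\w+}", template)]
    plain, file_field = {}, None  # at most one file-like value is allowed
    for name in field_names:
        value = inputs[name]
        if isinstance(value, (str, Path)) and "." in str(value):
            file_field = (name, Path(value))
        else:
            plain[name] = value
    list_keys = [k for k, v in plain.items() if isinstance(v, list)]
    if not list_keys:
        return _fill(template, plain, file_field)
    length = len(plain[list_keys[0]])
    return [
        _fill(template, {**plain, **{k: plain[k][i] for k in list_keys}}, file_field)
        for i in range(length)
    ]

def _fill(template, values, file_field):
    if file_field is not None:
        name, path = file_field
        stem = path.name.split(".", maxsplit=1)[0]  # drop the full extension
        values = {**values, name: str(path.parent / stem)}
    return template.format(**values)

# format_output_template("{in_file}_run{idx}.nii", {"in_file": "sub1.nii", "idx": [1, 2]})
# -> ['sub1_run1.nii', 'sub1_run2.nii']
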
Formatting a single template for a single element (if a list). Taking into account that a file used in the template (file_template) and the template itself could have file extensions (assuming that if the template has an extension, the field value's extension is removed; if the field has an extension and the template does not, it is moved to the end). For values_template_dict the simple formatting can be used (no file values inside)
def _element_formatting(template, values_template_dict, file_template, keep_extension): if file_template: fld_name_file, fld_value_file = file_template # splitting the filename for name and extension, # the final value used for formatting depends on the template and keep_extension flag name, *ext = Path(fld_value_file).name.split(".", maxsplit=1) filename = str(Path(fld_value_file).parent / name) # updating values_template_dic with the name of file values_template_dict[fld_name_file] = filename # if keep_extension is False, the extensions are removed if keep_extension is False: ext = [] else: ext = [] # if file_template is at the end of the template, the simplest formatting should work if file_template and template.endswith(f"{{{fld_name_file}}}"): # recreating fld_value with the updated extension values_template_dict[fld_name_file] = ".".join([filename] + ext) formatted_value = template.format(**values_template_dict) # file_template provided, but the template doesn't have its own extension elif file_template and "." not in template: # if the fld_value_file has extension, it will be moved to the end formatted_value = ".".join([template.format(**values_template_dict)] + ext) # template has its own extension or no file_template provided # the simplest formatting, if file_template is provided it's used without the extension else: formatted_value = template.format(**values_template_dict) return formatted_value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _template_formatting(field, inputs, inputs_dict_st):\n from .specs import MultiInputObj, MultiOutputFile\n\n # if a template is a function it has to be run first with the inputs as the only arg\n template = field.metadata[\"output_file_template\"]\n if callable(template):\n template = template(inputs)\n\n # as default, we assume that keep_extension is True\n keep_extension = field.metadata.get(\"keep_extension\", True)\n\n inp_fields = re.findall(r\"{\\w+}\", template)\n inp_fields_fl = re.findall(r\"{\\w+:[0-9.]+f}\", template)\n inp_fields += [re.sub(\":[0-9.]+f\", \"\", el) for el in inp_fields_fl]\n if len(inp_fields) == 0:\n return template\n\n val_dict = {}\n file_template = None\n\n for fld in inp_fields:\n fld_name = fld[1:-1] # extracting the name form {field_name}\n if fld_name not in inputs_dict_st:\n raise AttributeError(f\"{fld_name} is not provided in the input\")\n fld_value = inputs_dict_st[fld_name]\n if fld_value is attr.NOTHING:\n # if value is NOTHING, nothing should be added to the command\n return attr.NOTHING\n else:\n # checking for fields that can be treated as a file:\n # have type File, or value that is path like (including str with extensions)\n if isinstance(fld_value, os.PathLike) or (\n isinstance(fld_value, str) and \".\" in fld_value\n ):\n if file_template:\n raise Exception(\n f\"can't have multiple paths in {field.name} template,\"\n f\" but {template} provided\"\n )\n else:\n file_template = (fld_name, fld_value)\n else:\n val_dict[fld_name] = fld_value\n\n # if field is MultiOutputFile and some elements from val_dict are lists,\n # each element of the list should be used separately in the template\n # and return a list with formatted values\n if field.type is MultiOutputFile and any(\n [isinstance(el, (list, MultiInputObj)) for el in val_dict.values()]\n ):\n # all fields that are lists\n keys_list = [\n k for k, el in val_dict.items() if isinstance(el, (list, MultiInputObj))\n ]\n if any(\n [len(val_dict[key]) != len(val_dict[keys_list[0]]) for key in keys_list[1:]]\n ):\n raise Exception(\n f\"all fields used in {field.name} template have to have the same length\"\n f\" or be a single value\"\n )\n formatted_value = []\n for ii in range(len(val_dict[keys_list[0]])):\n val_dict_el = copy(val_dict)\n # updating values to a single element from the list\n for key in keys_list:\n val_dict_el[key] = val_dict[key][ii]\n\n formatted_value.append(\n _element_formatting(\n template, val_dict_el, file_template, keep_extension=keep_extension\n )\n )\n else:\n formatted_value = _element_formatting(\n template, val_dict, file_template, keep_extension=keep_extension\n )\n return formatted_value", "def parse_templated_fields(metadata):\n parse_dict = {}\n for field in metadata:\n if \"configurations\" not in field:\n parse_dict.update({field: metadata[field]})\n else:\n parse_dict.update(get_config(metadata, field))\n\n # looping over config sections:\n for config_sec, configs in metadata.items():\n # looping over each field in the current config section\n for cur_key, cur_val in configs.items():\n if cur_val[\"type\"] not in [\"string\", \"array\"]:\n continue # not string fields, template does not support\n \n if cur_val[\"type\"] == \"string\" and \"{\" in cur_val and \"}\" in cur_val[\"value\"]:\n cur_val[\"value\"] = cur_val[\"value\"].format(**parse_dict)\n else: # array\n for index, s in enumerate(cur_val[\"value\"]):\n cur_val[\"value\"][index] = s.format(**parse_dict)\n \n metadata[config_sec][cur_key][\"value\"] = cur_val[\"value\"]\n \n return 
metadata", "def fill_template_file(filename, value_dict):\n f = open(filename, 'r')\n text = f.read()\n f.close()\n f = open(filename, 'w')\n f.write(text % value_dict)\n f.close()", "def _fill_template_text(\n self,\n template: Dict[Text, Any],\n template_vars: Dict[Text, Any]\n ) -> Dict[Text, Any]:\n line_text_keys = [\"text\", \"altText\", \"label\", \"uri\"]\n try:\n for key in line_text_keys:\n if key in template:\n template[key] = template[key].format(**template_vars)\n except KeyError as e:\n logger.exception(\n \"Failed to fill line template '{}'. \"\n \"Tried to replace '{}' but could not find \"\n \"a value for it. There is no slot with this \"\n \"name nor did you pass the value explicitly \"\n \"when calling the template. Return template \"\n \"without filling the template. \"\n \"\".format(template, e.args[0]))\n return template", "def __fill_template__(self,template_file,output_fname):\n dictionary = {}\n for k,v in self.__dict__.iteritems():\n if k == 'sample_key':\n try:\n int(v)\n new_sample_key = \"Sample_\" + str(v)\n dictionary.update({k:new_sample_key})\n continue\n except ValueError:\n pass\n dictionary.update({k:str(v)})\n dictionary.update({'restats_tail': self.restats_file + '.tail'})\n with open(output_fname,'w') as f:\n string = fill_template(template_file,dictionary)\n f.write(string)", "def get_default_template(env):\n return env.from_string(\n \"\"\"\\\n{% if record.standard_information and record.filename_information %}\n0|{{ prefix }}{{ record.path }}|{{ record.inode }}|0|{{ record.standard_information.owner_id }}|0|{{ record.size }}|{{ record.standard_information.accessed|unixtimestampformat }}|{{ record.standard_information.modified|unixtimestampformat }}|{{ record.standard_information.changed|unixtimestampformat }}|{{ record.standard_information.created|unixtimestampformat }}\n{% endif %}\n{% if record.standard_information and record.filename_information %}\n0|{{ prefix }}{{ record.path }} (filename)|{{ record.inode }}|0|{{ record.standard_information.owner_id }}|0|{{ record.size }}|{{ record.filename_information.accessed|unixtimestampformat }}|{{ record.filename_information.modified|unixtimestampformat }}|{{ record.filename_information.changed|unixtimestampformat }}|{{ record.filename_information.created|unixtimestampformat }}\n{% endif %}\n{% for e in record.indx_entries %}\n0|{{ prefix }}{{ record.path }}\\\\{{ e.name }} (INDX)|{{ e.inode }}|0|0|0|{{ e.logical_size }}|{{ e.accessed|unixtimestampformat }}|{{ e.modified|unixtimestampformat }}|{{ e.changed|unixtimestampformat }}|{{ e.created|unixtimestampformat }}\n{% endfor %}\n{% for e in record.slack_indx_entries %}\n0|{{ prefix }}{{ record.path }}\\\\{{ e.name }} (slack-INDX)|{{ e.inode }}|0|0|0|{{ e.logical_size }}|{{ e.accessed|unixtimestampformat }}|{{ e.modified|unixtimestampformat }}|{{ e.changed|unixtimestampformat }}|{{ e.created|unixtimestampformat }}\n{% endfor %}\n\"\"\"\n )", "def render_template(template_path, output_path, values):\n with open(template_path) as template_file:\n template = template_file.read()\n\n rendered = template.format(**values)\n with open(output_path, 'w') as output_file:\n output_file.write(rendered)", "def valueformat(value, format_list):\n\n # print(\"\\n\", format_list, value)\n concat_key = format_list.split('.')\n # Pass in either the key of the field\n # or pass in resource.key to enable a resource lookup.\n key = \"\"\n resource = \"\"\n member_id = \"\"\n key_sequence = [key, resource, member_id]\n count = 0\n for r in reversed(concat_key):\n 
key_sequence[count] = r\n count += 1\n\n # print(\"Concat_key:\", concat_key)\n key = key_sequence[0]\n resource = key_sequence[1]\n member_id = key_sequence[2]\n\n # print(\"Key:\", key)\n\n if key:\n if key.lower() == \"address\":\n return dt_address(value)\n\n elif key.lower() == \"telecom\":\n return dt_telecom(value)\n\n elif key.lower() == \"name\":\n return dt_name(value)\n elif key.lower() == 'dosage':\n return dt_dosage(value)\n elif key.lower() == 'medicationreference':\n # print(\"Working on\", key, \": \", value)\n # f_value = value\n # lookup field_formats\n # concat_key should have a resource name\n # print(\"\\n\\nRESOURCE:\", resource)\n # print(\"calling dt_medicationreference with Resource:\", resource, \", value:\", value)\n return dt_medicationreference(value, member_id, resource)\n elif key.lower() == 'dataabsentreason':\n if isinstance(value, dict):\n return value['coding'][0]['display']\n else:\n return value\n elif key.lower() == 'valuequantity':\n # return str(value['value']) + \" \" + value['unit']\n return dt_valuequantity(value)\n elif key.lower() == 'valuestring':\n return value\n elif key.lower() == 'interpretation':\n return value['coding'][0]['display']\n elif key.lower() == 'referencerange':\n return dt_referencerange(value)\n elif key.lower() == 'requester':\n if 'display' in value['agent']:\n return dt_reference(value['agent'], member_id)\n elif key.lower() == 'practitioner':\n if 'display' in value:\n return dt_reference(value, member_id)\n elif key.lower() == 'organization':\n if 'display' in value:\n return dt_reference(value, member_id)\n # elif key.lower() == \"result\":\n # return dt_reference(value[0], member_id)\n elif key.lower() == 'practitioner':\n if 'display' in value:\n return dt_reference(value, member_id)\n elif key.lower() == 'organization':\n if 'display' in value:\n return dt_reference(value, member_id)\n elif key.lower() == 'participant':\n if 'display' in value[0]['individual']:\n return dt_reference(value[0]['individual'], member_id)\n elif key.lower() == 'location':\n if 'display' in value[0]['location']:\n return dt_reference(value[0]['location'], member_id)\n elif key.lower() == 'communication':\n return dt_communication(value)\n else:\n # print(\"value:\", value, \" type:\", type(value), \" for: \", key)\n return value", "def substitute(self, field_values):\n\n\t\tdef substituteFunc(mo):\n\t\t\tname = mo.group('braced')\n\t\t\tif name is not None:\n\t\t\t\tif name in field_values:\n\t\t\t\t\treturn str(field_values[name])\n\t\t\t\telse:\n\t\t\t\t\treturn self._DELIMITER + '{' + name + '}'\n\n\t\t\t# regexp could catch either 'braced' or 'escaped' substring\n\t\t\t# if it is not 'braced', it is 'escaped'\n\t\t\treturn self._DELIMITER\n\n\t\treturn self._PATTERN.sub(substituteFunc, self._template)", "def string_factory(list_of_dicts):\n result = []\n for item in range(len(list_of_dicts)):\n result.append(template.format(**list_of_dicts[item]))\n return result", "def _fill_template(\n self,\n template: Dict[Text, Any],\n template_vars: Dict[Text, Any]\n ) -> Dict[Text, Any]:\n\n line_object_keys = [\"quickReply\", \"items\", \"action\", \"template\", \"actions\"]\n\n if type(template) == list:\n for item in template:\n self._fill_template(item, template_vars)\n else:\n self._fill_template_text(template, template_vars)\n for key in line_object_keys:\n if key in template:\n self._fill_template(template[key], template_vars)\n\n return template", "def parse_template(template):\n field_name = None\n field_value = []\n\n for line in 
template.strip().split('\\n') + ['end:']:\n if line.startswith('#'):\n continue\n match = RE_TEMPLATE_FIELD_LINE.match(line)\n if match:\n if field_name is not None:\n yield (field_name, '\\n'.join(field_value).strip())\n elif len(field_value) > 0:\n logging.warning('Ignoring lines: %r', field_value)\n\n field_name = match.group(1)\n field_value = [match.group(2)]\n else:\n field_value.append(line)", "def format_yaml(template, config):\n formatted = template\n for k, v in config.items():\n formatted = formatted.replace('${%s}' % k, v)\n return formatted", "def _substitute(template, files, user_values):\n # Get all placeholder names\n placeholders = _get_placeholders(template)\n\n # Pre-fill placeholders based on existing file aliases\n placeholder_values = _prefill_placeholders(placeholders, files,\n user_values)\n\n # Add user specified values for the placeholders\n placeholder_values.update(**user_values)\n\n # Check whether all placeholder values are now properly provided.\n provided = set(placeholder_values.keys())\n needed = set(placeholders)\n missing = needed - provided\n if len(missing) > 0:\n raise ValueError('Cannot construct filename, because the following '\n 'parameters are missing: %s' % missing)\n\n # Do the substitution\n return template.format(**placeholder_values)", "def template_recurse(tmpl, tags):\n if isinstance(tmpl, str):\n ret = tmpl\n for k, v in tags.items():\n # If the value we are replacing is an exact match for a key, return the value as-is - we're assuming\n # here that we are NOT replacing a substring in a larger string \"hello {{name}}\", but that the\n # value should be used as-is, i.e. not converted into a string - i.e. it is a KEY in the JSON, not a VALUE.\n if ret == k:\n return v\n # Try to perform a simple string substitution - this assumes that the value is a string, or can be\n # converted to a string without any issues.\n ret = ret.replace(k, str(v))\n\n return ret\n\n if isinstance(tmpl, list):\n return [template_recurse(v, tags) for v in tmpl]\n\n if isinstance(tmpl, dict):\n return {k:template_recurse(v, tags) for k, v in tmpl.items()}\n\n return tmpl", "def __verify_values(self, tmpl_key, tmpl_value, parent):\n output = \"\"\n if tmpl_key not in parent:\n output = tmpl_value\n elif parent[tmpl_key] is None:\n output = tmpl_value\n else:\n if isinstance(parent[tmpl_key], list):\n for i in range(0, len(parent[tmpl_key])):\n for k, v in tmpl_value.items():\n parent[tmpl_key][i][k] = self.__verify_values(k, v, parent[tmpl_key][i])\n output = parent[tmpl_key]\n elif isinstance(tmpl_value, OrderedDict):\n for k, v in list(tmpl_value.items()):\n parent[tmpl_key][k] = self.__verify_values(k, v, parent[tmpl_key])\n output = parent[tmpl_key]\n else:\n output = parent[tmpl_key] if parent[tmpl_key].strip() != \"\" else tmpl_value\n return output", "def template(self, record):\n\n def _log_format_onecolor(record):\n \"\"\"\n Normal console output format\n \"\"\"\n\n return LEVEL_COLORS.get(record.levelname)\n\n def _log_format_notset(record, stylized=True):\n \"\"\"\n Default log format.\n \"\"\"\n\n reset = Style.RESET_ALL\n\n levelname = {\n 'style_before': LEVEL_COLORS.get(record.levelname) + Style.BRIGHT,\n 'format': '(%(levelname)s)',\n 'style_after': reset,\n 'prefix': '',\n 'suffix': '',\n }\n\n name = {\n 'style_before': Fore.WHITE + Style.DIM + Style.BRIGHT,\n 'format': '%(name)s',\n 'style_after': Fore.RESET + Style.RESET_ALL,\n 'prefix': ' ',\n 'suffix': ' ',\n }\n\n # format prefix + style_before + message + style_after + suffix\n result = 
reset\n for i in [levelname, name]:\n result += f\"{i['prefix']}{i['style_before']}{i['format']}{i['style_after']}{i['suffix']}\"\n result += reset\n\n return result\n\n # Template Switcher\n templates = {\n 'NOTSET': _log_format_notset,\n 'INFO': _log_format_onecolor,\n 'DELIMITER': _log_format_onecolor,\n 'TOPIC': _log_format_onecolor,\n 'WARNING': _log_format_onecolor,\n }\n\n return templates.get(record.levelname, _log_format_notset)(record)", "def load_ftpl(self, file):\n\n logger_env.info(\" Loading ftpl file %s\" % file)\n lines = open(file).readlines()\n for line in lines:\n line = line.split(\"#\")[0]\n fields = line.split(\":\")\n if len(fields) != 2:\n continue\n\n key_string = fields[0].strip()\n keys = key_string.split(\",\")\n keys = [x.strip().strip(\"\\\"\") for x in keys]\n keys = [x for x in keys if x]\n keys = tuple(keys)\n\n value_string = fields[1].strip()\n if keys in self.tpl:\n if value_string == \"!!!\":\n # Special rule to negate a key, it is used by the last loaded ftpl file\n # to overwrite values that might have been defined before.\n del self.tpl[keys]\n else:\n self.tpl[keys] = self.tpl[keys] + \" , \" + value_string\n else:\n self.tpl[keys] = value_string\n return", "def render_template(template, **template_values):\n # retrieve the html template\n t = jinja_environment.get_template(template)\n\n # render the html template with th given dictionary\n return t.render(template_values)", "def format_field(self, value, spec):\n cache = Cache()\n if spec == \"co\":\n # if cache(re.match(\"(.*)co$\", spec)):\n value = co_join(value)\n spec = \"s\"\n # cache.output.group(1) + \"s\"\n elif cache(re.match(r\"^sub(\\d?)_?(.*)$\", spec)):\n depth = (1 if cache.output.group(1) == \"\" else\n int(cache.output.group(1)))\n value = \"\\n\".join([\n \"{0}{1} = {2}\".format(depth * \" \", key, val)\n for key, val in value.items()])\n if cache.output.group(2) != \"\":\n value = (\n depth * \"[\" + cache.output.group(2) + depth * \"]\" + \"\\n\" +\n value)\n spec = \"s\"\n return super(Format, self).format_field(value, spec)", "def check_template_fields(self, model: Dict[str, Any], collection: str) -> bool:\n errors = False\n for template_field in self.get_fields(collection):\n if not isinstance(template_field, BaseTemplateField):\n continue\n field_error = False\n replacements = model.get(template_field.get_template_field_name())\n\n if replacements is None:\n replacements = []\n\n if not isinstance(replacements, list):\n self.errors.append(\n f\"{collection}/{model['id']}/{template_field.get_own_field_name()}: Replacements for the template field must be a list\"\n )\n field_error = True\n continue\n for replacement in replacements:\n if not isinstance(replacement, str):\n self.errors.append(\n f\"{collection}/{model['id']}/{template_field.get_own_field_name()}: Each replacement for the template field must be a string\"\n )\n field_error = True\n if field_error:\n errors = True\n continue\n replacement_collection = None\n if template_field.replacement_collection:\n replacement_collection = (\n template_field.replacement_collection.collection\n )\n\n for replacement in replacements:\n structured_field = self.make_structured(template_field, replacement)\n if structured_field not in model:\n self.errors.append(\n f\"{collection}/{model['id']}/{template_field.get_own_field_name()}: Missing {structured_field} since it is given as a replacement\"\n )\n errors = True\n\n if replacement_collection:\n try:\n as_id = int(replacement)\n except (TypeError, ValueError):\n 
self.errors.append(\n f\"{collection}/{model['id']}/{template_field.get_own_field_name()}: Replacement {replacement} is not an integer\"\n )\n if not self.find_model(replacement_collection, as_id):\n self.errors.append(\n f\"{collection}/{model['id']}/{template_field.get_own_field_name()}: Replacement {replacement} does not exist as a model of collection {replacement_collection}\"\n )\n\n for field in model.keys():\n if self.is_structured_field(field):\n try:\n _template_field, _replacement = self.to_template_field(\n collection, field\n )\n if (\n template_field.get_own_field_name() == _template_field\n and _replacement\n not in model[template_field.get_own_field_name()]\n ):\n self.errors.append(\n f\"{collection}/{model['id']}/{field}: Invalid structured field. Missing replacement {_replacement} in {template_field.get_own_field_name()}\"\n )\n errors = True\n except CheckException as e:\n self.errors.append(\n f\"{collection}/{model['id']}/{field} error: \" + str(e)\n )\n errors = True\n\n return errors", "def _generate_template(dictionary):\n task_dict = dict(dictionary)\n lines = []\n for key in sorted(TaskInfo._READ_ONLY_FIELDS):\n if key not in task_dict:\n continue\n\n value = TaskInfo._dpop(task_dict, key)\n lines.extend([\n \"# {}:\".format(key),\n \"# {}\".format(\"\\n#\".join(value.splitlines())),\n \"\",\n ])\n\n for key in sorted(task_dict.keys()):\n lines.extend([\n \"{}:\".format(key),\n str(task_dict[key]),\n \"\",\n ])\n\n return \"\\n\".join(lines)", "def map_template(template: dict, input_: dict) -> None:\n for k, v in template.items():\n config_val = input_.get(k)\n\n if isinstance(v, dict) and k != 'NullHandler':\n map_template(v, input_)\n\n if config_val:\n template[k] = config_val.upper() if k == 'level' else config_val", "def render_template(self, template_txt, model, res_ids, post_process=False):\n multi_mode = True\n if isinstance(res_ids, (int, long)):\n multi_mode = False\n res_ids = [res_ids]\n\n results = dict.fromkeys(res_ids, u\"\")\n\n # try to load the template\n try:\n mako_env = mako_safe_template_env if self.env.context.get('safe') else mako_template_env\n template = mako_env.from_string(tools.ustr(template_txt))\n except Exception:\n _logger.info(\"Failed to load template %r\", template_txt, exc_info=True)\n return multi_mode and results or results[res_ids[0]]\n\n # prepare template variables\n records = self.env[model].browse(filter(None, res_ids)) # filter to avoid browsing [None]\n res_to_rec = dict.fromkeys(res_ids, None)\n for record in records:\n res_to_rec[record.id] = record\n variables = {\n 'format_date': lambda date, format=False, context=self._context: format_date(self.env, date, format),\n 'format_tz': lambda dt, tz=False, format=False, context=self._context: format_tz(self.env, dt, tz, format),\n 'format_amount': lambda amount, currency, context=self._context: format_amount(self.env, amount, currency),\n 'user': self.env.user,\n 'ctx': self._context, # context kw would clash with mako internals\n }\n for res_id, record in res_to_rec.iteritems():\n variables['object'] = record\n try:\n render_result = template.render(variables)\n except Exception:\n _logger.info(\"Failed to render template %r using values %r\" % (template, variables), exc_info=True)\n raise UserError(_(\"Failed to render template %r using values %r\")% (template, variables))\n if render_result == u\"False\":\n render_result = u\"\"\n results[res_id] = render_result\n\n if post_process:\n for res_id, result in results.iteritems():\n results[res_id] = 
self.render_post_process(result)\n\n return multi_mode and results or results[res_ids[0]]", "def build_document(self, values: dict):\n doc = Document(self.config.get('template_file'))\n for section in doc.sections:\n # First Header\n header = section.header\n for p in header.paragraphs:\n for key, field in self.config.get('fields').items():\n # Format key\n formatted_key = f\"<{key.upper()}>\"\n if formatted_key in p.text:\n inline = p.runs\n # Loop added to work with runs (strings with same style)\n for i in range(len(inline)):\n if formatted_key in inline[i].text:\n text = inline[i].text.replace(\n formatted_key, values[key])\n inline[i].text = text\n # Second Footer\n footer = section.footer\n for p in footer.paragraphs:\n for key, field in self.config.get(\"fields\").items():\n # Format key\n formatted_key = f\"<{key.upper()}>\"\n if formatted_key in p.text:\n inline = p.runs\n # Loop added to work with runs (strings with same style)\n for i in range(len(inline)):\n if formatted_key in inline[i].text:\n text = inline[i].text.replace(\n formatted_key, values[key])\n inline[i].text = text\n\n # Go by tables\n for table in doc.tables:\n for row in table.rows:\n for cell in row.cells:\n for p in cell.paragraphs:\n for key, field in self.config.get(\"fields\").items():\n # Format key\n formatted_key = f\"<{key.upper()}>\"\n if formatted_key in p.text:\n inline = p.runs\n # Loop added to work with runs (strings with same style)\n for i in range(len(inline)):\n if formatted_key in inline[i].text:\n text = inline[i].text.replace(formatted_key, values[key])\n inline[i].text = text\n\n # Go by the rest of the document\n for p in doc.paragraphs:\n for key, field in self.config.get(\"fields\").items():\n # Format key\n formatted_key = f\"<{key.upper()}>\"\n if formatted_key in p.text:\n inline = p.runs\n # Loop added to work with runs (strings with same style)\n for i in range(len(inline)):\n if formatted_key in inline[i].text:\n text = inline[i].text.replace(formatted_key, values[key])\n inline[i].text = text\n\n # By default filename will be the template filename with copy_ before\n filename = f\"copy_{self.config.get('template_file')}\"\n if \"filename\" in self.config:\n if \"type\" in self.config.get(\"filename\"):\n # We can have 2 types static value or based in a field\n if self.config.get(\"filename\").get(\"type\") == \"static\" and \"value\" in self.config.get(\"filename\"):\n filename = self.config.get(\"filename\").get(\"value\")\n elif self.config[\"filename\"][\"type\"] == \"field\" and \"value\" in self.config[\"filename\"]:\n filename = values.get(self.config.get(\"filename\").get(\"value\"))\n\n # Make sure we have a prefix populated\n if \"file_prefix\" not in self.config:\n self.config.update({\"file_prefix\": \"\"})\n\n # Make sure we have a posfix populated\n if \"file_posfix\" not in self.config:\n self.config.update({\"file_posfix\": \"\"})\n\n filename = self.config.get(\"file_prefix\") + filename + self.config.get(\"file_posfix\")\n\n doc.save(f\"{filename}.docx\")\n\n return f\"{filename}.docx\"", "def template(self, value: str):\n self._template = value", "def format_template(template, *args):\n return textwrap.dedent(template % args).strip()", "def get_template(self):\n return self.sep.join([self.htmls[html] for html in self.lang]).format(**self.fields)", "def update_template(template, trial):\n assert isinstance(template, dict) or isinstance(template, list)\n items = template.items() if isinstance(template, dict) else enumerate(template)\n\n for key, value in items:\n 
if isinstance(value, str):\n if value in trial:\n template[key] = trial[value]\n elif isinstance(value, dict) or isinstance(value, list):\n template[key] = ConfigGenerator.update_template(template[key], trial)\n\n return template", "def testTitleTemplateSubstitute(self):\n\n\t\tfield_values = {'abc': 'ABC', 'a.1': 'VALUE'}\n\n\t\ttests = {\n\t\t\t'${abc} $$ ${} ${{{} ${abc}': 'ABC $ ${} ${{{} ABC',\n\t\t\t'$abc ${a.1} $$$$': '$abc VALUE $$'\n\t\t}\n\n\t\tfor test in tests:\n\t\t\tt = TitleTemplate(test)\n\t\t\tself.assertEqual(t.substitute(field_values), tests[test])" ]
[ "0.7629409", "0.6301995", "0.605785", "0.58470714", "0.58328855", "0.5744207", "0.56723994", "0.56697905", "0.5642098", "0.5629034", "0.55882555", "0.55636996", "0.537064", "0.53584594", "0.5324042", "0.52790993", "0.5207045", "0.5204724", "0.51992595", "0.5162257", "0.5155697", "0.51486397", "0.51399326", "0.5127797", "0.51274216", "0.5117155", "0.5095036", "0.506397", "0.50547624", "0.5037204" ]
0.80320483
0
Check whether a file path is on a CIFS filesystem mounted in a POSIX host. POSIX hosts are assumed to have the ``mount`` command. On Windows, Docker mounts host directories into containers through CIFS shares, which has support for Minshall+French symlinks, or text files that the CIFS driver exposes to the OS as symlinks. We have found that under concurrent access to the filesystem, this feature can result in failures to create or read recently-created symlinks, leading to inconsistent behavior and ``FileNotFoundError`` errors. This check is written to support disabling symlinks on CIFS shares.
def on_cifs(cls, path: os.PathLike) -> bool:
    return cls.get_mount(path)[1] == "cifs"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ismount(path):\n try:\n s1 = os.lstat(path)\n except (OSError, ValueError):\n # It doesn't exist -- so not a mount point. :-)\n return False\n else:\n # A symlink can never be a mount point\n if os.path.stat.S_ISLNK(s1.st_mode):\n return False\n\n if isinstance(path, bytes):\n parent = os.path.join(path, b'..')\n else:\n parent = os.path.join(path, '..')\n parent = os.path.realpath(parent)\n try:\n s2 = os.lstat(parent)\n except (OSError, ValueError):\n return False\n\n dev1 = s1.st_dev\n dev2 = s2.st_dev\n if dev1 != dev2:\n return True # path/.. on a different device as path\n ino1 = s1.st_ino\n ino2 = s2.st_ino\n if ino1 == ino2:\n return True # path/.. is the same i-node as path\n return False", "def check_filesystem(ssh_connection, disk_fmt, disk):\n if disk_fmt == \"squashfs\":\n return\n cmd = \"fsck.{} -n {}\".format(disk_fmt, disk)\n exit_code, _, stderr = ssh_connection.run(cmd)\n assert exit_code == 0, stderr", "def mount_cifs_share(self, share_config):\n mount_point = share_config.get('mount_point')\n share_name = share_config.get('share_name')\n remote_host = share_config.get('remote_host')\n if remote_host and share_name and mount_point:\n command = 'mount -t cifs -o guest //%s/%s %s' % (remote_host, share_name, mount_point)\n self.cmd(command)", "def is_mounted(self):\n try:\n _ = openmediavault.subprocess.check_output(\n [\n 'findmnt',\n '--canonicalize',\n '--first-only',\n '--noheadings',\n '--raw',\n '--nofsroot',\n self.canonical_device_file,\n ]\n )\n return True\n except subprocess.CalledProcessError:\n pass\n return False", "def ismount(path):\n return True if not get_instance(path).relpath(path) else False", "def _check_mounted_system(self):\n res = self.su_cmd('touch /system/.dwarf_check')\n if res == '':\n res = self._do_adb_command('shell ls -la /system')\n if '.dwarf_check' in res:\n res = self.su_cmd('rm /system/.dwarf_check')\n if res == '':\n return True\n elif res == 'Read-only file system':\n return False\n\n return False", "def fs_ok(fs_info):\n if fs_info.mountpoint == '/':\n return True\n\n if (fs_info.device == fs_info.fstype or fs_info.fstype == 'nullfs' or\n '/docker' in fs_info.mountpoint or\n fs_info.mountpoint.startswith('/etc') or\n fs_info.mountpoint.startswith('/lib/modules')):\n return False\n\n if fs_info.device.startswith('/dev/'):\n return True\n\n return False", "def _ensure_share_mounted(self, nfs_share, mount_path=None):\n mnt_flags = []\n if self.shares.get(nfs_share) is not None:\n mnt_flags = self.shares[nfs_share].split()\n num_attempts = max(1, self.configuration.nfs_mount_attempts)\n for attempt in range(num_attempts):\n try:\n if mount_path is None:\n self._remotefsclient.mount(nfs_share, mnt_flags)\n else:\n if mount_path in self._remotefsclient._read_mounts():\n LOG.info('Already mounted: %s', mount_path)\n return\n\n fileutils.ensure_tree(mount_path)\n self._remotefsclient._mount_nfs(nfs_share, mount_path,\n mnt_flags)\n return\n except Exception as e:\n if attempt == (num_attempts - 1):\n LOG.error('Mount failure for %(share)s after '\n '%(count)d attempts.',\n {'share': nfs_share,\n 'count': num_attempts})\n raise exception.NfsException(six.text_type(e))\n LOG.warning(\n 'Mount attempt %(attempt)d failed: %(error)s. 
'\n 'Retrying mount ...',\n {'attempt': attempt, 'error': e})\n greenthread.sleep(1)", "def is_mounted(volume):\n mounts = sudo(\"mount\", quiet=True).split(\"\\n\")\n for m in mounts:\n if m.startswith(volume + \" \"):\n return True\n return False", "def mount_nfs_share(self, share_config):\n remote_host = share_config.get('remote_host')\n remote_dir = share_config.get('remote_dir')\n mount_point = share_config.get('mount_point')\n if remote_host and remote_dir:\n if mount_point:\n command = 'mount -t nfs %s:%s %s' % (remote_host, remote_dir, mount_point)\n self.cmd(command)", "def is_afs_mounted():\n return afs_mountpoint() is not None", "def is_mounted(dev):\n dev = os.path.realpath(dev)\n with file('/proc/mounts', 'rb') as proc_mounts:\n for line in proc_mounts:\n fields = line.split()\n if len(fields) < 3:\n continue\n mounts_dev = fields[0]\n path = fields[1]\n if mounts_dev.startswith('/') and os.path.exists(mounts_dev):\n mounts_dev = os.path.realpath(mounts_dev)\n if mounts_dev == dev:\n return path\n return None", "def ismount(self, vPath):\n return vPath[1:] in self.listdir('/')", "def testCheckFileExistsByPath(self):\n test_file_path = self._GetTestFilePath(['utmp-linux_libc6'])\n self._SkipIfPathNotExists(test_file_path)\n\n test_helper = dfvfs_helpers.DFVFSFileSystemHelper(None)\n\n path_spec = path_spec_factory.Factory.NewPathSpec(\n dfvfs_definitions.TYPE_INDICATOR_OS, location=test_file_path)\n test_helper.OpenFileSystem(path_spec)\n\n result = test_helper.CheckFileExistsByPath(test_file_path)\n self.assertTrue(result)", "def check_already_mounted(devpath, mountpoint):\n mounts = Mounter().read_mounts()\n for m in mounts:\n if devpath == m.device and mountpoint == m.mountpoint:\n return True\n return False", "def is_mountpoint(path: str) -> bool:\n mtpt = subprocess.run([\"mountpoint\", path], check=False, capture_output=True)\n return mtpt.returncode == 0", "def is_posix_path(my_path: str) -> bool:\n return \"/\" in str(my_path)", "def is_mounted(device):\n\n partitions = psutil.disk_partitions()\n device_path = \"/dev/\" + device\n for i in partitions:\n if i.device == device_path:\n return True\n return False", "def is_posix_path3(my_path):\n return \"/\" in str(my_path)", "def mounted(self):\n return os.path.ismount(self.get(\"~mountpoint\", \"/\"))", "def is_linkto_file(host, fqpath):\n command = 'file %s' % fqpath\n rcode, rout, _ = g.run(host, command)\n\n if rcode == 0:\n if 'sticky empty' in rout.strip():\n stat = get_file_stat(host, fqpath)\n if int(stat['size']) == 0:\n # xattr = get_fattr(host, fqpath,\n # 'trusted.glusterfs.dht.linkto')\n xattr = get_dht_linkto_xattr(host, fqpath)\n if xattr is not None:\n return True\n\n return False", "def is_mountpoint(path):\r\n return path in [m['dest'] for m in mounts()]", "def is_fs_link(pathname: Union[str, os.PathLike]) -> bool:\n return os.path.islink(pathname)", "def is_fs_file(pathname: Union[str, os.PathLike]) -> bool:\n return os.path.isfile(pathname)", "def is_posix_path2(my_path):\n return \"/\" in str(my_path)", "def test_mount_status_nas_share(self):\n pass", "def isfile(path):\r\n path = encode(path)\r\n if path.lower().startswith(\"smb://\"):\r\n return samba.file_exists(os.path.basename(path), os.path.dirname(path))\r\n else:\r\n return os.path.isfile(path)", "def test_mount_status_nas_share_by_nas(self):\n pass", "def exists(path):\r\n path = encode(path)\r\n if path.lower().startswith(\"smb://\"):\r\n try:\r\n return samba.file_exists(os.path.basename(path), os.path.dirname(path)) or \\\r\n 
samba.folder_exists(os.path.basename(path), os.path.dirname(path))\r\n except gaierror:\r\n logger.info(\"deportesalacarta.core.filetools exists: No es posible conectar con la ruta\")\r\n platformtools.dialog_notification(\"No es posible conectar con la ruta\", path)\r\n return True\r\n else:\r\n return os.path.exists(path)", "def is_filesystem_enabled(dbapi, host_id_or_uuid, fs_name):\n filesystems = dbapi.host_fs_get_by_ihost(host_id_or_uuid)\n for fs in filesystems:\n if fs.name == fs_name:\n return True\n return False" ]
[ "0.6743361", "0.6556107", "0.64604294", "0.62453586", "0.6219545", "0.61390376", "0.60891384", "0.60775924", "0.60729563", "0.5945389", "0.5936177", "0.58674806", "0.5843192", "0.5838682", "0.5829992", "0.58089876", "0.58017236", "0.5783103", "0.575349", "0.5714421", "0.56780875", "0.5673771", "0.5667954", "0.5628095", "0.56253856", "0.5572589", "0.55652404", "0.5491563", "0.5462597", "0.5430677" ]
0.7511037
0
Parse the output of ``mount`` to produce (path, fs_type) pairs. Separated from _generate_cifs_table to enable testing logic with real outputs
def parse_mount_table( cls, exit_code: int, output: str ) -> ty.List[ty.Tuple[str, str]]: # Not POSIX if exit_code != 0: return [] # Linux mount example: sysfs on /sys type sysfs (rw,nosuid,nodev,noexec) # <PATH>^^^^ ^^^^^<FSTYPE> # OSX mount example: /dev/disk2 on / (hfs, local, journaled) # <PATH>^ ^^^<FSTYPE> pattern = re.compile(r".*? on (/.*?) (?:type |\()([^\s,\)]+)") # Keep line and match for error reporting (match == None on failure) # Ignore empty lines matches = [(ll, pattern.match(ll)) for ll in output.strip().splitlines() if ll] # (path, fstype) tuples, sorted by path length (longest first) mount_info = sorted( (match.groups() for _, match in matches if match is not None), key=lambda x: len(x[0]), reverse=True, ) cifs_paths = [path for path, fstype in mount_info if fstype.lower() == "cifs"] # Report failures as warnings for line, match in matches: if match is None: logger.debug("Cannot parse mount line: '%s'", line) return [ mount for mount in mount_info if any(mount[0].startswith(path) for path in cifs_paths) ]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_cifs_table(cls) -> ty.List[ty.Tuple[str, str]]:\n exit_code, output = sp.getstatusoutput(\"mount\")\n return cls.parse_mount_table(exit_code, output)", "def parseMountOutput(output):\n\t\n\t# none on /proc/sys/fs/binfmt_misc type binfmt_misc (rw)\n\t\n\tparsedOutput = {}\n\tregex = \"(\\S+)\\s+on\\s+(\\S+)\\s+type\\s+(\\S+)\\s+\\((\\S+)\\)\"\n\tfor l in output:\n\t\tif re.search(regex,l):\n\t\t\tm = re.search(regex,l)\n\t\t\tdev = m.group(1)\n\t\t\tmntpoint = m.group(2)\n\t\t\tfs = m.group(3)\n\t\t\tperm = m.group(4)\n\t\t\tparsedOutput[dev] = {}\n\t\t\tparsedOutput[dev]['mntpoint'] = mntpoint\n\t\t\tparsedOutput[dev]['filesys'] = fs\n\t\t\tparsedOutput[dev]['perm'] = perm\n\t\t\t\n\t\t\t\n\treturn parsedOutput", "def get_mount_info(devname, label=None):\n mount_point = get_mount_target(devname, label)\n mounts = check_output('mount | grep \" %s \" || :' % mount_point, shell=True)\n if mounts:\n return Munch(zip(('device', 'mount_point', 'type', 'options'),\n MOUNTS_RE.match(mounts.decode()).groups()))", "def get_mounts(self):\n cmd = ['mount', '-t', 'btrfs', '-l']\n out, err, ret = self.justcall(cmd)\n if ret != 0:\n raise InitError(\"error running %s:\\n\"%' '.join(cmd)+err)\n mounts = {}\n for line in out.split(\"\\n\"):\n if len(line) == 0 or \" on \" not in line or \" type btrfs \" not in line:\n continue\n mntpt = line[line.index(\" on \")+4:line.index(\" type btrfs \")]\n if '[' in line:\n l = line.split('[')\n label = l[-1].strip(']')\n else:\n label = self.get_label(mntpt)\n mounts[mntpt] = label\n return mounts", "def get_mount_points():\n\n points = []\n t = subprocess.check_output(['mount'])\n t = t.decode()\n\n for line in t.splitlines():\n t = line.find('smbfs')\n if t < 0: continue\n b = line.find(' on ')\n points.append(line[b+4: t-2])\n # //[email protected]/storage on /Volumes/storage (smbfs, nodev, nosuid, mounted by ruan)\n return points", "def read_mounts(self, filter_device=(), filter_fstype=()):\n try:\n (out, err) = processutils.execute('cat', proc_mounts_path,\n check_exit_code=0)\n except processutils.ProcessExecutionError:\n msg = _(\"Failed to read mounts.\")\n raise exceptions.FileNotFound(msg)\n\n lines = out.split('\\n')\n mounts = []\n for line in lines:\n if not line:\n continue\n tokens = line.split()\n if len(tokens) < 4:\n continue\n if tokens[0] in filter_device or tokens[1] in filter_fstype:\n continue\n mounts.append(MountInfo(device=tokens[0], mountpoint=tokens[1],\n fstype=tokens[2], opts=tokens[3]))\n return mounts", "def parse_local_mounts(xs):\n return [(d[\"source\"], d[\"target\"], d[\"fs_type\"]) for d in xs]", "def iter_mounts():\n with open('/proc/mounts', 'r') as mounts:\n for line in mounts.readlines():\n (device, mountpoint, filesystem,\n options, _, _) = line.decode('UTF-8').split()\n yield (device, mountpoint, filesystem, options)", "def get_file_systems(self):\n result = {}\n if os.access('/proc/mounts', os.R_OK):\n file = open('/proc/mounts')\n for line in file:\n try:\n mount = line.split()\n device = mount[0]\n mount_point = mount[1]\n fs_type = mount[2]\n except (IndexError, ValueError):\n continue\n\n # Skip the filesystem if it is not in the list of valid\n # filesystems\n if fs_type not in self.filesystems:\n self.log.debug(\"Ignoring %s since it is of type %s \" +\n \" which is not in the list of filesystems.\",\n mount_point, fs_type)\n continue\n\n # Process the filters\n if self.exclude_reg.search(mount_point):\n self.log.debug(\"Ignoring %s since it is in the \" +\n \"exclude_filter list.\", mount_point)\n 
continue\n\n if ((('/' in device or ':' in device or device == 'tmpfs') and\n mount_point.startswith('/'))):\n try:\n stat = os.stat(mount_point)\n except OSError:\n self.log.debug(\"Path %s is not mounted - skipping.\",\n mount_point)\n continue\n\n if stat.st_dev in result:\n continue\n\n result[stat.st_dev] = {\n 'device': os.path.realpath(device),\n 'mount_point': mount_point,\n 'fs_type': fs_type\n }\n\n file.close()\n\n else:\n if not psutil:\n self.log.error('Unable to import psutil')\n return None\n\n partitions = psutil.disk_partitions(False)\n for partition in partitions:\n result[len(result)] = {\n 'device': os.path.realpath(partition.device),\n 'mount_point': partition.mountpoint,\n 'fs_type': partition.fstype\n }\n pass\n\n return result", "def mounts():\n if not _mounts:\n raw = subprocess.check_output(['mount'])\n if sys.platform == 'darwin':\n _mounts.extend(_parse_darwin_mounts(raw))\n elif sys.platform.startswith('linux'):\n _mounts.extend(_parse_linux_mounts(raw))\n else:\n log.warning('cannot parse mounts for %s' % sys.platform)\n return list(_mounts)", "def mounts():\r\n ret = []\r\n with open('/proc/mounts') as f:\r\n lines = f.readlines()\r\n for line in lines:\r\n m = re.match(\r\n r'(?P<src>\\S+) (?P<dest>\\S+) (?P<type>\\S+)', line)\r\n if m:\r\n ret.append(m.groupdict())\r\n return ret", "def get_mount(cls, path: os.PathLike) -> ty.Tuple[Path, str]:\n try:\n # Only the first match (most recent parent) counts, mount table sorted longest\n # to shortest\n return next(\n (Path(p), t)\n for p, t in cls.get_mount_table()\n if str(path).startswith(p)\n )\n except StopIteration:\n return (Path(\"/\"), \"ext4\")", "def getmount_fstype(mypath):\n\n mountpoint = getmount(mypath)\n return get_fs_type(mountpoint)", "def parse_diskstats(self, input):\n # made edits to original code here to output string values instead of bytes\n dstats = {}\n dsi = input.rfind('Device:'.encode())\n if dsi == -1:\n raise ParseError('Unknown input format: %r' % input)\n\n ds = input[dsi:].splitlines()\n hdr = ds.pop(0).split()[1:]\n\n for d in ds:\n if d:\n d = d.split()\n dev = d.pop(0)\n if (dev.decode('utf-8') in self.disks) or not self.disks:\n dstats[dev.decode('utf-8')] = dict([(k.decode('utf-8'), float(v)) for k, v in zip(hdr, d)])\n return dstats", "def get_type(full_path):\n status = {'type': []}\n if os.path.ismount(full_path):\n status['type'] += ['mount-point']\n elif os.path.islink(full_path):\n status['type'] += ['symlink']\n if os.path.isfile(full_path):\n status['type'] += ['file']\n elif os.path.isdir(full_path):\n status['type'] += ['dir']\n if not status['type']:\n if os.stat.S_ISSOCK(status['mode']):\n status['type'] += ['socket']\n elif os.stat.S_ISCHR(status['mode']):\n status['type'] += ['special']\n elif os.stat.S_ISBLK(status['mode']):\n status['type'] += ['block-device']\n elif os.stat.S_ISFIFO(status['mode']):\n status['type'] += ['pipe']\n if not status['type']:\n status['type'] += ['unknown']\n elif status['type'] and status['type'][-1] == 'symlink':\n status['type'] += ['broken']\n return status['type']", "def collect_existing_mounts():\n result = {}\n for mount in sh.mount().stdout.decode('utf-8').splitlines():\n tokens = mount.split()\n if tokens[1] == 'on' and tokens[0].startswith('/dev/'):\n device = tokens[0][5:]\n result[tokens[2]] = device\n return result", "def get_devs_by_mount(input, prefixes=['/dev/mm.', '/dev/mapper']):\n if type(input) is str:\n input = input.split('\\n')\n result = list()\n for line in input:\n hit = 
[re.match('({}[^\\s]+)\\son\\s([^\\s]+).*'.format(prefix), line) for prefix in prefixes]\n if any(hit):\n result.append(re.match('([^\\s]+).*', line).group(1))\n return result", "def main():\n mount_file = '/proc/mounts'\n if os.path.isfile(mount_file):\n try:\n f = open(mount_file, 'r')\n except IOError:\n print 'cannot open', mount_file\n else:\n lines = []\n lines = f.readlines()\n f.close()\n\n matching = [line for line in lines if \"rootfs\" in line]\n #print matching\n \n removed = [lines.remove(m) for m in matching]\n #print removed\n \n for line in lines:\n if line.endswith(\"0 0\\n\"):\n line = line[:-5] \n #print line\n # line = line.rstrip(\" 0\\n\") does not work if\n # the line contains 0. \n # i.e. \"...gid=5,mode=620,ptmxmode=000 0 0\\n\"\n\n fields = line.split(\" \")\n #print fields\n\n if (len(fields) != 4):\n print 'cannot format', line\n else:\n print fields[0], 'on', fields[1], 'type', fields[2], \\\n '('+ fields[3] + ')'\n else:\n print 'cannot find', mount_file\n\n return 0", "def get_mounts(self):\n return [m.split()[0] for m in self.xlist(\"get-mounts\")[1]]", "def get_disks(self):\n result = {}\n\n exp = self.config['devices']\n reg = re.compile(exp)\n fs_types = set(self.config['fs_types'].split(','))\n\n try:\n fp = open('/proc/mounts')\n for line in fp:\n columns = line.split()\n device = columns[0].strip('/').replace('dev/','',1)\n mount_point = columns[1]\n fs_type = columns[2]\n\n if not reg.match(device):\n continue\n\n if fs_type not in fs_types:\n continue\n\n result[device] = mount_point\n except Exception as e:\n self.log.debug('Could not read /proc/mounts!')\n self.log.exception(e)\n finally:\n fp.close()\n return result", "def info(self, paths):\n self.tracer.info(\"%s.info method called\" % self.__class__.__name__)\n\n mounts = []\n\n for path in paths:\n # determine real OS path without symlinks and retrieve the mounted devices\n path = os.path.realpath(path)\n\n # if path isn't mounted, skip this entry\n if not os.path.ismount(path):\n continue\n\n ## get fstype and device from /proc/mounts\n (code, output) = Helper._run2PipedOsCommand(\"cat /proc/mounts\", \"grep -w %s\" % path)\n if not code == 0:\n self.tracer.warning(\"error running cat /proc/mounts: code %s: %s\" % (code, output))\n dev = \"?\"\n fstype = \"?\"\n else:\n dev = output.split()[0]\n fstype = output.split()[2]\n\n # combine all extracted information\n mounts.append({\n \"path\" : path,\n \"OS Filesystem Type\" : fstype,\n \"OS Device\" : dev,\n })\n\n return mounts", "def check_fs(uuid):\n out, err = run_cmd(['lsblk', '-o', 'UUID,FSTYPE', '--json'])\n\n blockdevices = json.loads(out)['blockdevices']\n\n for blkdevice in blockdevices:\n if key_exists('uuid', blkdevice) and blkdevice['uuid'] == uuid:\n return blkdevice['fstype']", "def get_fs_type(mypath):\n\n root_type = ''\n for part in psutil.disk_partitions():\n if part.mountpoint == os.path.sep:\n root_type = part.fstype\n continue\n if str(mypath).startswith(part.mountpoint):\n return part.fstype\n return root_type", "def get_mount_usage(paths):\n\n mount_usage = {}\n for mount, stats in get_disk_usage().items():\n for path in paths:\n if (mount == get_mount_point(path)):\n mount_usage[path] = stats\n return mount_usage", "def _get_mount(self):\n if not self._mount.endswith(os.path.sep):\n return \"%s%s\" % (self._mount, os.path.sep)\n else:\n return self._mount", "def mpt():\n lbl_drives = ['device','mountpoint','fstype']\n disks = [d[0:3] for d in psutil.disk_partitions()]\n drives = [dict(zip(lbl_drives,ds)) for ds in 
disks]\n return [d['mountpoint']for d in drives]", "def find_dev_mount_point(self, usb_table):\n mounts = open(\"/proc/mounts\")\n mount_lines = mounts.readlines()\n table = usb_table\n i = 0\n for device in table:\n for line in mount_lines:\n arguments = line.split(\" \")\n if arguments[0] == device[0]:\n usb_table[i].append(arguments[1])\n usb_table[i] = self.get_drive_stat(usb_table[i])\n break\n i += 1\n return usb_table", "def parse_device_tags(output):\n for line in output.strip().split('\\n'):\n if line.strip():\n try:\n yield {key: value for key, value in\n (v.split('=', 1) for v in shlex.split(line))}\n except ValueError as err:\n raise ValueError(\n _(\"Malformed blkid/lsblk output line '%(line)s': %(err)s\")\n % {'line': line, 'err': err})", "def get_pathinfo(host, fqpath):\n pathinfo = {}\n pathinfo['raw'] = get_fattr(host, fqpath, 'trusted.glusterfs.pathinfo')\n pathinfo['brickdir_paths'] = re.findall(r\".*?POSIX.*?:(\\S+)\\>\",\n pathinfo['raw'])\n\n return pathinfo", "def mounted(\n name,\n device,\n fstype,\n mkmnt=False,\n opts=\"defaults\",\n dump=0,\n pass_num=0,\n config=\"/etc/fstab\",\n persist=True,\n mount=True,\n user=None,\n match_on=\"auto\",\n device_name_regex=None,\n extra_mount_invisible_options=None,\n extra_mount_invisible_keys=None,\n extra_mount_ignore_fs_keys=None,\n extra_mount_translate_options=None,\n hidden_opts=None,\n bind_mount_copy_active_opts=True,\n **kwargs\n):\n ret = {\"name\": name, \"changes\": {}, \"result\": True, \"comment\": \"\"}\n\n update_mount_cache = False\n\n if not name:\n ret[\"result\"] = False\n ret[\"comment\"] = \"Must provide name to mount.mounted\"\n return ret\n\n if not device:\n ret[\"result\"] = False\n ret[\"comment\"] = \"Must provide device to mount.mounted\"\n return ret\n\n if not fstype:\n ret[\"result\"] = False\n ret[\"comment\"] = \"Must provide fstype to mount.mounted\"\n return ret\n\n if device_name_regex is None:\n device_name_regex = []\n\n # Defaults is not a valid option on Mac OS\n if __grains__[\"os\"] in [\"MacOS\", \"Darwin\"] and opts == \"defaults\":\n opts = \"noowners\"\n\n # Defaults is not a valid option on AIX\n if __grains__[\"os\"] in [\"AIX\"]:\n if opts == \"defaults\":\n opts = \"\"\n\n # Make sure that opts is correct, it can be a list or a comma delimited\n # string\n if isinstance(opts, str):\n opts = opts.split(\",\")\n\n if isinstance(hidden_opts, str):\n hidden_opts = hidden_opts.split(\",\")\n\n # remove possible trailing slash\n if not name == \"/\":\n name = name.rstrip(\"/\")\n\n device_list = []\n # Get the active data\n active = __salt__[\"mount.active\"](extended=True)\n real_name = os.path.realpath(name)\n # real_name for comparisons to the active mount list\n comp_real_name = real_name.replace(\" \", \"\\\\040\")\n if device.startswith(\"/\"):\n if \"bind\" in opts and comp_real_name in active:\n _device = device.replace(\" \", \"\\\\040\")\n if active[comp_real_name][\"device\"].startswith(\"/\"):\n # Find the device that the bind really points at.\n while True:\n if _device in active:\n _real_device = active[_device][\"device\"]\n if bind_mount_copy_active_opts:\n opts = sorted(\n set(\n opts\n + active[_device][\"opts\"]\n + active[_device][\"superopts\"]\n )\n )\n active[comp_real_name][\"opts\"].append(\"bind\")\n break\n _device = os.path.dirname(_device.replace(\"\\\\040\", \" \"))\n real_device = _real_device\n else:\n # Remote file systems act differently.\n if _device in active:\n if bind_mount_copy_active_opts:\n opts = sorted(\n set(\n opts\n + 
active[_device][\"opts\"]\n + active[_device][\"superopts\"]\n )\n )\n active[comp_real_name][\"opts\"].append(\"bind\")\n real_device = active[comp_real_name][\"device\"]\n else:\n real_device = os.path.realpath(device)\n elif device.upper().startswith(\"UUID=\"):\n real_device = device.split(\"=\")[1].strip('\"').lower()\n elif device.upper().startswith(\"LABEL=\"):\n _label = device.split(\"=\")[1]\n cmd = \"blkid -t LABEL={}\".format(_label)\n res = __salt__[\"cmd.run_all\"](\"{}\".format(cmd))\n if res[\"retcode\"] > 0:\n ret[\"comment\"] = \"Unable to find device with label {}.\".format(_label)\n ret[\"result\"] = False\n return ret\n else:\n # output is a list of entries like this:\n # /dev/sda: LABEL=\"<label>\" UUID=\"<uuid>\" UUID_SUB=\"<uuid>\" TYPE=\"btrfs\"\n # exact list of properties varies between filesystems, but we're\n # only interested in the device in the first column\n for line in res[\"stdout\"]:\n dev_with_label = line.split(\":\")[0]\n device_list.append(dev_with_label)\n real_device = device_list[0]\n else:\n real_device = device\n\n # LVS devices have 2 names under /dev:\n # /dev/mapper/vg--name-lv--name and /dev/vg-name/lv-name\n # No matter what name is used for mounting,\n # mount always displays the device as /dev/mapper/vg--name-lv--name\n # Note the double-dash escaping.\n # So, let's call that the canonical device name\n # We should normalize names of the /dev/vg-name/lv-name type to the canonical name\n lvs_match = re.match(r\"^/dev/(?P<vg_name>[^/]+)/(?P<lv_name>[^/]+$)\", device)\n if lvs_match:\n double_dash_escaped = {\n k: re.sub(r\"-\", \"--\", v) for k, v in lvs_match.groupdict().items()\n }\n mapper_device = \"/dev/mapper/{vg_name}-{lv_name}\".format(**double_dash_escaped)\n if os.path.exists(mapper_device):\n real_device = mapper_device\n\n # When included in a Salt state file, FUSE devices are prefaced by the\n # filesystem type and a hash, e.g. sshfs. In the mount list only the\n # hostname is included. 
So if we detect that the device is a FUSE device\n # then we remove the prefaced string so that the device in state matches\n # the device in the mount list.\n fuse_match = re.match(r\"^\\w+\\#(?P<device_name>.+)\", device)\n if fuse_match:\n if \"device_name\" in fuse_match.groupdict():\n real_device = fuse_match.group(\"device_name\")\n\n if comp_real_name in active:\n if \"superopts\" not in active[comp_real_name]:\n active[comp_real_name][\"superopts\"] = []\n if mount:\n device_list.append(active[comp_real_name][\"device\"])\n device_list.append(os.path.realpath(device_list[0]))\n alt_device = (\n active[comp_real_name][\"alt_device\"]\n if \"alt_device\" in active[comp_real_name]\n else None\n )\n uuid_device = (\n active[comp_real_name][\"device_uuid\"]\n if \"device_uuid\" in active[comp_real_name]\n else None\n )\n label_device = (\n active[comp_real_name][\"device_label\"]\n if \"device_label\" in active[comp_real_name]\n else None\n )\n if alt_device and alt_device not in device_list:\n device_list.append(alt_device)\n if uuid_device and uuid_device not in device_list:\n device_list.append(uuid_device)\n if label_device and label_device not in device_list:\n device_list.append(label_device)\n if opts:\n mount_invisible_options = [\n \"_netdev\",\n \"actimeo\",\n \"bg\",\n \"comment\",\n \"defaults\",\n \"delay_connect\",\n \"direct-io-mode\",\n \"intr\",\n \"loop\",\n \"nointr\",\n \"nobootwait\",\n \"nofail\",\n \"password\",\n \"reconnect\",\n \"retry\",\n \"soft\",\n \"auto\",\n \"users\",\n \"bind\",\n \"nonempty\",\n \"transform_symlinks\",\n \"port\",\n \"backup-volfile-servers\",\n ]\n\n if extra_mount_invisible_options:\n mount_invisible_options.extend(extra_mount_invisible_options)\n\n if hidden_opts:\n mount_invisible_options = list(\n set(mount_invisible_options) | set(hidden_opts)\n )\n\n # options which are provided as key=value (e.g. 
password=Zohp5ohb)\n mount_invisible_keys = [\n \"actimeo\",\n \"comment\",\n \"credentials\",\n \"direct-io-mode\",\n \"password\",\n \"port\",\n \"retry\",\n \"secretfile\",\n ]\n\n if extra_mount_invisible_keys:\n mount_invisible_keys.extend(extra_mount_invisible_keys)\n\n # Some filesystems have options which should not force a remount.\n mount_ignore_fs_keys = {\"ramfs\": [\"size\"]}\n\n if extra_mount_ignore_fs_keys:\n mount_ignore_fs_keys.update(extra_mount_ignore_fs_keys)\n\n # Some options are translated once mounted\n mount_translate_options = {\n \"tcp\": \"proto=tcp\",\n \"udp\": \"proto=udp\",\n }\n\n if extra_mount_translate_options:\n mount_translate_options.update(extra_mount_translate_options)\n\n trigger_remount = []\n for opt in opts:\n if opt in mount_translate_options:\n opt = mount_translate_options[opt]\n\n keyval_option = opt.split(\"=\")[0]\n if keyval_option in mount_invisible_keys:\n opt = keyval_option\n\n size_match = re.match(\n r\"size=(?P<size_value>[0-9]+)(?P<size_unit>k|m|g)\", opt\n )\n if size_match:\n converted_size = _size_convert(size_match)\n opt = \"size={}k\".format(converted_size)\n # make cifs option user synonym for option username which is reported by /proc/mounts\n if fstype in [\"cifs\"] and opt.split(\"=\")[0] == \"user\":\n opt = \"username={}\".format(opt.split(\"=\")[1])\n\n if opt.split(\"=\")[0] in mount_ignore_fs_keys.get(fstype, []):\n opt = opt.split(\"=\")[0]\n\n # convert uid/gid to numeric value from user/group name\n name_id_opts = {\"uid\": \"user.info\", \"gid\": \"group.info\"}\n if opt.split(\"=\")[0] in name_id_opts and len(opt.split(\"=\")) > 1:\n _givenid = opt.split(\"=\")[1]\n _param = opt.split(\"=\")[0]\n _id = _givenid\n if not re.match(\"[0-9]+$\", _givenid):\n _info = __salt__[name_id_opts[_param]](_givenid)\n if _info and _param in _info:\n _id = _info[_param]\n opt = _param + \"=\" + str(_id)\n\n _active_superopts = active[comp_real_name].get(\"superopts\", [])\n for _active_opt in _active_superopts:\n size_match = re.match(\n r\"size=(?P<size_value>[0-9]+)(?P<size_unit>k|m|g)\",\n _active_opt,\n )\n if size_match:\n converted_size = _size_convert(size_match)\n opt = \"size={}k\".format(converted_size)\n _active_superopts.remove(_active_opt)\n _active_opt = \"size={}k\".format(converted_size)\n _active_superopts.append(_active_opt)\n\n if (\n opt not in active[comp_real_name][\"opts\"]\n and opt not in _active_superopts\n and opt not in mount_invisible_options\n and opt not in mount_ignore_fs_keys.get(fstype, [])\n and opt not in mount_invisible_keys\n ):\n trigger_remount.append(opt)\n\n if trigger_remount:\n if __opts__[\"test\"]:\n ret[\"result\"] = None\n ret[\n \"comment\"\n ] = \"Remount would be forced because options ({}) changed\".format(\n \",\".join(sorted(trigger_remount))\n )\n return ret\n else:\n # Some file systems require umounting and mounting if options change\n # add others to list that require similiar functionality\n if fstype in [\"nfs\", \"cvfs\"] or fstype.startswith(\"fuse\"):\n ret[\"changes\"][\"umount\"] = (\n \"Forced unmount and mount because \"\n + \"options ({}) changed\".format(\n \",\".join(sorted(trigger_remount))\n )\n )\n unmount_result = __salt__[\"mount.umount\"](real_name)\n if unmount_result is True:\n mount_result = __salt__[\"mount.mount\"](\n real_name,\n device,\n mkmnt=mkmnt,\n fstype=fstype,\n opts=opts,\n )\n ret[\"result\"] = mount_result\n else:\n ret[\"result\"] = False\n ret[\"comment\"] = \"Unable to unmount {}: {}.\".format(\n real_name, unmount_result\n )\n 
return ret\n else:\n ret[\"changes\"][\"umount\"] = (\n \"Forced remount because \"\n + \"options ({}) changed\".format(\n \",\".join(sorted(trigger_remount))\n )\n )\n remount_result = __salt__[\"mount.remount\"](\n real_name,\n device,\n mkmnt=mkmnt,\n fstype=fstype,\n opts=opts,\n )\n ret[\"result\"] = remount_result\n # Cleanup after the remount, so we\n # don't write remount into fstab\n if \"remount\" in opts:\n opts.remove(\"remount\")\n\n # Update the cache\n update_mount_cache = True\n\n mount_cache = __salt__[\"mount.read_mount_cache\"](real_name)\n if \"opts\" in mount_cache:\n _missing = [opt for opt in mount_cache[\"opts\"] if opt not in opts]\n\n if _missing:\n if __opts__[\"test\"]:\n ret[\"result\"] = None\n ret[\"comment\"] = (\n \"Remount would be forced because\"\n \" options ({})\"\n \"changed\".format(\",\".join(_missing))\n )\n return ret\n else:\n # Some file systems require umounting and mounting if options change\n # add others to list that require similiar functionality\n if fstype in [\"nfs\", \"cvfs\"] or fstype.startswith(\"fuse\"):\n ret[\"changes\"][\"umount\"] = (\n \"Forced unmount and mount because \"\n + \"options ({}) changed\".format(opt)\n )\n unmount_result = __salt__[\"mount.umount\"](real_name)\n if unmount_result is True:\n mount_result = __salt__[\"mount.mount\"](\n real_name,\n device,\n mkmnt=mkmnt,\n fstype=fstype,\n opts=opts,\n )\n ret[\"result\"] = mount_result\n else:\n ret[\"result\"] = False\n ret[\"comment\"] = \"Unable to unmount {}: {}.\".format(\n real_name, unmount_result\n )\n return ret\n else:\n ret[\"changes\"][\"umount\"] = (\n \"Forced remount because \"\n + \"options ({}) changed\".format(opt)\n )\n remount_result = __salt__[\"mount.remount\"](\n real_name,\n device,\n mkmnt=mkmnt,\n fstype=fstype,\n opts=opts,\n )\n ret[\"result\"] = remount_result\n # Cleanup after the remount, so we\n # don't write remount into fstab\n if \"remount\" in opts:\n opts.remove(\"remount\")\n\n update_mount_cache = True\n else:\n update_mount_cache = True\n\n if real_device not in device_list:\n # name matches but device doesn't - need to umount\n _device_mismatch_is_ignored = None\n for regex in list(device_name_regex):\n for _device in device_list:\n if re.match(regex, _device):\n _device_mismatch_is_ignored = _device\n break\n if _device_mismatch_is_ignored:\n ret[\"result\"] = True\n ret[\"comment\"] = (\n \"An umount will not be forced \"\n + \"because device matched device_name_regex: \"\n + _device_mismatch_is_ignored\n )\n elif __opts__[\"test\"]:\n ret[\"result\"] = None\n ret[\"comment\"] = (\n \"An umount would have been forced \"\n + \"because devices do not match. Watched: \"\n + device\n )\n else:\n ret[\"changes\"][\"umount\"] = (\n \"Forced unmount because devices \"\n + \"don't match. Wanted: \"\n + device\n )\n if real_device != device:\n ret[\"changes\"][\"umount\"] += \" (\" + real_device + \")\"\n ret[\"changes\"][\"umount\"] += \", current: \" + \", \".join(device_list)\n out = __salt__[\"mount.umount\"](real_name, user=user)\n active = __salt__[\"mount.active\"](extended=True)\n if comp_real_name in active:\n ret[\"comment\"] = \"Unable to unmount\"\n ret[\"result\"] = False\n return ret\n update_mount_cache = True\n else:\n ret[\"comment\"] = \"Target was already mounted\"\n # using a duplicate check so I can catch the results of a umount\n if comp_real_name not in active:\n if mount:\n # The mount is not present! 
Mount it\n if __opts__[\"test\"]:\n ret[\"result\"] = None\n if os.path.exists(name):\n ret[\"comment\"] = \"{} would be mounted\".format(name)\n elif mkmnt:\n ret[\"comment\"] = \"{} would be created and mounted\".format(name)\n else:\n ret[\n \"comment\"\n ] = \"{} does not exist and would not be created\".format(name)\n return ret\n\n if not os.path.exists(name) and not mkmnt:\n ret[\"result\"] = False\n ret[\"comment\"] = \"Mount directory is not present\"\n return ret\n\n out = __salt__[\"mount.mount\"](name, device, mkmnt, fstype, opts, user=user)\n active = __salt__[\"mount.active\"](extended=True)\n update_mount_cache = True\n if isinstance(out, str):\n # Failed to (re)mount, the state has failed!\n ret[\"comment\"] = out\n ret[\"result\"] = False\n return ret\n elif comp_real_name in active:\n # (Re)mount worked!\n ret[\"comment\"] = \"Target was successfully mounted\"\n ret[\"changes\"][\"mount\"] = True\n elif not os.path.exists(name):\n if __opts__[\"test\"]:\n ret[\"result\"] = None\n if mkmnt:\n ret[\"comment\"] = \"{} would be created, but not mounted\".format(name)\n else:\n ret[\n \"comment\"\n ] = \"{} does not exist and would neither be created nor mounted\".format(\n name\n )\n elif mkmnt:\n __salt__[\"file.mkdir\"](name, user=user)\n ret[\"comment\"] = \"{} was created, not mounted\".format(name)\n else:\n ret[\"comment\"] = \"{} not present and not mounted\".format(name)\n else:\n if __opts__[\"test\"]:\n ret[\"comment\"] = \"{} would not be mounted\".format(name)\n else:\n ret[\"comment\"] = \"{} not mounted\".format(name)\n\n if persist:\n if \"/etc/fstab\" == config:\n # Override default for Mac OS\n if __grains__[\"os\"] in [\"MacOS\", \"Darwin\"]:\n config = \"/etc/auto_salt\"\n\n # Override default for AIX\n elif \"AIX\" in __grains__[\"os\"]:\n config = \"/etc/filesystems\"\n\n if __opts__[\"test\"]:\n if __grains__[\"os\"] in [\"MacOS\", \"Darwin\"]:\n out = __salt__[\"mount.set_automaster\"](\n name, device, fstype, opts, config, test=True\n )\n elif __grains__[\"os\"] in [\"AIX\"]:\n out = __salt__[\"mount.set_filesystems\"](\n name,\n device,\n fstype,\n opts,\n mount,\n config,\n test=True,\n match_on=match_on,\n )\n else:\n out = __salt__[\"mount.set_fstab\"](\n name,\n device,\n fstype,\n opts,\n dump,\n pass_num,\n config,\n test=True,\n match_on=match_on,\n )\n if out != \"present\":\n ret[\"result\"] = None\n if out == \"new\":\n if mount:\n comment = (\n \"{} is mounted, but needs to be \"\n \"written to the fstab in order to be \"\n \"made persistent.\".format(name)\n )\n else:\n comment = (\n \"{} needs to be \"\n \"written to the fstab in order to be \"\n \"made persistent.\".format(name)\n )\n elif out == \"change\":\n if mount:\n comment = \"{} is mounted, but its fstab entry must be updated.\".format(\n name\n )\n else:\n comment = \"The {} fstab entry must be updated.\".format(name)\n else:\n ret[\"result\"] = False\n comment = (\n \"Unable to detect fstab status for \"\n \"mount point {} due to unexpected \"\n \"output '{}' from call to \"\n \"mount.set_fstab. This is most likely \"\n \"a bug.\".format(name, out)\n )\n if \"comment\" in ret:\n ret[\"comment\"] = \"{}. 
{}\".format(ret[\"comment\"], comment)\n else:\n ret[\"comment\"] = comment\n return ret\n\n else:\n if __grains__[\"os\"] in [\"MacOS\", \"Darwin\"]:\n out = __salt__[\"mount.set_automaster\"](\n name, device, fstype, opts, config\n )\n elif __grains__[\"os\"] in [\"AIX\"]:\n out = __salt__[\"mount.set_filesystems\"](\n name, device, fstype, opts, mount, config, match_on=match_on\n )\n else:\n out = __salt__[\"mount.set_fstab\"](\n name,\n device,\n fstype,\n opts,\n dump,\n pass_num,\n config,\n match_on=match_on,\n )\n\n if update_mount_cache:\n cache_result = __salt__[\"mount.write_mount_cache\"](\n real_name, device, mkmnt=mkmnt, fstype=fstype, mount_opts=opts\n )\n\n if out == \"present\":\n ret[\"comment\"] += \". Entry already exists in the fstab.\"\n return ret\n if out == \"new\":\n ret[\"changes\"][\"persist\"] = \"new\"\n ret[\"comment\"] += \". Added new entry to the fstab.\"\n return ret\n if out == \"change\":\n ret[\"changes\"][\"persist\"] = \"update\"\n ret[\"comment\"] += \". Updated the entry in the fstab.\"\n return ret\n if out == \"bad config\":\n ret[\"result\"] = False\n ret[\"comment\"] += \". However, the fstab was not found.\"\n return ret\n\n return ret" ]
[ "0.7389289", "0.73763126", "0.63220066", "0.6211202", "0.6031662", "0.58249325", "0.5824795", "0.581613", "0.57998055", "0.57176316", "0.5709025", "0.5650004", "0.56220317", "0.55568707", "0.5532059", "0.5520358", "0.54857093", "0.5449704", "0.54308045", "0.5396903", "0.5378369", "0.5364449", "0.53642213", "0.5346254", "0.53456056", "0.53415966", "0.5267692", "0.5265637", "0.5255178", "0.52485967" ]
0.78553987
0
Patch the mount table with new values. Used in test routines
def patch_table(cls, mount_table: ty.List[ty.Tuple[str, str]]): orig_table = cls._mount_table cls._mount_table = list(mount_table) try: yield finally: cls._mount_table = orig_table
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_patch_creation(self):\n host = synthetic_host(\"myserver\")\n self.create_simple_filesystem(host)\n\n spare_volume_1 = synthetic_volume_full(host)\n spare_volume_2 = synthetic_volume_full(host)\n\n response = self.api_client.patch(\n \"/api/target/\",\n data={\n \"objects\": [\n {\"kind\": \"OST\", \"filesystem_id\": self.fs.id, \"volume_id\": spare_volume_1.id},\n {\"kind\": \"MDT\", \"filesystem_id\": self.fs.id, \"volume_id\": spare_volume_2.id},\n ],\n \"deletions\": [],\n },\n )\n self.assertHttpAccepted(response)", "def test_patch_hyperflex_cluster(self):\n pass", "def visit_table(self, sytable):\n self.current.update(sytable)", "def visit_table(self, sytable):\n self.current.update(sytable)", "def test_patch_record(self):\n pass", "def test_update_device(self):\n pass", "def test_update_device(self):\n pass", "def test_update_device_template(self):\n pass", "def patch(self, controller_fs_uuid, patch):\n raise exception.OperationNotPermitted", "def test_update_hyperflex_cluster(self):\n pass", "def patch(self, table_number):\n table = TableDetails.query.get_or_404(table_number)\n\n if 'table_size' in request.json:\n table.table_size = request.json['table_size']\n if 'table_status' in request.json:\n table.table_status = request.json['table_status']\n db.session.commit()\n return table, 200", "def patch_mbean_table_value(self, mbean):\n for attribute in mbean['attributes']:\n if 'Table' in attribute:\n value = attribute['Table']\n attribute['Table'] = Utils.boolean_to_lowercase_literal(value)\n\n logger.debug('MBean patched result : [%s]', mbean)", "def mount(self, mount):\n assert mount in (ComponentBase.MOUNTS)\n if mount==self.MOUNT_HYB:\n raise Exception(\"Unsupported mount.\")\n self._mount = mount\n self._check_mount()", "def test_patch_hyperflex_server_model(self):\n pass", "def test_patch_bios_unit(self):\n pass", "def patch(self, table_number):\n table = TableDetails.query.get_or_404(table_number)\n if 'table_status' in request.json:\n table.table_status = request.json['table_status']\n if 'current_session' in request.json:\n table.current_session = request.json['current_session']\n else:\n table.current_session = None\n db.session.commit()\n\n return table, 200", "def test_set_state_partial(self):\n host = synthetic_host(\"myserver\")\n self.create_simple_filesystem(host)\n mgt_uri = \"/api/target/%s/\" % self.mgt.id\n with mock.patch(\"chroma_core.models.Command.set_state\", mock.Mock(return_value=None)):\n self.api_set_state_partial(mgt_uri, \"unmounted\")\n Command.set_state.assert_called_once()", "def test_patch_hyperflex_cluster_profile(self):\n pass", "def callUpdateTable(self):\r\n self.updateTable()", "def test_patch_pci_device(self):\n pass", "def test_patch_hyperflex_server_firmware_version(self):\n pass", "def stub_out(self, old, new):\n self.useFixture(fixtures.MonkeyPatch(old, new))", "def stub_out(self, old, new):\n self.useFixture(fixtures.MonkeyPatch(old, new))", "def test_update_virt_realm(self):\n pass", "def patch(self):\n\t\t\n\t\t# Create tunnels\n\t\t(module, self.tunnel_source) = create_tunnel(self.remote_source_info)\n\t\tself.modules += [ module ]\n\t\t(module, self.tunnel_sink) = create_tunnel(self.remote_sink_info)\n\t\tself.modules += [ module ]\n\t\t\n\t\t# Connect them to the local devices\n\t\tself.modules = self.modules + [\n\t\t\tadd_loopback(self.tunnel_source, self.local_sink),\n\t\t\tadd_loopback(self.local_source, self.tunnel_sink)\n\t\t]", "def _RemountRootAsReadWrite(self):\n self.RunCmdOnDevice(['mount', '-o', 'remount,rw', 
'/'])", "def patch(self, patch: 'ParsedNodePatch'):\n # explicitly pick out the parts to update so we don't inadvertently\n # step on the model name or anything\n self.patch_path: Optional[str] = patch.original_file_path\n self.description = patch.description\n self.columns = patch.columns\n self.meta = patch.meta\n self.docs = patch.docs\n if flags.STRICT_MODE:\n assert isinstance(self, JsonSchemaMixin)\n self.to_dict(validate=True, omit_none=False)", "def test_patch_hyperflex_node_profile(self):\n pass", "def test_partially_update_device_by_id1(self):\n pass", "def test_patchhardwares_item(self):\n pass" ]
[ "0.5773485", "0.5543964", "0.55399096", "0.55399096", "0.5520308", "0.5501401", "0.5501401", "0.54936635", "0.5452065", "0.5429425", "0.5401314", "0.5326435", "0.52833074", "0.52653056", "0.5258198", "0.524147", "0.5234455", "0.5214115", "0.51976144", "0.5178994", "0.5161263", "0.51571655", "0.51571655", "0.51545215", "0.51371276", "0.51229244", "0.51070124", "0.51004237", "0.50972366", "0.5067145" ]
0.7760954
0
Takes OPTAA data streamed to shore from the Cabled Array benthic platforms and cleans up the data set to make it more user-friendly. Primary task is renaming parameters and dropping some that are of limited use. Additionally, recalculate the intermediate products (e.g. absorption and attenuation) and add them to the data set. Finally, add the estimated chlorophyll and POC concentrations to the data set. Will test the data set to determine if more than one deployment is present. If so, will raise an exception with an error message. ACS processing requires that the data be processed one deployment at a time in order to properly assign calibration coefficients and pad wavelength arrays.
def optaa_benthic(ds, cal_file): # check to see if there is more than one deployment in the data set if len(np.unique(ds['deployment'].values)) > 1: raise ValueError('More than one deployment in the data set. Please structure processing request to process ' 'one deployment at a time.') # drop some of the variables: # internal_timestamp == time, redundant so can remove # pressure_counts == none of the OOI OPTAAs have a pressure sensor # serial_number == available in the global attributes # meter_type == always the same, not needed # packet_type == always the same, not needed # record_length == always the same, not needed # checksum == not needed, used in data parsing ds = ds.drop(['internal_timestamp', 'pressure_counts', 'serial_number', 'meter_type', 'packet_type', 'record_length', 'checksum']) # check for data from a co-located CTD, if not present create the variables using NaN's as the fill value if 'sea_water_temperature' not in ds.variables: ds['sea_water_temperature'] = ('time', ds['deployment'].data * np.nan) ds['sea_water_practical_salinity'] = ('time', ds['deployment'].data * np.nan) # pull out the number of wavelengths and serial number and then drop the variables (part of the metadata) num_wavelengths = ds.num_wavelengths.values[0].astype(int) serial_number = int(re.sub('[^0-9]', '', ds.attrs['SerialNumber'])) ds = ds.drop('num_wavelengths') # load the calibration coefficients uid = ds.attrs['AssetUniqueID'] start_time = ds['time'][0].values.astype(float) / 10 ** 9 cal = load_cal_coefficients(cal_file, uid, start_time) # check the calibration coefficients against the deployment data if cal.coeffs['serial_number'] != serial_number: raise Exception('Serial Number mismatch between ac-s data and the device file.') if cal.coeffs['num_wavelengths'] != num_wavelengths: raise Exception('Number of wavelengths mismatch between ac-s data and the device file.') # remove the units from the variable names rename = { 'a_signal_dark_counts': 'a_signal_dark', 'a_reference_dark_counts': 'a_reference_dark', 'a_signal_counts': 'a_signal', 'a_reference_counts': 'a_reference', 'c_signal_dark_counts': 'c_signal_dark', 'c_reference_dark_counts': 'c_reference_dark', 'c_signal_counts': 'c_signal', 'c_reference_counts': 'c_reference', 'wavelength': 'wavelength_number' } ds = ds.rename(rename) # Delete the first 45 seconds of the data record per recommendation from the vendor. Note, originally the vendor # recommended deleting the first 45 seconds, then 60 seconds and then 120 seconds. They never provided a data # based reason for the change in recommendation. Within OOI, instruments were programmed to run for 60 seconds, # then 120 seconds and then 240 seconds ... and it is all mixed up across the various data sets. So, we are # going to use the 45-second recommendation and apply it to all data sets. If the vendor ever provides an analysis # justifying the change in recommendation, we can revisit this. 
ds.elapsed_run_time.values = ds.elapsed_run_time.where(ds.elapsed_run_time / 1000 > 45) ds = ds.dropna(dim='time', subset=['elapsed_run_time']) # convert internal and external temperature sensors from raw counts to degrees Celsius ds['internal_temp'] = opt_internal_temp(ds['internal_temp_raw']) ds['external_temp'] = opt_external_temp(ds['external_temp_raw']) # calculate the median of the remaining data per burst measurement (configured to run hourly for 3 minutes) # calculate the median of the remaining data per burst measurement print('Calculating burst averages ...') start_time = time.time() burst = ds.resample(time='3600s', base=1800, loffset='1800s', skipna=True).reduce(np.median, dim='time', keep_attrs=True) burst = burst.where(~np.isnan(burst.deployment), drop=True) stop_time = time.time() elapsed_time = stop_time - start_time print('... burst averaging complete. Elapsed time: %f seconds' % elapsed_time) # re-process the raw data in order to create the intermediate variables, correcting for the holographic # grating, applying the temperature and salinity corrections and applying a baseline scatter correction # to the absorption data. All intermediate processing outputs are added to the data set. burst = apply_dev(burst, cal.coeffs) burst = apply_tscorr(burst, cal.coeffs, burst.sea_water_temperature, burst.sea_water_practical_salinity) burst = apply_scatcorr(burst, cal.coeffs) # add the jump offsets as NaN's if the grating index correction was not used if 'a_jump_offsets' not in ds.variables: ds['a_jump_offsets'] = ('time', ds['deployment'].data * np.nan) ds['c_jump_offsets'] = ('time', ds['deployment'].data * np.nan) # estimate chlorophyll and POC and calculate select absorption ratios burst = estimate_chl_poc(burst, cal.coeffs) burst = calculate_ratios(burst) # create a xarray dataset of the 2D variables, padding the number of wavelengths to a consistent # length of 100 using fill values. 
wavelength_number = np.arange(100).astype(int) # used as a dimensional variable pad = 100 - num_wavelengths fill_nan = np.tile(np.ones(pad) * np.nan, (len(burst.time), 1)) fill_int = np.tile(np.ones(pad) * FILL_INT, (len(burst.time), 1)) wavelength_a = np.concatenate([burst.wavelength_a.values, fill_nan], axis=1) wavelength_c = np.concatenate([burst.wavelength_c.values, fill_nan], axis=1) ac = xr.Dataset({ 'wavelength_a': (['time', 'wavelength_number'], wavelength_a), 'a_signal': (['time', 'wavelength_number'], np.concatenate([burst.a_signal, fill_int], axis=1).astype(int)), 'a_reference': (['time', 'wavelength_number'], np.concatenate([burst.a_reference, fill_int], axis=1).astype(int)), 'optical_absorption': (['time', 'wavelength_number'], np.concatenate([burst.optical_absorption, fill_nan], axis=1)), 'apg': (['time', 'wavelength_number'], np.concatenate([burst.apg, fill_nan], axis=1)), 'apg_ts': (['time', 'wavelength_number'], np.concatenate([burst.apg_ts, fill_nan], axis=1)), 'apg_ts_s': (['time', 'wavelength_number'], np.concatenate([burst.apg_ts_s, fill_nan], axis=1)), 'wavelength_c': (['time', 'wavelength_number'], wavelength_c), 'c_signal': (['time', 'wavelength_number'], np.concatenate([burst.c_signal, fill_int], axis=1).astype(int)), 'c_reference': (['time', 'wavelength_number'], np.concatenate([burst.c_reference, fill_int], axis=1).astype(int)), 'beam_attenuation': (['time', 'wavelength_number'], np.concatenate([burst.beam_attenuation, fill_nan], axis=1)), 'cpg': (['time', 'wavelength_number'], np.concatenate([burst.cpg, fill_nan], axis=1)), 'cpg_ts': (['time', 'wavelength_number'], np.concatenate([burst.cpg_ts, fill_nan], axis=1)), }, coords={'time': (['time'], burst.time.values), 'wavelength_number': wavelength_number}) # drop the original 2D variables from the burst data set drop = burst.drop(['wavelength_number', 'wavelength_a', 'a_signal', 'a_reference', 'optical_absorption', 'apg', 'apg_ts', 'apg_ts_s', 'wavelength_c', 'c_signal', 'c_reference', 'beam_attenuation', 'cpg', 'cpg_ts']) # reset the data type for the 'a' and 'c' signal and reference dark values, and the other raw parameters int_arrays = ['a_signal_dark', 'a_reference_dark', 'c_signal_dark', 'c_reference_dark', 'internal_temp_raw', 'external_temp_raw', 'deployment'] for k in drop.variables: if k in int_arrays: drop[k] = drop[k].astype(int) # recombine the two datasets optaa = xr.merge([drop, ac]) # reset the attributes, which the merging drops optaa.attrs = burst.attrs for v in optaa.variables: optaa[v].attrs = burst[v].attrs # reset some attributes for key, value in ATTRS.items(): for atk, atv in value.items(): if key in optaa.variables: optaa[key].attrs[atk] = atv # add the original variable name as an attribute, if renamed for key, value in rename.items(): optaa[value].attrs['ooinet_variable_name'] = key # add the actual number of wavelengths to the dataset as an attribute optaa['wavelength_number'].attrs['actual_wavelengths'] = num_wavelengths # if the filter index was used to adjust the spectral jumps, add that attribute to the data set if cal.coeffs['grate_index']: optaa['a_jump_offsets'].attrs['grate_index'] = cal.coeffs['grate_index'] optaa['c_jump_offsets'].attrs['grate_index'] = cal.coeffs['grate_index'] return optaa
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def optaa_profiler(ds, cal_file):\n # check to see if there is more than one deployment in the data set\n if len(np.unique(ds['deployment'].values)) > 1:\n raise ValueError('More than one deployment in the data set. Please structure processing request to process '\n 'one deployment at a time.')\n\n # drop some of the variables:\n # internal_timestamp == time, redundant so can remove\n # pressure_counts == none of the OOI OPTAAs have a pressure sensor\n # serial_number == available in the global attributes\n # meter_type == always the same, not needed\n # packet_type == always the same, not needed\n # record_length == always the same, not needed\n # checksum == not needed, used in data parsing\n ds = ds.drop(['internal_timestamp', 'pressure_counts', 'serial_number', 'meter_type', 'packet_type',\n 'record_length', 'checksum'])\n\n # check for data from a co-located CTD, if not present create the variables using NaN's as the fill value\n if 'sea_water_temperature' not in ds.variables:\n ds['sea_water_temperature'] = ('time', ds['deployment'].data * np.nan)\n ds['sea_water_practical_salinity'] = ('time', ds['deployment'].data * np.nan)\n\n # pull out the number of wavelengths and serial number and then drop the variable (part of the metadata)\n num_wavelengths = ds.num_wavelengths.values[0].astype(int)\n serial_number = int(re.sub('[^0-9]', '', ds.attrs['SerialNumber']))\n ds = ds.drop('num_wavelengths')\n\n # load the calibration coefficients\n uid = ds.attrs['AssetUniqueID']\n start_time = ds['time'][0].values.astype(float) / 10 ** 9\n cal = load_cal_coefficients(cal_file, uid, start_time)\n\n # check the calibration coefficients against the deployment data\n if cal.coeffs['serial_number'] != serial_number:\n raise Exception('Serial Number mismatch between ac-s data and the device file.')\n if cal.coeffs['num_wavelengths'] != num_wavelengths:\n raise Exception('Number of wavelengths mismatch between ac-s data and the device file.')\n\n # remove the units from the variable name\n rename = {\n 'a_signal_dark_counts': 'a_signal_dark',\n 'a_reference_dark_counts': 'a_reference_dark',\n 'a_signal_counts': 'a_signal',\n 'a_reference_counts': 'a_reference',\n 'c_signal_dark_counts': 'c_signal_dark',\n 'c_reference_dark_counts': 'c_reference_dark',\n 'c_signal_counts': 'c_signal',\n 'c_reference_counts': 'c_reference',\n 'int_ctd_pressure': 'sea_water_pressure',\n 'wavelength': 'wavelength_number'\n }\n ds = ds.rename(rename)\n\n # Delete the first 45 seconds of the data record per recommendation from the vendor. Note, originally the vendor\n # recommended deleting the first 45 seconds, then 60 seconds and then 120 seconds. They never provided a data\n # based reason for the change in recommendation. Within OOI, instruments were programmed to run for 60 seconds,\n # then 120 seconds and then 240 seconds ... and it is all mixed up across the various data sets. So, we are\n # going to use the 45-second recommendation and apply it to all data sets. 
If the vendor ever provides an analysis\n # justifying the change in recommendation, we can revisit this.\n ds.elapsed_run_time.values = ds.elapsed_run_time.where(ds.elapsed_run_time / 1000 > 45)\n ds = ds.dropna(dim='time', subset=['elapsed_run_time'])\n\n # convert internal and external temperature sensors from raw counts to degrees Celsius\n ds['internal_temp'] = opt_internal_temp(ds['internal_temp_raw'])\n ds['external_temp'] = opt_external_temp(ds['external_temp_raw'])\n\n # create a profile variable to uniquely identify profiles within the dataset...if the profiler moved\n print('Determining profiler movement ...')\n pks, dzdt = updown(ds['depth'].values, 10)\n if len(pks) == 2 and (pks[0] == 0 and pks[1] == len(dzdt) - 1):\n # the profiler never moved, so treat the whole data set as a time series\n print('Profiler was parked for the entire deployment, treating data as a time series using burst averaging.')\n ds['profile'] = ('time', np.zeros(len(ds['time'])).astype(int) - 1)\n\n # calculate the median of the remaining data per burst measurement (configured to run hourly for 3 minutes)\n print('Calculating burst averages ...')\n start_time = time.time()\n binned = ds.resample(time='3600s', base=1800, loffset='1800s', skipna=True).reduce(np.median, dim='time',\n keep_attrs=True)\n binned = binned.where(~np.isnan(binned.deployment), drop=True)\n stop_time = time.time()\n elapsed_time = stop_time - start_time\n print('... burst averaging complete. Elapsed time: %f seconds' % elapsed_time)\n else:\n # the profiler moved, so treat the data as a series of profiles\n print('Profiler moved during the deployment, treating data as a series of profiles.')\n print('Sub-selecting upcast data only from the data set ...')\n dzdt = xr.DataArray(dzdt, dims='time', coords={'time': ds['time']})\n ds = ds.where(dzdt < 0, drop=True)\n print('Creating and adding a profile variable to the data set ...')\n ds = create_profile_id(ds)\n\n # group the data by profile number and bin the data into 25 cm depth bins (nominal ascent rate of the shallow\n # profiler is 5 cm/s, binning at 25 cm will help to reduce the noise in the data and speed up subsequent\n # processing).\n profiles = ds.groupby('profile')\n profiles = [profile[1] for profile in profiles]\n partial_binning = partial(bin_profiles, site_depth=200, bin_size=0.25)\n with ProcessPoolExecutor(max_workers=N_CORES) as executor:\n binned = list(tqdm(executor.map(partial_binning, profiles), total=len(profiles),\n desc='Smoothing and binning each profile into 25 cm depth bins ...', file=sys.stdout))\n\n # reset the dataset now using binned profiles\n binned = [i[0] for i in binned if i is not None]\n binned = xr.concat(binned, 'time')\n binned = binned.sortby(['profile', 'time'])\n\n # confirm dimension order is correct for the wavelength arrays (sometimes the order gets flipped\n # during the binning process)\n binned['wavelength_a'] = binned.wavelength_a.transpose(*['time', 'wavelength_number'])\n binned['wavelength_c'] = binned.wavelength_c.transpose(*['time', 'wavelength_number'])\n\n # reclaim some memory\n del ds, pks, dzdt, profiles, partial_binning, executor\n\n # re-process the raw data in order to create the intermediate variables, correcting for the holographic\n # grating, applying the temperature and salinity corrections and applying a baseline scatter correction\n # to the absorption data. 
All intermediate processing outputs are added to the data set.\n print('Re-processing the raw data, creating intermediate data products ...')\n binned = apply_dev(binned, cal.coeffs)\n binned = apply_tscorr(binned, cal.coeffs, binned.sea_water_temperature, binned.sea_water_practical_salinity)\n binned = apply_scatcorr(binned, cal.coeffs)\n\n # estimate chlorophyll and POC and calculate select absorption ratios\n binned = estimate_chl_poc(binned, cal.coeffs)\n binned = calculate_ratios(binned)\n\n # create a xarray dataset of the 2D variables, padding the number of wavelengths to a consistent\n # length of 100 using fill values.\n wavelength_number = np.arange(100).astype(int) # used as a dimensional variable\n pad = 100 - num_wavelengths\n fill_nan = np.tile(np.ones(pad) * np.nan, (len(binned.time), 1))\n fill_int = np.tile(np.ones(pad) * FILL_INT, (len(binned.time), 1))\n\n wavelength_a = np.concatenate([binned.wavelength_a.values, fill_nan], axis=1)\n wavelength_c = np.concatenate([binned.wavelength_c.values, fill_nan], axis=1)\n\n ac = xr.Dataset({\n 'wavelength_a': (['time', 'wavelength_number'], wavelength_a),\n 'a_signal': (['time', 'wavelength_number'], np.concatenate([binned.a_signal, fill_int], axis=1).astype(int)),\n 'a_reference': (['time', 'wavelength_number'], np.concatenate([binned.a_reference, fill_int],\n axis=1).astype(int)),\n 'optical_absorption': (['time', 'wavelength_number'], np.concatenate([binned.optical_absorption, fill_nan],\n axis=1)),\n 'apg': (['time', 'wavelength_number'], np.concatenate([binned.apg, fill_nan], axis=1)),\n 'apg_ts': (['time', 'wavelength_number'], np.concatenate([binned.apg_ts, fill_nan], axis=1)),\n 'apg_ts_s': (['time', 'wavelength_number'], np.concatenate([binned.apg_ts_s, fill_nan], axis=1)),\n 'wavelength_c': (['time', 'wavelength_number'], wavelength_c),\n 'c_signal': (['time', 'wavelength_number'], np.concatenate([binned.c_signal, fill_int], axis=1).astype(int)),\n 'c_reference': (['time', 'wavelength_number'], np.concatenate([binned.c_reference, fill_int],\n axis=1).astype(int)),\n 'beam_attenuation': (['time', 'wavelength_number'], np.concatenate([binned.beam_attenuation, fill_nan],\n axis=1)),\n 'cpg': (['time', 'wavelength_number'], np.concatenate([binned.cpg, fill_nan], axis=1)),\n 'cpg_ts': (['time', 'wavelength_number'], np.concatenate([binned.cpg_ts, fill_nan], axis=1)),\n }, coords={'time': (['time'], binned.time.values), 'wavelength_number': wavelength_number})\n\n # drop the original 2D variables from the binned data set\n drop = binned.drop(['wavelength_number', 'wavelength_a', 'a_signal', 'a_reference',\n 'optical_absorption', 'apg', 'apg_ts', 'apg_ts_s',\n 'wavelength_c', 'c_signal', 'c_reference',\n 'beam_attenuation', 'cpg', 'cpg_ts'])\n\n # reset the data type for the 'a' and 'c' signal and reference dark values, and the other raw parameters\n int_arrays = ['a_signal_dark', 'a_reference_dark', 'c_signal_dark', 'c_reference_dark',\n 'internal_temp_raw', 'external_temp_raw', 'deployment', 'profile']\n for k in drop.variables:\n if k in int_arrays:\n drop[k] = drop[k].astype(int)\n\n # recombine the two datasets\n optaa = xr.merge([drop, ac])\n\n # reset the attributes, which the merging drops\n optaa.attrs = binned.attrs\n for v in optaa.variables:\n optaa[v].attrs = binned[v].attrs\n\n # reset some attributes\n for key, value in ATTRS.items():\n for atk, atv in value.items():\n if key in optaa.variables:\n optaa[key].attrs[atk] = atv\n\n # add the original variable name as an attribute, if renamed\n for key, value in 
rename.items():\n optaa[value].attrs['ooinet_variable_name'] = key\n\n # add the actual number of wavelengths to the dataset as an attribute\n optaa['wavelength_number'].attrs['actual_wavelengths'] = num_wavelengths\n\n # if the filter index was used to adjust the spectral jumps, add that attribute to the data set\n if cal.coeffs['grate_index']:\n optaa['a_jump_offsets'].attrs['grate_index'] = cal.coeffs['grate_index']\n optaa['c_jump_offsets'].attrs['grate_index'] = cal.coeffs['grate_index']\n\n return optaa", "def opt_optical_absorption(aref, asig, traw, awl, aoff, tcal, tbins, ta_arr,\n cpd_ts, cwl, T, PS, rwlngth=715.):\n # reset shapes of input arguments\n # using np.array ndmin=# seems faster than using np.atleast_#d\n aref = np.array(aref, ndmin=2)\n asig = np.array(asig, ndmin=2)\n traw = np.array(traw, ndmin=1)\n awl = np.around(np.array(awl, ndmin=2), decimals=1)\n aoff = np.array(aoff, ndmin=2)\n tcal = np.array(tcal, ndmin=1)\n tbins = np.array(tbins, ndmin=2)\n # note, np.atleast_3d appends the extra dimension;\n # np.array using ndmin prepends the extra dimension.\n ta_arr = np.array(ta_arr, ndmin=3)\n cpd_ts = np.array(cpd_ts, ndmin=2)\n cwl = np.array(cwl, ndmin=2)\n T = np.array(T, ndmin=1)\n PS = np.array(PS, ndmin=1)\n\n # size up inputs\n npackets = awl.shape[0]\n nwavelengths = awl.shape[1]\n # initialize output array\n apd_ts_s = np.zeros([npackets, nwavelengths])\n\n for ii in range(npackets):\n\n # calculate the internal instrument temperature [deg_C]\n tintrn = opt_internal_temp(traw[ii])\n\n # calculate the uncorrected optical absorption coefficient [m^-1]\n apd, _ = opt_pd_calc(aref[ii, :], asig[ii, :], aoff[ii, :], tintrn,\n tbins[ii, :], ta_arr[ii, :, :])\n\n # correct the optical absorption coefficient for temperature and salinity.\n apd_ts = opt_tempsal_corr('a', apd, awl[ii, :], tcal[ii], T[ii], PS[ii])\n\n # correct the optical absorption coefficient for scattering effects\n apd_ts_s_row = opt_scatter_corr(apd_ts, awl[ii, :], cpd_ts[ii, :], cwl[ii, :], rwlngth)\n apd_ts_s[ii, :] = apd_ts_s_row\n\n # return the temperature, salinity and scattering corrected optical\n # absorption coefficient OPTABSN_L2 [m^-1]\n return apd_ts_s", "def sanitize(self):\n # Early versions of CASU catalogues chave multiple columns 'Blank'\n # Numpy will throw an exception if multiple columns have the same\n # name, so we need to rename these columns.\n n_columns = len(self.fits[self.ccd].columns)\n for col in range(26, n_columns, 1):\n name = self.fits[self.ccd].columns[col].name\n if name == 'Blank':\n self.fits[self.ccd].columns[col].name = 'Blank%d' % col\n\n # The headers contain a combination of old- and modern-\n # style WCS parameters for the ZPN projection coefficients, which\n # confuses libwcs. Moreover, in a few cases the keyword values\n # are plainly wrong. 
Hence we remove the keywords.\n for kw in ['PV1_0', 'PV1_1', 'PV1_2', 'PV1_3',\n 'PV2_0', 'PV2_1', 'PV2_2', 'PV2_3',\n 'PV3_0', 'PV3_1', 'PV3_3', 'PV3_3',\n 'PROJP1', 'PROJP3', 'WAT1_001', 'WAT2_001',\n 'RADECSYS']:\n del self.fits[self.ccd].header[kw]\n\n # ..and enforce the parameters wich have been used by the pipeline\n self.fits[self.ccd].header['EQUINOX'] = 2000.0\n self.fits[self.ccd].header['PV2_1'] = 1.0\n self.fits[self.ccd].header['PV2_3'] = 220.0\n self.fits[self.ccd].header['CUNIT1'] = 'deg'\n self.fits[self.ccd].header['CUNIT2'] = 'deg'\n self.fits[self.ccd].header['RADESYSa'] = 'ICRS'", "def process(data, cluster_criteria, method = \"PP\", \\\n min_height = 0, pixel_size = 0, \\\n relax = 0, stop = 0, \\\n verbose = True, interactive = False,\n n_jobs = 1, nsteps = 1 ):\n\n#==============================================================================#\n \"\"\"\n Initial prep of key variables\n \"\"\"\n\n self = Acorns()\n start = time.time()\n\n # User input information\n self.cluster_criteria = cluster_criteria\n\n if np.size(relax) == 1:\n self.relax = relax if (relax != 0) else -1.0\n relaxcond = True if (relax != 0) else False\n else:\n self.relax = relax\n relaxcond = True\n\n if method == \"PP\":\n self.method = 0\n elif method == \"PPV\":\n self.method = 1\n elif method == \"PPP\":\n self.method = 2\n else:\n raise ValueError('method {0:s} unknown'.format(method))\n method = str(method)\n\n # Generate some important information:\n self.minnpix_cluster = get_minnpix(self, pixel_size, self.cluster_criteria[0])\n self.min_height = min_height\n self.max_dist = get_maxdist(self, pixel_size)\n self.cluster_criteria[0] = self.max_dist\n self.min_sep = 2.*self.cluster_criteria[0]\n self.nsteps = nsteps\n # Prime the acorns information:\n # cluster_arr will be updated with the indices of new clusters\n self.cluster_arr = gen_cluster_arr(self, data, stop)\n self.clusters = {}\n self.forest = {}\n\n#==============================================================================#\n \"\"\"\n Main controlling routine for acorns\n \"\"\"\n\n # Get the unassigned data array\n find_unassigned_data(self, data, stop)\n\n # Gen KDTree\n tree = generate_kdtree(self)\n\n # Generate the unassigned data array\n unassigned_array_length = len(self.unassigned_data[0,:])\n\n count= 0.0\n if verbose:\n progress_bar = print_to_terminal(self, 0, data, count, \\\n unassigned_array_length, method)\n\n # Cycle through the unassigned array\n starthierarchy = time.time()\n for i in range(0, unassigned_array_length):\n\n if verbose and (count % 1 == 0):\n progress_bar + 1\n progress_bar.show_progress()\n\n # Extract the current data point\n data_point = np.array(self.unassigned_data[:,i])\n # Retrieve this data point's location in the data array\n data_idx = get_data_index(self, data, data_point)\n self.cluster_arr[0,i] = int(data_idx)\n\n # Every data point begins as a new cluster\n self.cluster_idx = i\n bud_cluster = Cluster(data_point, data_idx, idx=self.cluster_idx, acorns=self)\n\n # Calculate distances between all data points\n link = get_links(self, i, i, tree, n_jobs)\n\n # Find clusters that are closely associated with the current data\n # point\n linked_clusters = find_linked_clusters(self, data, i, bud_cluster, link)\n\n if (self.method==1) & (len(linked_clusters) >= 1):\n linked_clusters = check_other_components(self, i, i, data_idx, data, linked_clusters, bud_cluster, tree, n_jobs, re=False)\n\n \"\"\"\n\n Notes\n -----\n\n Now try and merge this cluster with surrounding linked_clusters.\n 
From this point on there are three options for that data_point:\n\n 1. If no linked clusters are found - add the bud cluster to the\n cluster dictionary.\n 2. If a single linked cluster is found - merge the two.\n 3. If multiple linked clusters are found, check the validity of each\n cluster and either merge non-independent clusters or form a\n branch.\n\n This philosophy follows that of agglomerative hierarchical\n clustering techniques. The basic principle is discussed here:\n http://scikit-learn.org/stable/modules/clustering.html under\n \"2.3.6. Hierarchical Clustering\".\n\n A single link measure is used to connect clusters. The strategy is\n adapted from the general methods of:\n\n astrodendro:\n https://github.com/dendrograms/astrodendro\n Copyright (c) 2013 Thomas P. Robitaille, Chris Beaumont, Braden\n MacDonald, and Erik Rosolowsky\n quickclump:\n https://github.com/vojtech-sidorin/quickclump\n Copyright (c) 2016 Vojtech Sidorin\n\n When linking using the \"PPV\" methodology, single link measures may\n be insufficient and additional connectivity constraints are applied.\n Specifically - it is imposed that no two spectral features extracted\n from the same location can be merged into the same cluster.\n\n Additionally, an additional linking strategy is implemented which\n takes into account of the variance in the properties of the linked\n clusters (specifically those selected by the user). This is only\n implemented when trying to resolve ambiguities and is used as a way\n of establishing the \"strongest\" links when multiple spectral\n features have been detected.\n\n \"\"\"\n\n if not linked_clusters:\n add_to_cluster_dictionary(self, bud_cluster)\n elif len(linked_clusters) == 1:\n merge_into_cluster(self, data, linked_clusters[0], bud_cluster)\n else:\n resolve_ambiguity(self, data, linked_clusters, bud_cluster)\n\n if verbose:\n progress_bar.progress = 100\n progress_bar.show_progress()\n print('')\n print('')\n\n # Remove insignificant clusters from the clusters dictionary and update\n # the unassigned array\n cluster_list, cluster_indices = update_clusters(self, data)\n\n # Take a second pass at the data without relaxing the linking criteria\n # to pick up any remaining stragglers not linked during the first pass\n if (np.size(self.unassigned_data_updated)>1):\n cluster_list, cluster_indices = relax_steps(self, 0, data, method, verbose, tree, n_jobs, second_pass=True)\n endhierarchy = time.time()-starthierarchy\n\n#==============================================================================#\n \"\"\"\n Secondary controlling routine for acorns implemented if the linking\n criteria are relaxed by the user\n\n \"\"\"\n\n if relaxcond and (not interactive) and (np.size(self.unassigned_data_updated)>1):\n startrelax = time.time()\n inc = self.relax/self.nsteps\n cluster_criteria_original = cluster_criteria\n for j in range(1, self.nsteps+1):\n self.cluster_criteria = get_relaxed_cluster_criteria(j*inc, cluster_criteria_original)\n cluster_list, cluster_indices = relax_steps(self, j, data, method, verbose, tree, n_jobs, second_pass=False)\n endrelax = time.time()-startrelax\n\n elif interactive and (np.size(self.unassigned_data_updated)>1):\n startrelax = time.time()\n cluster_criteria_original = cluster_criteria\n #plotting.plot_scatter(self)\n stop = True\n while (not stop): #stop != False:\n self.relax = np.array(eval(input(\"Please enter relax values in list format: \")))\n print('')\n self.cluster_criteria = get_relaxed_cluster_criteria(self.relax, 
cluster_criteria_original)\n cluster_list, cluster_indices = relax_steps(self, j, data, method, verbose, tree, n_jobs, second_pass=False)\n #plotting.plot_scatter(self)\n s = str(input(\"Would you like to continue? \"))\n print('')\n stop = s in ['True', 'T', 'true', '1', 't', 'y', 'yes', 'Y', 'Yes']\n endrelax = time.time()-startrelax\n\n else:\n startrelax = time.time()\n endrelax = time.time()-startrelax\n\n#==============================================================================#\n \"\"\"\n Tidy everything up for output\n\n \"\"\"\n\n cluster_list, cluster_indices = update_clusters(self, data)\n io.reshape_cluster_array(self, data)\n get_forest(self, verbose)\n\n end = time.time()-start\n\n if verbose:\n print('acorns took {0:0.1f} seconds for completion.'.format(end))\n print('Primary clustering took {0:0.1f} seconds for completion.'.format(endhierarchy))\n if relaxcond==True:\n print('Secondary clustering took {0:0.1f} seconds for completion.'.format(endrelax))\n print('')\n print('acorns found a total of {0} clusters.'.format(len(self.clusters)))\n print('')\n print('A total of {0} data points were used in the search.'.format(len(self.unassigned_data[0,:])))\n print('A total of {0} data points were assigned to clusters.'.format(num_links(self)))\n if (np.size(self.unassigned_data_relax)>1):\n print('A total of {0} data points remain unassigned to clusters.'.format(len(self.unassigned_data_relax[0,:])))\n else:\n print('A total of 0 data points remain unassigned to clusters.')\n print('')\n\n io.housekeeping(self)\n\n return self", "def clean():\n filter_phase_data()\n combine_phase_data()\n remove_duplicates_phase_data()", "def main():\n\n # Create argument parser\n parser = ArgumentParser()\n parser.add_argument('datadir', type=str, help='Directory of LC files')\n parser.add_argument('metatable', type=str,\n help='Metatable containing each object, redshift, peak time guess, mwebv, object type')\n parser.add_argument('--zpt', type=float, default=DEFAULT_ZPT, help='Zero point of LCs')\n parser.add_argument('--lm', type=float, default=DEFAULT_LIM_MAG, help='Survey limiting magnitude')\n parser.add_argument('--outdir', type=str, default='./products/',\n help='Path in which to save the LC data (single file)')\n args = parser.parse_args()\n\n objs, redshifts, obj_types, peaks, ebvs = read_in_meta_table(args.metatable)\n\n # Grab all the LC files in the input directory\n file_names = []\n for obj in objs:\n file_name = args.datadir + 'PS1_PS1MD_' + obj + '.snana.dat'\n file_names.append(file_name)\n\n # Create a list of LC objects from the data files\n lc_list = read_in_LC_files(file_names, objs)\n\n # This needs to be redone when retrained\n # TODO: Need to change this whenever you retrain...\n filt_dict = {'g': 0, 'r': 1, 'i': 2, 'z': 3}\n wvs = np.asarray([5460, 6800, 7450, 8700])\n\n # Update the LC objects with info from the metatable\n my_lcs = []\n for i, my_lc in enumerate(lc_list):\n my_lc.add_LC_info(zpt=args.zpt, mwebv=ebvs[i],\n redshift=redshifts[i], lim_mag=args.lm,\n obj_type=obj_types[i])\n my_lc.get_abs_mags()\n my_lc.sort_lc()\n pmjd = my_lc.find_peak(peaks[i])\n my_lc.shift_lc(pmjd)\n my_lc.correct_time_dilation()\n my_lc.filter_names_to_numbers(filt_dict)\n my_lc.correct_extinction(wvs)\n my_lc.cut_lc()\n my_lc.make_dense_LC(4)\n my_lcs.append(my_lc)\n save_lcs(my_lcs, args.outdir)", "def split_inputs(self):\n\n lca = self.lca\n inputs = self.inputs\n\n inputs_dict = {} # Only store exchanges with uncertainty\n\n # Keep track of which tech_params and bio_params 
are already included to the analysis\n # Needed to avoid running sa indices computations twice for the same tech or bio params. \n # Initialize with parameterized exchanges\n if self.parameters != None and self.ParametersModel != None:\n indices_tech_all = self.parameters_dict['tech_params_where']\n indices_bio_all = self.parameters_dict['bio_params_where']\n else:\n indices_tech_all = np.array([], dtype=int)\n indices_bio_all = np.array([], dtype=int)\n\n for input_ in inputs:\n\n if input_ == 'biosphere':\n continue\n\n inputs_dict[input_] = {}\n\n indices_tech = np.array([], dtype=int)\n indices_bio = np.array([], dtype=int)\n\n if input_ == 'technosphere':\n indices_tech = np.where(lca.tech_params['uncertainty_type']!=0)[0]\n if 'biosphere' in inputs:\n indices_bio = np.where(lca.bio_params['uncertainty_type']!=0)[0]\n\n elif input_ == 'demand_exc':\n # Select all products that pertain to activities in the given demand vector\n for act_index in np.nonzero(lca.demand_array)[0]:\n mask_tech = np.all([lca.tech_params['uncertainty_type']!=0, lca.tech_params['col']==act_index], axis=0)\n indices_tech = np.concatenate([indices_tech, np.where(mask_tech)[0]])\n if 'biosphere' in inputs:\n mask_bio = np.all([lca.bio_params['uncertainty_type']!=0, lca.bio_params['col']==act_index], axis=0)\n indices_bio = np.concatenate([indices_bio, np.where(mask_bio)[0]])\n\n elif input_ in self.databases:\n # Select all products and flows that are linked to the given database\n # Indices corresponding to exchanges in the tech_params depending on the given database\n db_act_indices_tech = [val for key,val in lca.activity_dict.items() if key[0]==input_]\n if len(db_act_indices_tech) > 0:\n db_act_index_min_tech = db_act_indices_tech[0]\n db_act_index_max_tech = db_act_indices_tech[-1]\n mask = lambda i : np.all( [lca.tech_params['uncertainty_type']!=0, \n lca.tech_params['col']==i,\n lca.tech_params['amount']!=0], axis=0 )\n indices_tech = [ np.where( mask(i) ) [0] for i in range(db_act_index_min_tech, db_act_index_max_tech+1) ]\n indices_tech = np.concatenate(indices_tech)\n\n # Indices corresponding to flows in the biosphere params depending on the given database\n if 'biosphere' in inputs:\n mask = lambda j : np.all( [lca.bio_params['uncertainty_type']!=0, lca.bio_params['col']==j], axis=0 )\n indices_bio = [ np.where(mask(j))[0] for j in range(db_act_index_min_tech, db_act_index_max_tech+1) ]\n indices_bio = np.concatenate(indices_bio)\n\n indices_tech = np.sort(indices_tech)\n indices_bio = np.sort(indices_bio)\n\n # Do not add indices_tech that are already in the indices_tech_all\n indices_tech_same = np.intersect1d(indices_tech, indices_tech_all)\n pos_tech = np.array([ np.where(indices_tech==s)[0] for s in indices_tech_same ]).flatten()\n indices_tech = np.delete(indices_tech, pos_tech)\n np.append(indices_tech_all, indices_tech)\n\n # Do not add indices_bio that are already in the indices_bio_all\n indices_bio_same = np.intersect1d(indices_bio, indices_bio_all)\n pos_bio = np.array([ np.where(indices_bio==s)[0] for s in indices_bio_same ]).flatten()\n indices_bio = np.delete(indices_bio, pos_bio)\n np.append(indices_bio_all, indices_bio)\n \n inputs_dict[input_]['tech_params'] = lca.tech_params[indices_tech] #TODO maybe remove later, indices should be sufficient\n inputs_dict[input_]['tech_params_where'] = indices_tech\n inputs_dict[input_]['tech_n_params'] = len(indices_tech) #TODO remove later\n\n inputs_dict[input_]['bio_params'] = lca.bio_params[indices_bio] #TODO maybe remove later\n 
inputs_dict[input_]['bio_params_where'] = indices_bio\n inputs_dict[input_]['bio_n_params'] = len(indices_bio)\n\n\n self.indices_tech_all = indices_tech_all #TODO remove later\n self.indices_bio_all = indices_bio_all\n self.inputs_dict = inputs_dict", "def execute():\r\n arcpy.AddMessage(\"START BCA Processing\")\r\n arcpy.env.workspace = config.temp_data_gdb\r\n arcpy.env.overwriteOutput = True\r\n sys.path.append(config.notif_system_script_folder)\r\n\r\n # Other Variables\r\n arcpy.AddMessage(\"Import toolbox\")\r\n arcpy.ImportToolbox(config.notif_toolbox)\r\n REGEX_FOR_INVALID_CHARS = re.compile(r'[^0-9a-zA-Z]+')\r\n todayDate = datetime.datetime.now().strftime(\"%Y%m%d\")\r\n logFile = file(\r\n config.report_processing_log + \"\\\\\" + todayDate + \"_NotificationSystemLog\" + \".txt\", \"a\")\r\n\r\n\r\n # get all unzipped files uploaded to shared folder\r\n configfiles = [os.path.join(dirpath, f)\r\n for dirpath, dirnames, files in os.walk(config.SharedFolder)\r\n for f in files if f.endswith('.csv') or f.endswith('.xls') or f.endswith('.xlsx') or f.endswith('.XLS')]\r\n\r\n correct_config_files = [f for f in configfiles if \"\\BCAWeeklyPermitReport\\\\\" in f]\r\n\r\n # PREPARE workspace\r\n arcpy.AddMessage(\"Preparing workspace...\")\r\n for BCAreport in correct_config_files:\r\n\r\n input_file_name = BCAreport.split(\"\\\\\")[-1]\r\n\r\n MukimConstruct = arcpy.SearchCursor(config.MukimConstructSource)\r\n PermitDateExists = False\r\n\r\n for row in MukimConstruct:\r\n aux = input_file_name[:8]\r\n if \"CORRECTED\" not in BCAreport.upper():\r\n filedate = datetime.datetime.strptime(aux, \"%Y%m%d\")\r\n else:\r\n clean_filename = input_file_name.split(\".\")[0]\r\n filedate = datetime.datetime.strptime(clean_filename[-8:], \"%Y%m%d\")\r\n if filedate == row.PERMIT_DATE and \"CORRECTED\" not in BCAreport.upper():\r\n PermitDateExists = True\r\n break\r\n if PermitDateExists and \"CORRECTED\" not in BCAreport.upper():\r\n PermitDateExistsLog = file(\r\n config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[0] +\r\n \" file's Permit Date already exists\" + \".log\",\r\n \"a\")\r\n PermitDateExistsLog.write(\r\n \"Permit Date for the file \" + input_file_name + \" already exists in Mukim Construct at \" + str(\r\n datetime.datetime.now()))\r\n logFile.writelines(\r\n \"Permit Date for the file \" + input_file_name + \" already exists in Mukim Construct at \" + str(\r\n datetime.datetime.now()) + \"\\n\")\r\n\r\n else:\r\n\r\n # 00. 
Creation of geodatabases that will serve as workspaces\r\n logFile.writelines(\"00 Creation of temp gdb starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n if arcpy.Exists(config.TempDataGDB):\r\n arcpy.Delete_management(config.TempDataGDB)\r\n arcpy.CreateFileGDB_management(config.Notification, \"Temp_data.gdb\")\r\n else:\r\n arcpy.CreateFileGDB_management(config.Notification, \"Temp_data.gdb\")\r\n\r\n if arcpy.Exists(config.SDEDataGDB):\r\n arcpy.Delete_management(config.SDEDataGDB)\r\n arcpy.CreateFileGDB_management(config.Notification, \"Source.gdb\")\r\n else:\r\n arcpy.CreateFileGDB_management(config.Notification, \"Source.gdb\")\r\n\r\n if arcpy.Exists(config.CurrentMukimConstructDataGDB):\r\n arcpy.Delete_management(config.CurrentMukimConstructDataGDB)\r\n arcpy.CreateFileGDB_management(config.Notification, \"Final_data.gdb\")\r\n else:\r\n arcpy.CreateFileGDB_management(config.Notification, \"Final_data.gdb\")\r\n\r\n logFile.writelines(\"00 Creation of temp gdb ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 01. Import the base data\r\n logFile.writelines(\"01 Import of base data starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.FeatureClassToFeatureClass_conversion(config.MukimConstructSource, config.CurrentMukimConstructDataGDB,\r\n \"MUKIM_CONSTRUCT\", \"\", \"\", \"\")\r\n arcpy.FeatureClassToFeatureClass_conversion(config.MukimConstructByProjSource, config.CurrentMukimConstructDataGDB,\r\n \"MUKIM_CONSTRUCT_BYPROJ\", \"\", \"\", \"\")\r\n arcpy.FeatureClassToFeatureClass_conversion(config.DepotSource, config.SDEDataGDB, \"DepotBoundary\", \"\", \"\", \"\")\r\n arcpy.FeatureClassToFeatureClass_conversion(config.CatchmentSource, config.SDEDataGDB, \"CatchmentBoundary\", \"\", \"\", \"\")\r\n arcpy.FeatureClassToFeatureClass_conversion(config.LandlotSource, config.TempDataGDB, \"Land_lot\", \"\", \"\", \"\")\r\n # Calculate the lot key without letter\r\n arcpy.AddField_management(config.LandLot, \"Lotkey_wo_letter\", \"TEXT\", \"\", \"\", \"10\", \"\", \"NULLABLE\", \"NON_REQUIRED\",\r\n \"\")\r\n arcpy.CalculateField_management(config.LandLot, \"Lotkey_wo_letter\", \"!lot_key![:10]\", \"PYTHON\", \"\")\r\n\r\n logFile.writelines(\"01 Import of base data ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n\r\n # START THE LOOP TO PROCESS ALL THE FILES\r\n clcounter = 0\r\n\r\n if len(correct_config_files) == 0:\r\n logFile.writelines(\"No BCA report to process at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n arcpy.AddMessage(\"Processing files...\")\r\n for BCAreport in configfiles:\r\n\r\n clcounter += 1\r\n arcpy.AddMessage(BCAreport)\r\n input_file_name = BCAreport.split(\"\\\\\")[-1]\r\n MukimConstruct = arcpy.SearchCursor(config.MukimConstructSource)\r\n PermitDateExists = False\r\n\r\n # CHEKC FILE DATE EXISTS\r\n for row in MukimConstruct:\r\n aux = input_file_name[:8]\r\n if \"CORRECTED\" not in BCAreport.upper():\r\n filedate = datetime.datetime.strptime(aux, \"%Y%m%d\")\r\n else:\r\n clean_filename = input_file_name.split(\".\")[0]\r\n filedate = datetime.datetime.strptime(clean_filename[-8:], \"%Y%m%d\")\r\n if filedate == row.PERMIT_DATE and \"CORRECTED\" not in input_file_name.upper():\r\n PermitDateExists = True\r\n break\r\n\r\n HEADERVALID = True\r\n with xlrd.open_workbook(BCAreport) as wb:\r\n sh = wb.sheet_by_index(0)\r\n for r in range(sh.nrows):\r\n colcount = 0\r\n if sh.row_values(r)[colcount] == 'Error_Message':\r\n HEADERVALID = True\r\n elif sh.row_values(r)[colcount] == 'Project Ref No' or 
sh.row_values(r)[colcount] == 'Project_Ref_No':\r\n HEADERVALID = True\r\n else:\r\n PermitDateExistsLog = file(config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[\r\n 0] + \" file's header format is not acceptable for processing\" + \".log\", \"a\")\r\n PermitDateExistsLog.write(\r\n \"The header format for the file \" + input_file_name + \" is not acceptable for processing at \" + str(\r\n datetime.datetime.now()))\r\n logFile.writelines(\r\n \"The header format for the file \" + input_file_name + \" is not acceptable for processing at \" + str(\r\n datetime.datetime.now()) + \"\\n\")\r\n HEADERVALID = False\r\n break\r\n\r\n if not PermitDateExists and HEADERVALID:\r\n logFile.writelines(\"Starts processing \" + BCAreport + \" at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # Status update to run/not run the SiteInspection Update\r\n Log_SiteInspectionUpdate = file(config.SiteInspectionUpdate, \"w\")\r\n Log_SiteInspectionUpdate.writelines(\"NO\")\r\n Log_SiteInspectionUpdate.close()\r\n\r\n # 02. Import the BCA report to a geodatabase table\r\n logFile.writelines(\"02 Import of table to gdb starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n if arcpy.Exists(config.TempDataGDB + \"\\\\ConvertedBCAreport_02\"):\r\n arcpy.Delete_management(config.TempDataGDB + \"\\\\ConvertedBCAreport_02\")\r\n arcpy.CreateTable_management(config.TempDataGDB, \"ConvertedBCAreport_02\", config.TemplateConvertedBCAreport)\r\n else:\r\n arcpy.CreateTable_management(config.TempDataGDB, \"ConvertedBCAreport_02\", config.TemplateConvertedBCAreport)\r\n if arcpy.Exists(BCAreport[:-5] + '_err' + '.csv'):\r\n # rename old error report\r\n os.remove(BCAreport[:-5] + '_err' + '.csv')\r\n else:\r\n result = \"Error file does not exist\"\r\n if BCAreport.endswith('.xls') or BCAreport.endswith('.xlsx') or BCAreport.endswith('.XLS'):\r\n rows_out = arcpy.InsertCursor(config.BCAReportGDBTable)\r\n fldlist = arcpy.ListFields(config.BCAReportGDBTable)\r\n fldlist.pop(0)\r\n with xlrd.open_workbook(BCAreport) as wb:\r\n sh = wb.sheet_by_index(0)\r\n for r in range(sh.nrows):\r\n colcount = 0\r\n if sh.row_values(r)[colcount] != 'Error_Message':\r\n colcount = 0\r\n else:\r\n colcount = 1\r\n break\r\n for r in range(sh.nrows):\r\n colcounter = colcount\r\n if r > 0:\r\n new_row_out = rows_out.newRow()\r\n for efld in fldlist:\r\n if efld.name <> 'OBJECTID' and efld.name <> 'ConcatFields':\r\n new_row_out.setValue(efld.name, sh.row_values(r)[colcounter])\r\n colcounter += 1\r\n\r\n logFile.writelines(\"Inserting: \" + str(new_row_out) + \"\\n\")\r\n rows_out.insertRow(new_row_out)\r\n del rows_out, new_row_out\r\n\r\n elif BCAreport.endswith('.csv'):\r\n\r\n BCAreportread = csv.DictReader(open(BCAreport, 'rb'), delimiter=',', quotechar='\"')\r\n rows_out = arcpy.InsertCursor(config.BCAReportGDBTable)\r\n for attribute in BCAreportread:\r\n new_row_out = rows_out.newRow()\r\n new_row_out.Project_Ref_No = attribute['Project_Ref_No']\r\n new_row_out.Project_Title = attribute['Project_Title']\r\n new_row_out.House_Blk_No = attribute['House_Blk_No']\r\n new_row_out.Road_Name = attribute['Road_Name']\r\n new_row_out.Level_No = attribute['Level_No']\r\n new_row_out.Unit_No = attribute['Unit_No']\r\n new_row_out.Building_Name = attribute['Building_Name']\r\n new_row_out.Postal_Code = attribute['Postal_Code']\r\n new_row_out.Project_Mukim_nos = attribute['Project_Mukim_nos']\r\n new_row_out.Project_Lot_nos = attribute['Project_Lot_nos']\r\n new_row_out.Permit_Type_of_Work = 
attribute['Permit_Type_of_Work']\r\n new_row_out.Type_of_Work = attribute['Type_of_Work']\r\n new_row_out.Owner_s_name = attribute['Owners_name']\r\n new_row_out.Owner_s_firm_name = attribute['Owners_firm_name']\r\n new_row_out.Owner_s_address = attribute['Owners_address']\r\n new_row_out.Owner_s_Tel_No = attribute['Owners_Tel_No']\r\n new_row_out.Owner_s_Email_address = attribute['Owners_Email_address']\r\n new_row_out.Builder_s_name = attribute['Builders_name']\r\n new_row_out.Builder_s_firm_name = attribute['Builders_firm_name']\r\n new_row_out.Builder_s_address = attribute['Builders_address']\r\n new_row_out.Builder_s_Tel_No = attribute['Builders_Tel_No']\r\n new_row_out.Builder_s_email_address = attribute['Builders_email_address']\r\n new_row_out.PE_s_name = attribute['PEs_name']\r\n new_row_out.PE_s_firm_name = attribute['PEs_firm_name']\r\n new_row_out.PE_s_address = attribute['PEs_address']\r\n new_row_out.PE_s_Tel_No = attribute['PEs_Tel_No']\r\n new_row_out.PE_s_Email_address = attribute['PEs_Email_address']\r\n new_row_out.Architect_s_name = attribute['Architects_name']\r\n new_row_out.Architect_s_firm_name = attribute['Architects_firm_name']\r\n new_row_out.Architect_s_address = attribute['Architects_address']\r\n new_row_out.Architect_s_Tel_No = attribute['Architects_Tel_No']\r\n new_row_out.Architect_s_Email_address = attribute['Architects_Email_address']\r\n new_row_out.Project_Cost = attribute['Project_Cost']\r\n new_row_out.Project_Duration = attribute['Project_Duration']\r\n new_row_out.Approval_Date_DD_MM_YYYY_ = attribute['Approval_Date']\r\n rows_out.insertRow(new_row_out)\r\n if new_row_out:\r\n del new_row_out\r\n if rows_out:\r\n del rows_out\r\n\r\n except:\r\n log_error(\"Error in 02 Import of table to gdb: \", logFile)\r\n logFile.writelines(\"02 Import of table to gdb ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 03. Remove spaces in key fields for the concatenation\r\n logFile.writelines(\"03 Removing of spaces starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n rowsSpace = arcpy.UpdateCursor(config.BCAReportGDBTable)\r\n\r\n for row in rowsSpace:\r\n ProjRef = row.Project_Ref_No.strip()\r\n ProjMukim = row.Project_Mukim_nos.strip()\r\n ProjLot = row.Project_Lot_nos.strip()\r\n BuilderN = row.Builder_s_name.strip()\r\n row.Project_Ref_No = ProjRef\r\n row.Project_Mukim_nos = ProjMukim\r\n row.Project_Lot_nos = ProjLot\r\n row.Builder_s_name = BuilderN\r\n rowsSpace.updateRow(row)\r\n if row:\r\n del row\r\n if rowsSpace:\r\n del rowsSpace\r\n except:\r\n log_error(\"Error in 03 Removing of spaces: \", logFile)\r\n logFile.writelines(\"03 Removing of spaces ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 04. Concatenate Project_Ref_No, Project_Mukim_nos, Project_Lot_nos, Builder_s_name\r\n logFile.writelines(\"04 Concatenate the three fields starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n rows = arcpy.UpdateCursor(config.BCAReportGDBTable)\r\n for row in rows:\r\n expression = str(row.Project_Ref_No) + \"-\" + str(row.Project_Mukim_nos) + \"-\" + str(\r\n row.Project_Lot_nos) + \"-\" + str(row.Builder_s_name)\r\n row.ConcatFields = expression\r\n rows.updateRow(row)\r\n if row:\r\n del row\r\n if rows:\r\n del rows\r\n\r\n except:\r\n log_error(\"Error in 04 Concatenate the three fields: \", logFile)\r\n logFile.writelines(\"04 Concatenate the three fields ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 05. 
Create temporary tables for Unique and Duplicate records\r\n logFile.writelines(\"05 Create temporary tables starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n if arcpy.Exists(config.TempDataGDB + \"\\\\Uniquerows\"):\r\n arcpy.Delete_management(config.TempDataGDB + \"\\\\Uniquerows\")\r\n arcpy.CreateTable_management(config.TempDataGDB, \"Uniquerows\", config.TemplateConcat, \"\")\r\n else:\r\n arcpy.CreateTable_management(config.TempDataGDB, \"Uniquerows\", config.TemplateConcat, \"\")\r\n\r\n if arcpy.Exists(config.TempDataGDB + \"\\\\Duplicaterows\"):\r\n arcpy.Delete_management(config.TempDataGDB + \"\\\\Duplicaterows\")\r\n arcpy.CreateTable_management(config.TempDataGDB, \"Duplicaterows\", config.TemplateConcat, \"\")\r\n else:\r\n arcpy.CreateTable_management(config.TempDataGDB, \"Duplicaterows\", config.TemplateConcat, \"\")\r\n except:\r\n log_error(\"Error in 05 Create temporary tables: \", logFile)\r\n logFile.writelines(\"05 Create temporary tables ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 06. Separate unique and duplicate records\r\n logFile.writelines(\"06 Separate unique and duplicate rows starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n print \"Start step 06\"\r\n rows_inCB02 = arcpy.UpdateCursor(config.BCAReportGDBTable)\r\n rows_outUnique = arcpy.InsertCursor(config.UniqueRecords)\r\n # print rows_outUnique\r\n rows_outDuplicate = arcpy.InsertCursor(config.DuplicateRecords)\r\n\r\n rows_unique = []\r\n rows_duplicates = []\r\n for row in rows_inCB02:\r\n if row.ConcatFields not in rows_unique:\r\n rows_unique = rows_unique + [row.ConcatFields]\r\n else:\r\n rows_duplicates = rows_duplicates + [row.ConcatFields]\r\n\r\n print \"Start step 06 1\"\r\n for item in rows_unique:\r\n print \"clcounter: \" + str(clcounter)\r\n print \"item: \" + str(item)\r\n newrow = rows_outUnique.newRow()\r\n newrow.Concat = item\r\n # print newrow\r\n rows_outUnique.insertRow(newrow)\r\n\r\n print \"Start step 06 2\"\r\n for item in rows_duplicates:\r\n print \"clcounter: \" + str(clcounter)\r\n print \"item: \" + str(item)\r\n newrow = rows_outDuplicate.newRow()\r\n newrow.Concat = item\r\n rows_outDuplicate.insertRow(newrow)\r\n\r\n print \"Start step 06 3\"\r\n\r\n if rows_inCB02:\r\n del rows_inCB02\r\n if rows_outUnique:\r\n del rows_outUnique\r\n if rows_outDuplicate:\r\n del rows_outDuplicate\r\n if row:\r\n del row\r\n except:\r\n log_error(\"Error in 06 Separate unique and duplicate rows: \", logFile)\r\n logFile.writelines(\"06 Separate unique and duplicate rows ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 07. Get the rest of the fields for Uniquerows table\r\n logFile.writelines(\r\n \"07 Get the rest of the fields for unique rows starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.env.workspace = config.TempDataGDB\r\n arcpy.AddMessage(\"Starting toolbox JoinUniqueRestofFields\")\r\n\r\n try:\r\n arcpy.JoinUniqueRestofFields()\r\n except:\r\n log_error(\"Error in 07 Get the rest of the fields for unique rows: \", logFile)\r\n logFile.writelines(\r\n \"07 Get the rest of the fields for unique rows ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 08. 
Get the rest of the fields for Duplicaterows table\r\n logFile.writelines(\r\n \"08 Get the rest of the fields for duplicate rows starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.AddMessage(\"START toolbox JoinDuplicateRestofFields\")\r\n try:\r\n arcpy.JoinDuplicateRestofFields()\r\n\r\n except:\r\n log_error(\"Error in 08 Get the rest of the fields for duplicate rows: \", logFile)\r\n\r\n logFile.writelines(\r\n \"08 Get the rest of the fields for duplicate rows ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 09. Log duplicate records\r\n logFile.writelines(\"09 Log duplicate records starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.AddMessage(\"Logging duplicate records\")\r\n try:\r\n # Initialize the error log\r\n wbk = xlwt.Workbook()\r\n sheet = wbk.add_sheet('Book 1')\r\n row_count = 0\r\n col_count = 0\r\n header = ['Error_Message', 'Project_Ref_No', 'Project_Title', 'House_Blk_No', 'Road_Name', 'Level_No',\r\n 'Unit_No', 'Building_Name', 'Postal_Code', 'Project_Mukim_nos', 'Project_Lot_nos',\r\n 'Permit_Type_of_Work', 'Type_of_Work', 'Owners_name', 'Owners_firm_name', 'Owners_address',\r\n 'Owners_Tel_No', 'Owners_Email_address', 'Builders_name', 'Builders_firm_name',\r\n 'Builders_address', 'Builders_Tel_No', 'Builders_email_address', 'PEs_name', 'PEs_firm_name',\r\n 'PEs_address', 'PEs_Tel_No', 'PEs_Email_address', 'Architects_name', 'Architects_firm_name',\r\n 'Architects_address', 'Architects_Tel_No', 'Architects_Email_address', 'Project_Cost',\r\n 'Project_Duration', 'Approval_Date']\r\n for fieldname in header:\r\n sheet.write(row_count, col_count, fieldname)\r\n col_count += 1\r\n wbk.save(config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \"_err\" + \".xls\")\r\n\r\n # Log duplicate records\r\n rows = arcpy.SearchCursor(config.DuplicateRows)\r\n\r\n row_count = 1\r\n col_count = 0\r\n row = None\r\n for row in rows:\r\n message = ['Duplicate record in the BCA report', row.Project_Ref_No, row.Project_Title,\r\n row.House_Blk_No, row.Road_Name, row.Level_No, row.Unit_No, row.Building_Name,\r\n row.Postal_Code, row.Project_Mukim_nos, row.Project_Lot_nos, row.Permit_Type_of_Work,\r\n row.Type_of_Work, row.Owner_s_name, row.Owner_s_firm_name, row.Owner_s_address,\r\n row.Owner_s_Tel_No, row.Owner_s_Email_address, row.Builder_s_name,\r\n row.Builder_s_firm_name, row.Builder_s_address, row.Builder_s_Tel_No,\r\n row.Builder_s_email_address, row.PE_s_name, row.PE_s_firm_name, row.PE_s_address,\r\n row.PE_s_Tel_No, row.PE_s_Email_address, row.Architect_s_name, row.Architect_s_firm_name,\r\n row.Architect_s_address, row.Architect_s_Tel_No, row.Architect_s_Email_address,\r\n row.Project_Cost, row.Project_Duration, row.Approval_Date_DD_MM_YYYY_]\r\n col_count = 0\r\n for element in message:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n wbk.save(config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \"_err\" + \".xls\")\r\n if row:\r\n del row\r\n if rows:\r\n del rows\r\n except:\r\n log_error(\"Error in 09 Log duplicate records: \", logFile)\r\n\r\n logFile.writelines(\"09 Log duplicate records ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 10. 
Split rows based on Mukim numbers\r\n logFile.writelines(\"10 Splitting of rows based on mukim starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n if arcpy.Exists(config.SplittedMukimRows):\r\n arcpy.Delete_management(config.SplittedMukimRows)\r\n arcpy.CreateTable_management(config.TempDataGDB, \"Splitted_rows_mukim_03\", config.TemplateBCAReport, \"\")\r\n else:\r\n arcpy.CreateTable_management(config.TempDataGDB, \"Splitted_rows_mukim_03\", config.TemplateBCAReport, \"\")\r\n\r\n if arcpy.Exists(config.SplittedProjLotRows):\r\n arcpy.Delete_management(config.SplittedProjLotRows)\r\n arcpy.CreateTable_management(config.TempDataGDB, \"Splitted_rows_projlot_04\", config.TemplateBCAReport, \"\")\r\n else:\r\n arcpy.CreateTable_management(config.TempDataGDB, \"Splitted_rows_projlot_04\", config.TemplateBCAReport, \"\")\r\n\r\n rows_in = arcpy.SearchCursor(config.UniqueRows)\r\n rows_out = arcpy.InsertCursor(config.SplittedMukimRows)\r\n\r\n for row in rows_in:\r\n list_mukim_nos = row.Project_Mukim_nos.split(\",\")\r\n for proj_mukim_nos_id in list_mukim_nos:\r\n new_row_out = rows_out.newRow()\r\n new_row_out.Project_Mukim_nos = proj_mukim_nos_id\r\n new_row_out.PROJECTMUKIM_RAW = row.Project_Mukim_nos\r\n new_row_out.Project_Ref_No = row.Project_Ref_No\r\n new_row_out.Project_Title = row.Project_Title\r\n new_row_out.House_Blk_No = row.House_Blk_No\r\n new_row_out.Road_Name = row.Road_Name\r\n new_row_out.Level_No = row.Level_No\r\n new_row_out.Unit_No = row.Unit_No\r\n new_row_out.Building_Name = row.Building_Name\r\n new_row_out.Postal_Code = row.Postal_Code\r\n new_row_out.Project_Lot_nos = row.Project_Lot_nos\r\n new_row_out.Permit_Type_of_Work = row.Permit_Type_of_Work\r\n new_row_out.Type_of_Work = row.Type_of_Work\r\n new_row_out.Owner_s_name = row.Owner_s_name\r\n new_row_out.Owner_s_firm_name = row.Owner_s_firm_name\r\n new_row_out.Owner_s_address = row.Owner_s_address\r\n new_row_out.Owner_s_Tel_No = row.Owner_s_Tel_No\r\n new_row_out.Owner_s_Email_address = row.Owner_s_Email_address\r\n new_row_out.Builder_s_name = row.Builder_s_name\r\n new_row_out.Builder_s_firm_name = row.Builder_s_firm_name\r\n new_row_out.Builder_s_address = row.Builder_s_address\r\n new_row_out.Builder_s_Tel_No = row.Builder_s_Tel_No\r\n new_row_out.Builder_s_email_address = row.Builder_s_email_address\r\n new_row_out.PE_s_name = row.PE_s_name\r\n new_row_out.PE_s_firm_name = row.PE_s_firm_name\r\n new_row_out.PE_s_address = row.PE_s_address\r\n new_row_out.PE_s_Tel_No = row.PE_s_Tel_No\r\n new_row_out.PE_s_Email_address = row.PE_s_Email_address\r\n new_row_out.Architect_s_name = row.Architect_s_name\r\n new_row_out.Architect_s_firm_name = row.Architect_s_firm_name\r\n new_row_out.Architect_s_address = row.Architect_s_address\r\n new_row_out.Architect_s_Tel_No = row.Architect_s_Tel_No\r\n new_row_out.Architect_s_Email_address = row.Architect_s_Email_address\r\n new_row_out.Project_Cost = row.Project_Cost\r\n new_row_out.Project_Duration = row.Project_Duration\r\n new_row_out.Approval_Date_DD_MM_YYYY_ = row.Approval_Date_DD_MM_YYYY_\r\n rows_out.insertRow(new_row_out)\r\n if row:\r\n del row\r\n if new_row_out:\r\n del new_row_out\r\n if rows_in:\r\n del rows_in\r\n if rows_out:\r\n del rows_out\r\n except:\r\n log_error(\"Error in 10 Splitting of rows based on mukim: \", logFile)\r\n\r\n logFile.writelines(\"10 Splitting of rows based on mukim ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 11.Split rows based on Project lot numbers\r\n arcpy.AddMessage(\"Splitting rows 
based on project lots\")\r\n\r\n logFile.writelines(\r\n \"11 Splitting of rows based on project lot starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n rows_in03 = arcpy.SearchCursor(config.SplittedMukimRows)\r\n rows_out04 = arcpy.InsertCursor(config.SplittedProjLotRows)\r\n\r\n for row in rows_in03:\r\n list_proj_lot_nos = row.Project_Lot_nos.split(\",\")\r\n print list_proj_lot_nos\r\n for proj_lot_nos_id in list_proj_lot_nos:\r\n print proj_lot_nos_id\r\n new_row_out = rows_out04.newRow()\r\n new_row_out.Project_Lot_nos = proj_lot_nos_id\r\n new_row_out.PROJECTMUKIM_RAW = row.PROJECTMUKIM_RAW\r\n new_row_out.PROJECTLOT_RAW = row.Project_Lot_nos\r\n new_row_out.Project_Ref_No = row.Project_Ref_No\r\n new_row_out.Project_Title = row.Project_Title\r\n new_row_out.House_Blk_No = row.House_Blk_No\r\n new_row_out.Road_Name = row.Road_Name\r\n new_row_out.Level_No = row.Level_No\r\n new_row_out.Unit_No = row.Unit_No\r\n new_row_out.Building_Name = row.Building_Name\r\n new_row_out.Postal_Code = row.Postal_Code\r\n new_row_out.Project_Mukim_nos = row.Project_Mukim_nos\r\n new_row_out.Permit_Type_of_Work = row.Permit_Type_of_Work\r\n new_row_out.Type_of_Work = row.Type_of_Work\r\n new_row_out.Owner_s_name = row.Owner_s_name\r\n new_row_out.Owner_s_firm_name = row.Owner_s_firm_name\r\n new_row_out.Owner_s_address = row.Owner_s_address\r\n new_row_out.Owner_s_Tel_No = row.Owner_s_Tel_No\r\n new_row_out.Owner_s_Email_address = row.Owner_s_Email_address\r\n new_row_out.Builder_s_name = row.Builder_s_name\r\n new_row_out.Builder_s_firm_name = row.Builder_s_firm_name\r\n new_row_out.Builder_s_address = row.Builder_s_address\r\n new_row_out.Builder_s_Tel_No = row.Builder_s_Tel_No\r\n new_row_out.Builder_s_email_address = row.Builder_s_email_address\r\n new_row_out.PE_s_name = row.PE_s_name\r\n new_row_out.PE_s_firm_name = row.PE_s_firm_name\r\n new_row_out.PE_s_address = row.PE_s_address\r\n new_row_out.PE_s_Tel_No = row.PE_s_Tel_No\r\n new_row_out.PE_s_Email_address = row.PE_s_Email_address\r\n new_row_out.Architect_s_name = row.Architect_s_name\r\n new_row_out.Architect_s_firm_name = row.Architect_s_firm_name\r\n new_row_out.Architect_s_address = row.Architect_s_address\r\n new_row_out.Architect_s_Tel_No = row.Architect_s_Tel_No\r\n new_row_out.Architect_s_Email_address = row.Architect_s_Email_address\r\n new_row_out.Project_Cost = row.Project_Cost\r\n new_row_out.Project_Duration = row.Project_Duration\r\n new_row_out.Approval_Date_DD_MM_YYYY_ = row.Approval_Date_DD_MM_YYYY_\r\n rows_out04.insertRow(new_row_out)\r\n\r\n if row:\r\n del row\r\n if new_row_out:\r\n del new_row_out\r\n if rows_in03:\r\n del rows_in03\r\n if rows_out04:\r\n del rows_out04\r\n # print int(arcpy.GetCount_management(SplittedProjLotRows).getOutput(0))\r\n except:\r\n log_error(\"Error in 11 Splitting of rows based on project lot: \", logFile)\r\n logFile.writelines(\r\n \"11 Splitting of rows based on project lot ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 12. 
Remove spaces in Mukim and Project lot values\r\n logFile.writelines(\r\n \"12 Removing of spaces in mukim and project lot starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.AddMessage(\"Cleaning project lots\")\r\n try:\r\n\r\n rowsSpaces = arcpy.UpdateCursor(config.SplittedProjLotRows)\r\n\r\n for row in rowsSpaces:\r\n lot_no_spaces = row.Project_Lot_nos.strip()\r\n mukim_no_spaces = row.Project_Mukim_nos.strip()\r\n row.Project_Lot_nos = lot_no_spaces\r\n row.Project_Mukim_nos = mukim_no_spaces\r\n rowsSpaces.updateRow(row)\r\n if row:\r\n del row\r\n if rowsSpaces:\r\n del rowsSpaces\r\n except:\r\n log_error(\"Error in 12 Removing of spaces in mukim and project lot: \", logFile)\r\n logFile.writelines(\r\n \"12 Removing of spaces in mukim and project lot ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 13. Log empty Mukimlot or date fields\r\n logFile.writelines(\r\n \"13 Log empty mukim and project lot nos starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n rowsEmpty = arcpy.UpdateCursor(config.SplittedProjLotRows)\r\n\r\n for row in rowsEmpty:\r\n message = ['Missing Project lot or Mukim numbers', row.Project_Ref_No, row.Project_Title,\r\n row.House_Blk_No, row.Road_Name, row.Level_No, row.Unit_No, row.Building_Name,\r\n row.Postal_Code, row.Project_Mukim_nos, row.Project_Lot_nos, row.Permit_Type_of_Work,\r\n row.Type_of_Work, row.Owner_s_name, row.Owner_s_firm_name, row.Owner_s_address,\r\n row.Owner_s_Tel_No, row.Owner_s_Email_address, row.Builder_s_name,\r\n row.Builder_s_firm_name, row.Builder_s_address, row.Builder_s_Tel_No,\r\n row.Builder_s_email_address, row.PE_s_name, row.PE_s_firm_name, row.PE_s_address,\r\n row.PE_s_Tel_No, row.PE_s_Email_address, row.Architect_s_name, row.Architect_s_firm_name,\r\n row.Architect_s_address, row.Architect_s_Tel_No, row.Architect_s_Email_address,\r\n row.Project_Cost, row.Project_Duration, row.Approval_Date_DD_MM_YYYY_]\r\n message2 = ['Missing Project duration or Approval date', row.Project_Ref_No, row.Project_Title,\r\n row.House_Blk_No, row.Road_Name, row.Level_No, row.Unit_No, row.Building_Name,\r\n row.Postal_Code, row.Project_Mukim_nos, row.Project_Lot_nos, row.Permit_Type_of_Work,\r\n row.Type_of_Work, row.Owner_s_name, row.Owner_s_firm_name, row.Owner_s_address,\r\n row.Owner_s_Tel_No, row.Owner_s_Email_address, row.Builder_s_name,\r\n row.Builder_s_firm_name, row.Builder_s_address, row.Builder_s_Tel_No,\r\n row.Builder_s_email_address, row.PE_s_name, row.PE_s_firm_name, row.PE_s_address,\r\n row.PE_s_Tel_No, row.PE_s_Email_address, row.Architect_s_name,\r\n row.Architect_s_firm_name, row.Architect_s_address, row.Architect_s_Tel_No,\r\n row.Architect_s_Email_address, row.Project_Cost, row.Project_Duration,\r\n row.Approval_Date_DD_MM_YYYY_]\r\n if row.Project_Mukim_nos is None or (len(row.Project_Mukim_nos) < 4):\r\n col_count = 0\r\n for element in message:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n rowsEmpty.deleteRow(row)\r\n elif row.Project_Lot_nos is None or (len(row.Project_Lot_nos) == 0):\r\n col_count = 0\r\n for element in message:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n rowsEmpty.deleteRow(row)\r\n if row.Project_Duration is None or (len(row.Project_Duration) < 1):\r\n col_count = 0\r\n for element in message2:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n rowsEmpty.deleteRow(row)\r\n\r\n elif row.Approval_Date_DD_MM_YYYY_ is None or 
(len(row.Approval_Date_DD_MM_YYYY_) < 1):\r\n col_count = 0\r\n for element in message2:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n rowsEmpty.deleteRow(row)\r\n wbk.save(config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \"_err\" + \".xls\")\r\n if row:\r\n del row\r\n if rowsEmpty:\r\n del rowsEmpty\r\n except:\r\n log_error(\"Error in 13 Log for empty mukim and project lot nos: \", logFile)\r\n logFile.writelines(\"13 Log empty mukim and project lot nos ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 14. Error log for those with bad values\r\n arcpy.AddMessage(\"14 Logging bad values\")\r\n logFile.writelines(\"14 Log if bad values exist starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n rowsBadValues = arcpy.UpdateCursor(config.SplittedProjLotRows)\r\n\r\n for row in rowsBadValues:\r\n message = ['Mukim or Project lot numbers have bad values', row.Project_Ref_No, row.Project_Title,\r\n row.House_Blk_No, row.Road_Name, row.Level_No, row.Unit_No, row.Building_Name,\r\n row.Postal_Code, row.Project_Mukim_nos, row.Project_Lot_nos, row.Permit_Type_of_Work,\r\n row.Type_of_Work, row.Owner_s_name, row.Owner_s_firm_name, row.Owner_s_address,\r\n row.Owner_s_Tel_No, row.Owner_s_Email_address, row.Builder_s_name,\r\n row.Builder_s_firm_name, row.Builder_s_address, row.Builder_s_Tel_No,\r\n row.Builder_s_email_address, row.PE_s_name, row.PE_s_firm_name, row.PE_s_address,\r\n row.PE_s_Tel_No, row.PE_s_Email_address, row.Architect_s_name, row.Architect_s_firm_name,\r\n row.Architect_s_address, row.Architect_s_Tel_No, row.Architect_s_Email_address,\r\n row.Project_Cost, row.Project_Duration, row.Approval_Date_DD_MM_YYYY_]\r\n if len(REGEX_FOR_INVALID_CHARS.findall(row.Project_Lot_nos)) > 0:\r\n col_count = 0\r\n for element in message:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n rowsBadValues.deleteRow(row)\r\n elif len(REGEX_FOR_INVALID_CHARS.findall(row.Project_Mukim_nos)) > 0:\r\n col_count = 0\r\n for element in message:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n rowsBadValues.deleteRow(row)\r\n elif len(uptodigit(row.Project_Lot_nos)) > 0:\r\n col_count = 0\r\n for element in message:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n rowsBadValues.deleteRow(row)\r\n wbk.save(config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \"_err\" + \".xls\")\r\n\r\n if row:\r\n del row\r\n if rowsBadValues:\r\n del rowsBadValues\r\n except:\r\n log_error(\"Error in 14 Log if bad values exist: \", logFile)\r\n logFile.writelines(\"14 Log if bad values exist ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 15. Add zeros for Project Lot numbers\r\n logFile.writelines(\"15 Add zeros starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n rowsZeros = arcpy.UpdateCursor(config.SplittedProjLotRows)\r\n letters = string.ascii_letters\r\n for row in rowsZeros:\r\n letter_count = len(filter(functools.partial(operator.contains, letters), row.Project_Lot_nos))\r\n filled_string = row.Project_Lot_nos.zfill(5 + letter_count)\r\n row.Project_Lot_nos = filled_string\r\n rowsZeros.updateRow(row)\r\n if row:\r\n del row\r\n if rowsZeros:\r\n del rowsZeros\r\n except:\r\n log_error(\"Error in 15 Add zeros: \", logFile)\r\n logFile.writelines(\"15 Add zeros ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 16. 
Add and populate fields Mukim_Lot_No, Mukimlot_wo_letter, and Permit_date\r\n logFile.writelines(\"16 Add and populate fields starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n rowsPop = arcpy.UpdateCursor(config.SplittedProjLotRows)\r\n for row in rowsPop:\r\n expression = str(row.Project_Mukim_nos) + \"-\" + str(row.Project_Lot_nos)\r\n row.Mukim_Lot_No = expression\r\n date = filedate.strftime(\"%Y%m%d\")\r\n year = int(date[:4])\r\n month = int(date[4:6])\r\n day = int(date[6:8])\r\n permit_date = datetime.datetime(year, month, day)\r\n row.Permit_date = permit_date\r\n rowsPop.updateRow(row)\r\n if row:\r\n del row\r\n if rowsPop:\r\n del rowsPop\r\n # Calculate Mukimlot_wo_letter\r\n arcpy.CalculateField_management(config.SplittedProjLotRows, \"Mukimlot_wo_letter\", \"!Mukim_Lot_No![:10]\",\r\n \"PYTHON_9.3\", \"\")\r\n\r\n except:\r\n log_error(\"Error in 16 Add and populate fields: \", logFile)\r\n logFile.writelines(\"16 Add and populate fields ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 17.Match mukim lot and land lot\r\n logFile.writelines(\"17 Match mukim lot with landlot starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n arcpy.MatchMukimLandLot()\r\n except:\r\n log_error(\"Error in 17 Match mukim lot with landlot: \", logFile)\r\n logFile.writelines(\"17 Match mukim lot with landlot ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 18.Get unmatched mukim lot with land lot\r\n logFile.writelines(\"18 Get unmatched mukim lot starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.AddMessage(\"18 Get unmatched mukim lot\")\r\n try:\r\n arcpy.GetUnmatchedMukimLot()\r\n\r\n except:\r\n log_error(\"Error in 18 Get unmatched mukim lot: \", logFile)\r\n\r\n logFile.writelines(\"18 Get unmatched mukim lot ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 19. 
Log errors for unmatched mukim lots\r\n logFile.writelines(\"19 Log unmatched mukim lot starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n rowsUnmatched = arcpy.SearchCursor(config.UnmatchedMukimLot)\r\n row = None\r\n\r\n for row in rowsUnmatched:\r\n message = ['Unmatched mukim lot with the land lot', row.Project_Ref_No, row.Project_Title,\r\n row.House_Blk_No, row.Road_Name, row.Level_No, row.Unit_No, row.Building_Name,\r\n row.Postal_Code, row.Project_Mukim_nos, row.Project_Lot_nos, row.Permit_Type_of_Work,\r\n row.Type_of_Work, row.Owner_s_name, row.Owner_s_firm_name, row.Owner_s_address,\r\n row.Owner_s_Tel_No, row.Owner_s_Email_address, row.Builder_s_name,\r\n row.Builder_s_firm_name, row.Builder_s_address, row.Builder_s_Tel_No,\r\n row.Builder_s_email_address, row.PE_s_name, row.PE_s_firm_name, row.PE_s_address,\r\n row.PE_s_Tel_No, row.PE_s_Email_address, row.Architect_s_name, row.Architect_s_firm_name,\r\n row.Architect_s_address, row.Architect_s_Tel_No, row.Architect_s_Email_address,\r\n row.Project_Cost, row.Project_Duration, row.Approval_Date_DD_MM_YYYY_]\r\n col_count = 0\r\n for element in message:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n wbk.save(config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \"_err\" + \".xls\")\r\n if row:\r\n del row\r\n if rowsUnmatched:\r\n del rowsUnmatched\r\n\r\n with xlrd.open_workbook(config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \"_err\" + \".xls\") as wb:\r\n sh = wb.sheet_by_index(0)\r\n if sh.nrows == 1:\r\n os.remove(config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \"_err\" + \".xls\")\r\n\r\n except arcpy.ExecuteError:\r\n log_error(\"Error in 19 Log unmatched mukim lot: \", logFile)\r\n logFile.writelines(\"19 Log unmatched mukim lot ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 20. Prepare the table for MukimConstruct matching (add required fields)\r\n logFile.writelines(\"20 Add fields to be used for matching starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n if arcpy.Exists(config.MUKIMCONSTRUCTImport):\r\n arcpy.Delete_management(config.MUKIMCONSTRUCTImport)\r\n arcpy.FeatureClassToFeatureClass_conversion(config.MukimConstructSource, config.TempDataGDB,\r\n \"MUKIM_CONSTRUCT_Import\")\r\n else:\r\n arcpy.FeatureClassToFeatureClass_conversion(config.MukimConstructSource, config.TempDataGDB,\r\n \"MUKIM_CONSTRUCT_Import\")\r\n\r\n arcpy.AddField_management(config.MatchedMukimLot, \"Concat_4fields\", \"Text\", \"\", \"\", \"\")\r\n arcpy.AddField_management(config.MUKIMCONSTRUCTImport, \"Concat_4fields\", \"Text\", \"\", \"\", \"\")\r\n arcpy.AddField_management(config.MatchedMukimLot, \"PROJ_DURATION_MTHS2\", \"Double\", \"\", \"\", \"\")\r\n except:\r\n log_error(\"Error in 20 Add fields to be used for matching: \", logFile)\r\n logFile.writelines(\"20 Add fields to be used for matching ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 21. 
Calculate Project Duration as months\r\n logFile.writelines(\"21 Calculate PROJ_DURATION as months starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n rowsProjDur = arcpy.UpdateCursor(config.MatchedMukimLot)\r\n\r\n for row in rowsProjDur:\r\n durationstr = row.PROJ_DURATION_MTHS\r\n if \"Month\" in row.PROJ_DURATION_MTHS:\r\n durationintmth = int(durationstr.split(' ')[0])\r\n row.PROJ_DURATION_MTHS2 = durationintmth\r\n elif \"Year\" in row.PROJ_DURATION_MTHS:\r\n durationintyr = int(durationstr.split(' ')[0]) * 12\r\n row.PROJ_DURATION_MTHS2 = durationintyr\r\n rowsProjDur.updateRow(row)\r\n if rowsProjDur:\r\n del rowsProjDur\r\n if row:\r\n del row\r\n\r\n arcpy.DeleteField_management(config.MatchedMukimLot, \"PROJ_DURATION_MTHS\")\r\n arcpy.AddField_management(config.MatchedMukimLot, \"PROJ_DURATION_MTHS\", \"Double\")\r\n arcpy.CalculateField_management(config.MatchedMukimLot, \"PROJ_DURATION_MTHS\", \"[PROJ_DURATION_MTHS2]\")\r\n except:\r\n log_error(\"Error in 21 Calculate PROJ_DURATION as months: \", logFile)\r\n logFile.writelines(\"21 Calculate PROJ_DURATION as months ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 22. Concatenate 4 fields to be used in checking if mukimlot already exists in MUKIMCONSTRUCT\r\n logFile.writelines(\"22 Concatenate 4 fields starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n rowsConcat1 = arcpy.UpdateCursor(config.MUKIMCONSTRUCTImport)\r\n\r\n for row in rowsConcat1:\r\n expression = str(row.PROJ_REF_NO) + \"-\" + str(row.BUILDER_NAME) + \"-\" + str(\r\n row.LOT_KEY) + \"-\" + str(row.PERMIT_DATE)\r\n row.Concat_4fields = expression\r\n rowsConcat1.updateRow(row)\r\n if row:\r\n del row\r\n if rowsConcat1:\r\n del rowsConcat1\r\n\r\n rowsConcat2 = arcpy.UpdateCursor(config.MatchedMukimLot)\r\n\r\n for row in rowsConcat2:\r\n expression = str(row.PROJ_REF_NO) + \"-\" + str(row.BUILDER_NAME) + \"-\" + str(\r\n row.LOT_KEY) + \"-\" + str(row.PERMIT_DATE)\r\n row.Concat_4fields = expression\r\n rowsConcat2.updateRow(row)\r\n if row:\r\n del row\r\n if rowsConcat2:\r\n del rowsConcat2\r\n except:\r\n log_error(\"Error in 22 Concatenate 4 fields: \", logFile)\r\n logFile.writelines(\"22 Concatenate 4 fields ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 23.Match mukim lot with mukim construct\r\n logFile.writelines(\"23 Match mukimlot with mukim construct at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.env.workspace = config.TempDataGDB # \"G:\\\\Project\\\\GERIUPGRADE\\\\GPTools\\\\NotificationSysTools\\\\BCAReportProcessing\\\\Temp_data.gdb\"\r\n try:\r\n arcpy.MatchedMukimlotMukimConstruct()\r\n except:\r\n log_error(\"Error in 23 Match mukimlot with mukim construct: \", logFile)\r\n logFile.writelines(\"23 Match mukimlot with mukim construct ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 24.Copy raw values to project lot and project mukim columns and delete the 2 fields\r\n logFile.writelines(\"24 Recalculate projlot and projmukim based on original values starts at \" + str(\r\n datetime.datetime.now()) + \"\\n\")\r\n try:\r\n rowsRaw = arcpy.UpdateCursor(config.MatchedMukimLot)\r\n\r\n for row in rowsRaw:\r\n row.PROJ_MUKIM_NOS = row.PROJECTMUKIM_RAW\r\n row.PROJ_LOT_NOS = row.PROJECTLOT_RAW\r\n rowsRaw.updateRow(row)\r\n if row:\r\n del row\r\n if rowsRaw:\r\n del rowsRaw\r\n except:\r\n log_error(\"Error in 24 Recalculate projlot and projmukim based on original values:\", logFile)\r\n logFile.writelines(\"24 Recalculate projlot and projmukim based on 
original values ends at \" + str(\r\n datetime.datetime.now()) + \"\\n\")\r\n\r\n # 25. Export Cleaned BCA Permit report for CWD\r\n logFile.writelines(\r\n \"25 Export of Cleaned BCA Permit report starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n # Initialize the file\r\n CleanedBCAPermitReport = xlwt.Workbook()\r\n book = CleanedBCAPermitReport.add_sheet('Book 1')\r\n countrow = 0\r\n countcol = 0\r\n fields = ['Project Ref No', 'Project Title', 'House Blk No', 'Road Name', 'Level No', 'Unit No',\r\n 'Building Name', 'Postal Code', 'Project Mukim nos', 'Project Lot nos', 'Permit Type of Work',\r\n 'Type of Work', \"Owner's name\", \"Owner's firm name\", \"Owner's address\", \"Owner's Tel No\",\r\n \"Owner's Email address\", \"Builder's name\", \"Builder's firm name\", \"Builder's address\",\r\n \"Builder's Tel No\", \"Builder's email address\", \"PE's name\", \"PE's firm name\", \"PE's address\",\r\n \"PE's Tel No\", \"PE's Email address\", \"Architect's name\", \"Architect's firm name\",\r\n \"Architect's address\", \"Architect's Tel No\", \"Architect's Email address\", 'Project Cost',\r\n 'Project Duration', 'Approval Date(DD/MM/YYYY)']\r\n for fieldname in fields:\r\n book.write(countrow, countcol, fieldname)\r\n countcol += 1\r\n CleanedBCAPermitReport.save(config.CleanedBCAPermitFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \".xls\")\r\n\r\n # Copy the data to Excel File\r\n data = arcpy.SearchCursor(config.MatchedMukimLot)\r\n\r\n countrow = 1\r\n countcol = 0\r\n for row in data:\r\n message = [row.PROJ_REF_NO, row.PROJ_TITLE, row.HOUSE_BLK_NO, row.ROAD_NAME, row.LEVEL_NO,\r\n row.UNIT_NO, row.BUILDING_NAME, row.POSTAL_CODE, row.PROJ_MUKIM_NOS, row.PROJ_LOT_NOS,\r\n row.PERMIT_WORK_TYPE, row.WORK_TYPE, row.OWNER_NAME, row.OWNER_FIRM_NAME, row.OWNER_ADDR,\r\n row.OWNER_TEL, row.OWNER_EMAIL, row.BUILDER_NAME, row.BUILDER_FIRM_NAME,\r\n row.BUILDER_ADDR, row.BUILDER_TEL, row.BUILDER_EMAIL, row.PE_NAME, row.PE_FIRM_NAME,\r\n row.PE_ADDR, row.PE_TEL, row.PE_EMAIL, row.ARCHITECT_NAME, row.ARCHITECT_FIRM_NAME,\r\n row.ARCHITECT_ADDR, row.ARCHITECT_TEL, row.ARCHITECT_EMAIL, row.PROJ_COST,\r\n row.PROJ_DURATION_MTHS, row.PROJ_APPROVAL_DATE]\r\n countcol = 0\r\n for element in message:\r\n book.write(countrow, countcol, element)\r\n countcol += 1\r\n countrow += 1\r\n CleanedBCAPermitReport.save(config.CleanedBCAPermitFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \".xls\")\r\n if row:\r\n del row\r\n if data:\r\n del data\r\n except:\r\n log_error(\"Error in 25 Export of Cleaned BCA Permit Report: \", logFile)\r\n logFile.writelines(\"25 Export of Cleaned BCA Permit Report ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 26. Catchment calculation\r\n arcpy.env.workspace = config.TempDataGDB\r\n logFile.writelines(\"26 Catchment calculation starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n arcpy.CatchmentCalculation()\r\n except:\r\n log_error(\"Error in 26 Catchment calculation: \", logFile)\r\n logFile.writelines(\"26 Catchment calculation ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 27. Depot calculation\r\n logFile.writelines(\"27 Depot calculation starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n arcpy.DepotCalculation()\r\n except:\r\n log_error(\"Error in 27 Depot calculation: \", logFile)\r\n logFile.writelines(\"27 Depot calculation ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 28. 
Re-add date fields and populate\r\n logFile.writelines(\"28 Re-add date fields and populate starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n arcpy.AddField_management(config.MUKIMCONSTRUCT_Temp, \"PERMIT_DATE\", \"Date\")\r\n arcpy.AddField_management(config.MUKIMCONSTRUCT_Temp, \"PROJ_APPROVAL_DATE2\", \"Date\")\r\n arcpy.AddField_management(config.MUKIMCONSTRUCT_Temp, \"PROJ_END_DATE\", \"Date\")\r\n\r\n rows = arcpy.UpdateCursor(config.MUKIMCONSTRUCT_Temp)\r\n\r\n for row in rows:\r\n date = filedate.strftime(\"%Y%m%d\")\r\n year = int(date[:4])\r\n month = int(date[4:6])\r\n day = int(date[6:8])\r\n permit_date = datetime.datetime(year, month, day)\r\n row.PERMIT_DATE = permit_date\r\n row.PROJ_APPROVAL_DATE2 = datetime.datetime.strptime(row.PROJ_APPROVAL_DATE, '%d/%m/%Y')\r\n rows.updateRow(row)\r\n if row:\r\n del row\r\n if rows:\r\n del rows\r\n except:\r\n log_error(\"Error in 28 Re-add fields and populate: \", logFile)\r\n logFile.writelines(\"28 Re-add fields and populate ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 29. Calculate the end date field\r\n logFile.writelines(\"29 Calculate the end date field starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n\r\n rowsEndDate = arcpy.UpdateCursor(config.MUKIMCONSTRUCT_Temp)\r\n\r\n for row in rowsEndDate:\r\n sourcedate = row.PROJ_APPROVAL_DATE2\r\n # sourcedate = datetime.datetime.strptime(row.PROJ_APPROVAL_DATE2 , '%d/%m/%Y')\r\n months = int(row.PROJ_DURATION_MTHS)\r\n d = add_months(sourcedate, months)\r\n row.PROJ_END_DATE = d\r\n rowsEndDate.updateRow(row)\r\n if row:\r\n del row\r\n if rowsEndDate:\r\n del rowsEndDate\r\n except:\r\n log_error(\"Error in 29 Calculate the end date field: \", logFile)\r\n logFile.writelines(\"29 Calculate the end date field ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 30. Calculate Project Total Area\r\n logFile.writelines(\"30 Project total area calculation starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n arcpy.ProjectTotalArea()\r\n except:\r\n log_error(\"Error in 30 Project total area calculation: \", logFile)\r\n logFile.writelines(\"30 Project total area calculation ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 31. Calculate the BCA_CORRECTED_BY\r\n logFile.writelines(\"31 Calculate the BCA_CORRECTED_BY starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n rows_BCA_CB = arcpy.UpdateCursor(config.MUKIMCONSTRUCT_Temp)\r\n\r\n for row in rows_BCA_CB:\r\n if \"\\WSN\\\\\" in BCAreport:\r\n row.BCA_CORRECTED_BY = \"WSN\"\r\n elif \"\\WRN\\\\\" in BCAreport:\r\n row.BCA_CORRECTED_BY = \"WRN\"\r\n elif \"\\CWD\\\\\" in BCAreport:\r\n row.BCA_CORRECTED_BY = \"CWD\"\r\n rows_BCA_CB.updateRow(row)\r\n if row:\r\n del row\r\n if rows_BCA_CB:\r\n del rows_BCA_CB\r\n except:\r\n log_error(\"Error in 31 Calculate the BCA_CORRECTED_BY: \", logFile)\r\n\r\n # 32. 
Remove spaces in PROJ_REF_NO\r\n logFile.writelines(\r\n \"32 Removing of spaces in mukim and project lot starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n rowsSpaces = arcpy.UpdateCursor(config.MUKIMCONSTRUCT_Temp)\r\n\r\n for row in rowsSpaces:\r\n lot_no_spaces = row.PROJ_REF_NO.strip()\r\n row.PROJ_REF_NO = lot_no_spaces\r\n rowsSpaces.updateRow(row)\r\n if row:\r\n del row\r\n if rowsSpaces:\r\n del rowsSpaces\r\n except:\r\n log_error(\"Error in 32 Removing of spaces in mukim and project lot: \", logFile)\r\n logFile.writelines(\r\n \"32 Removing of spaces in mukim and project lot ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 33. Process the Mukim Construct by Project\r\n logFile.writelines(\r\n \"33 Process the Mukim Construct by Project starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.env.overwriteOutput = True\r\n try:\r\n MUKIM_CONSTRUCT_BYPROJ_IMPORT = config.TempDataGDB + \"\\\\MUKIM_CONSTRUCT_BYPROJ_IMPORT\"\r\n MUKIMCONBYPROJ_SORT = config.TempDataGDB + \"\\\\MUKIMCONBYPROJ_SORT\"\r\n MUKIM_CONSTRUCT_BYPROJ_DISS = config.TempDataGDB + \"\\\\MUKIM_CONSTRUCT_BYPROJ_DISS\"\r\n MUKIM_CONSTRUCT_BYPROJ_DISS__2_ = config.TempDataGDB + \"\\\\MUKIM_CONSTRUCT_BYPROJ_DISS\"\r\n\r\n if arcpy.Exists(MUKIM_CONSTRUCT_BYPROJ_IMPORT):\r\n arcpy.Delete_management(MUKIM_CONSTRUCT_BYPROJ_IMPORT)\r\n if arcpy.Exists(MUKIMCONBYPROJ_SORT):\r\n arcpy.Delete_management(MUKIMCONBYPROJ_SORT)\r\n if arcpy.Exists(MUKIM_CONSTRUCT_BYPROJ_DISS):\r\n arcpy.Delete_management(MUKIM_CONSTRUCT_BYPROJ_DISS)\r\n\r\n arcpy.MUKIMCONBYPROJ()\r\n # arcpy.MUKIMCONSTRUCTBYPROJProcess2()\r\n\r\n arcpy.Sort_management(MUKIM_CONSTRUCT_BYPROJ_IMPORT, MUKIMCONBYPROJ_SORT, \"PROJ_END_DATE DESCENDING\",\r\n \"UR\")\r\n arcpy.Dissolve_management(MUKIMCONBYPROJ_SORT, MUKIM_CONSTRUCT_BYPROJ_DISS, \"PROJ_REF_NO\",\r\n \"LOT_KEY FIRST;PROJ_REF_NO FIRST;PROJ_TITLE FIRST;HOUSE_BLK_NO FIRST;ROAD_NAME FIRST;POSTAL_CODE FIRST;LEVEL_NO FIRST;UNIT_NO FIRST;BUILDING_NAME FIRST;PROJ_MUKIM_NOS FIRST;PROJ_LOT_NOS FIRST;PERMIT_WORK_TYPE FIRST;WORK_TYPE FIRST;OWNER_NAME FIRST;OWNER_FIRM_NAME FIRST;OWNER_ADDR FIRST;OWNER_TEL FIRST;OWNER_EMAIL FIRST;BUILDER_NAME FIRST;BUILDER_FIRM_NAME FIRST;BUILDER_ADDR FIRST;BUILDER_TEL FIRST;BUILDER_EMAIL FIRST;PE_NAME FIRST;PE_FIRM_NAME FIRST;PE_ADDR FIRST;PE_TEL FIRST;PE_EMAIL FIRST;ARCHITECT_NAME FIRST;ARCHITECT_FIRM_NAME FIRST;ARCHITECT_ADDR FIRST;ARCHITECT_TEL FIRST;ARCHITECT_EMAIL FIRST;PROJ_TOT_AREA FIRST;PROJ_PARENT_CWDCATCHMENT FIRST;PROJ_PARENT_WSNDEPOT FIRST;PROJ_PARENT_WRPCATCHMENT FIRST;BCA_CORRECTED_BY FIRST;PROJ_DURATION_MTHS FIRST;PROJ_COST FIRST\",\r\n \"MULTI_PART\", \"DISSOLVE_LINES\")\r\n arcpy.JoinField_management(MUKIM_CONSTRUCT_BYPROJ_DISS, \"FIRST_PROJ_REF_NO\", MUKIMCONBYPROJ_SORT,\r\n \"PROJ_REF_NO\", \"PROJ_APPROVAL_DATE;PROJ_END_DATE;PERMIT_DATE\")\r\n arcpy.CalculateField_management(MUKIM_CONSTRUCT_BYPROJ_DISS__2_, \"FIRST_PROJ_TOT_AREA\",\r\n \"[Shape_Area]/10000\", \"VB\", \"\")\r\n\r\n except:\r\n log_error(\"Error in 33 Process the Mukim Construct by Project: \", logFile)\r\n logFile.writelines(\r\n \"33 Process the Mukim Construct by Project ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.AddMessage(\"33 END process MUKIM CONSTRUCT\")\r\n\r\n # 34. 
Filter on-going projects\r\n\r\n logFile.writelines(\"34 Filter on-going projects starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n # TempDataGDB = \"G:\\\\Project\\\\GERIUPGRADE\\\\GPTools\\\\NotificationSysTools\\\\BCAReportProcessing\\\\Temp_data.gdb\"\r\n MUKIM_CONSTRUCT_BYPROJ_DISS = config.TempDataGDB + \"\\\\MUKIM_CONSTRUCT_BYPROJ_DISS\"\r\n rowsIn = arcpy.UpdateCursor(MUKIM_CONSTRUCT_BYPROJ_DISS)\r\n\r\n row = None\r\n for row in rowsIn:\r\n strdays = str(row.PROJ_END_DATE.date() - datetime.date.today())\r\n splitDays = strdays.split()\r\n if splitDays[0] == '0:00:00':\r\n result = \"On-going project (but will end today)\"\r\n else:\r\n if int(splitDays[0]) < 0:\r\n rowsIn.deleteRow(row)\r\n else:\r\n result = \"On-going project\"\r\n if rowsIn:\r\n del rowsIn\r\n if row:\r\n del row\r\n\r\n except:\r\n log_error(\"Error in 34 Filter on-going projects: \", logFile)\r\n logFile.writelines(\"34 Filter on-going projects ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 35. Append the new data to MUKIM_CONSTRUCT\r\n logFile.writelines(\r\n \"35 Append the new data to MUKIM_CONSTRUCT starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n arcpy.AppendNewData()\r\n except:\r\n log_error(\"Error in 35 Append the new data to MUKIM_CONSTRUCT: \", logFile)\r\n logFile.writelines(\r\n \"35 Append the new data to MUKIM_CONSTRUCT ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # Clean the memory and the schema lock\r\n arcpy.RefreshCatalog(config.Notification)\r\n arcpy.Compact_management(config.TempDataGDB)\r\n gc.collect()\r\n\r\n # Status update to run/not run the SiteInspection Update\r\n Log_SiteInspectionUpdate = file(config.SiteInspectionUpdate, \"w\")\r\n Log_SiteInspectionUpdate.writelines(\"YES\")\r\n Log_SiteInspectionUpdate.close()\r\n\r\n arcpy.AddMessage(\"END BCA Processing\")\r\n arcpy.AddMessage(\"Passing file date to other functions: \" + repr(filedate))\r\n\r\n # Generate Report\r\n import ReportGeneration_Adhoc_WithProjects as gen_report\r\n gen_report.run(filedate)\r\n #\r\n # # Send email to departments\r\n # import EmailGenerationCompletion_adhoc as send_dept_notification\r\n # if \"CORRECTED\" in BCAreport.upper():\r\n # send_dept_notification.run(filedate, corrected=True)\r\n # else:\r\n # send_dept_notification.run(filedate)\r\n\r\n # Generate advisory letters\r\n import LetterGeneration as letter_gen\r\n letter_gen.run(filedate)\r\n #\r\n # # Send letters to project team\r\n # import EmailGeneration as send_advisory_email\r\n # send_advisory_email.run(filedate)\r\n\r\n\r\n # 36. 
Move the BCAReport in the backup folder\r\n for BCAreport in correct_config_files:\r\n\r\n input_file_name = BCAreport.split(\"\\\\\")[-1]\r\n bk_file_path = os.path.join(config.BCAreportBackupFolder, input_file_name)\r\n\r\n # if the same file name exists in the backup folder, rename the new file with timestamp and move\r\n if os.path.exists(bk_file_path):\r\n\r\n new_filename = datetime.datetime.now().strftime(\"%Y%m%d-%H%M\") + input_file_name\r\n new_filepath = os.path.join(config.BCAreportBackupFolder, new_filename)\r\n shutil.copy(BCAreport, new_filepath)\r\n os.remove(BCAreport)\r\n\r\n # if the filename does not exist in the backup folder, move the file to backup\r\n else:\r\n shutil.move(BCAreport, config.BCAreportBackupFolder)\r\n\r\n logFile.writelines(\"Moved the BCA report to the backup folder at \" + str(datetime.datetime.now()) + \"\\n\")\r\n logFile.close()", "def PyRana_WE(data_path, output_path, date_specs):\n\n (start_month, start_year, end_month, end_year) = date_specs\n months_in_series = ((end_year - start_year) * 12) + (end_month - start_month)\n\n # start and enddate as datetimes for datetime functionality\n start_date = datetime(start_year, start_month, 1).date()\n end_date = datetime(end_year, end_month, 1).date()\n\n # at each timestep, keep track of how negative the depletion has gotten\n depletion_ledger = np.zeros((2525, 2272), dtype=float)\n # initialize depletion counter at zero.\n depletion_counter = np.zeros((2525, 2272), dtype=float)\n # keep track of the maximum depletion map\n max_depletion = np.zeros((2525, 2272), dtype=float)\n # to compare with SSEB\n total_eta = np.zeros((2525, 2272), dtype=float)\n\n for i in range(months_in_series + 1):\n\n # count up from the start date by months...\n date = start_date + relativedelta(months=+i)\n\n precip = os.path.join(data_path, \"tot_precip_{}_{}.tif\".format(date.month, date.year))\n eta = os.path.join(data_path, \"tot_eta_{}_{}.tif\".format(date.month, date.year))\n\n # array, transform, dimensions, projection, data type\n precip_arr, transform, dim, proj, dt = raster_extract(precip)\n eta_arr, transform, dim, proj, dt = raster_extract(eta)\n\n total_eta += eta_arr\n\n # this month's change in depletion\n depletion_delta = depletion_calc(eta_arr, precip_arr)\n\n # add to the running depletion tally\n print depletion_delta.shape\n print depletion_counter.shape\n depletion_counter += depletion_delta\n depletion_ledger += depletion_delta\n\n # for any values that become negative, make them zero. 
Assume runoff...Wang-Erlandsson (2016)\n # todo - uncomment to ONLY allow positive depletions\n depletion_counter[depletion_counter < 0.0] = 0.0\n\n # newmax_bool = [depletion_counter > max_depletion]\n # newmax = depletion_counter[newmax_bool == True]\n newmax = np.maximum(depletion_counter, max_depletion)\n\n max_depletion = newmax\n\n # for each monthly timestep, take the cumulative depletion condition and output it as a raster\n depletion_name = \"pyrana_cumulative_depletion_{}_{}.tif\".format(date.year, date.month)\n write_raster(depletion_counter, transform, output_path, depletion_name, dim, proj, dt)\n\n # output the maximum depletion\n max_depletion_name = 'pyrana_max_depletion_{}_{}.tif'.format(start_date.year, end_date.year)\n write_raster(max_depletion, transform, output_path, max_depletion_name, dim, proj, dt)\n\n # output total SSEBop (to test whether it looks like the netcdf file)\n total_eta_name = \"total_eta_{}_{}.tif\".format(start_date.year, end_date.year)\n write_raster(total_eta, transform, output_path, total_eta_name, dim, proj, dt)", "def run_pycma(self):\n\n self.mug_pipeline.set_folder_names(self.folder_name)\n self.mug_pipeline.set_optimizer_type(OptimizerType.PYCMA)\n\n self.mug_initial_poses = []\n\n for i in range(self.num_mugs):\n self.mug_initial_poses += \\\n RollPitchYaw(np.random.uniform(0.0, 2.0*np.pi, size=3)).ToQuaternion().wxyz().tolist() + \\\n [np.random.uniform(-0.1, 0.1), np.random.uniform(-0.1, 0.1), np.random.uniform(0.1, 0.2)]\n\n print(self.mug_initial_poses, flush=True)\n\n iter_num = 0\n\n start_time = time.time()\n elapsed_time = 0\n\n manager = Manager()\n self.all_probabilities = manager.list()\n all_probabilities_lock = manager.Lock()\n\n self.total_iterations = manager.Value('d', 0)\n self.num_counterexamples = manager.Value('d', 0)\n\n self.model_number = manager.Value('d', 0)\n model_number_lock = manager.Lock()\n\n counter_lock = manager.Lock()\n\n file_q = manager.Queue()\n\n filename = '{}/results.csv'.format(self.folder_name)\n watcher = Process(target=self.listener, args=(file_q, filename))\n watcher.start()\n\n # TODO: share GPU for inference using model.share_memory()\n\n es = cma.CMAEvolutionStrategy(self.mug_initial_poses, 1.0/3.0,\n {'bounds': [-1.0, 1.0], 'verb_disp': 1, 'popsize': self.num_processes})\n\n while not es.stop():\n try:\n ep = EvalParallel3(self.run_inference, number_of_processes=self.num_processes)\n lst = range(iter_num, iter_num + self.num_processes)\n X = es.ask()\n elapsed_time = time.time() - start_time\n jobs = ep(X, lst=lst, args=(self.mug_pipeline, self.all_probabilities,\n self.total_iterations, self.num_counterexamples,\n self.model_number, model_number_lock, counter_lock,\n all_probabilities_lock, file_q))\n except FoundCounterexample:\n print('FOUND COUNTEREXAMPLE EXCEPTION', flush=True)\n self.mug_initial_poses = []\n\n for i in range(self.num_mugs):\n self.mug_initial_poses += \\\n RollPitchYaw(np.random.uniform(0., 2.*np.pi, size=3)).ToQuaternion().wxyz().tolist() + \\\n [np.random.uniform(-0.1, 0.1), np.random.uniform(-0.1, 0.1), np.random.uniform(0.1, 0.2)]\n\n es = cma.CMAEvolutionStrategy(self.mug_initial_poses, 1.0/3.0,\n {'bounds': [-1.0, 1.0], 'verb_disp': 1, 'popsize': self.num_processes})\n # except torch.multiprocessing.context.TimeoutError:\n # print('timed out!', flush=True)\n # break\n except FoundMaxCounterexamples:\n print('found {} counterexamples!'.format(self.max_counterexamples), flush=True)\n break\n except:\n print(\"Unhandled unnamed exception in pycma\", flush=True)\n 
raise\n\n iter_num += self.num_processes\n torch.cuda.empty_cache()\n print('calling ep.terminate()', flush=True)\n ep.terminate()\n\n elapsed_time = time.time() - start_time\n print('ran for {} minutes! total number of iterations is {}, with {} sec/image'.format(\n elapsed_time/60.0, self.total_iterations.value, elapsed_time/self.total_iterations.value), flush=True)\n file_q.put('kill')\n print('probabilities:', self.all_probabilities, flush=True)\n es.result_pretty()\n\n sys.stdout.flush()", "def cleanup(adata, del_prediction=False, del_2nd_moments=False):\n\n if \"pca_fit\" in adata.uns_keys():\n adata.uns[\"pca_fit\"] = None\n if \"velocyto_SVR\" in adata.uns_keys():\n adata.uns[\"velocyto_SVR\"][\"SVR\"] = None\n if \"umap_fit\" in adata.uns_keys():\n adata.uns[\"umap_fit\"][\"fit\"] = None\n if \"velocity_pca_fit\" in adata.uns_keys():\n adata.uns[\"velocity_pca_fit\"] = None\n if \"kmc\" in adata.uns_keys():\n adata.uns[\"kmc\"] = None\n if \"kinetics_heatmap\" in adata.uns_keys():\n adata.uns.pop(\"kinetics_heatmap\")\n if \"hdbscan\" in adata.uns_keys():\n adata.uns.pop(\"hdbscan\")\n\n VF_keys = [i if i.startswith(\"VecFld\") else None for i in adata.uns_keys()]\n for i in VF_keys:\n if i is not None and \"VecFld2D\" in adata.uns[i].keys():\n del adata.uns[i][\"VecFld2D\"]\n\n fate_keys = [i if i.startswith(\"fate\") else None for i in adata.uns_keys()]\n for i in fate_keys:\n if i is not None:\n if adata.uns[i][\"init_cells\"] is not None:\n adata.uns[i][\"init_cells\"] = list(adata.uns[i][\"init_cells\"])\n if \"prediction\" in adata.uns[i].keys():\n if del_prediction:\n del adata.uns[i][\"prediction\"]\n if \"VecFld_true\" in adata.uns[i].keys():\n if adata.uns[i][\"VecFld_true\"] is not None:\n del adata.uns[i][\"VecFld_true\"]\n\n if del_2nd_moments:\n from .tools.utils import remove_2nd_moments\n\n remove_2nd_moments(adata)\n\n return adata", "def _analyse(self, source='sdf', alpha = 0.05, n_bootstrap = 2000, \n\t\tbiphase_split_point = 0.5, biphase_select_resp = None):\n\t\t\n\t\t\n\t\t# Need to add capacity to handle two things:\n\t\t# Qualitative conditions ne\n\t\t# Conditions split - need to to deal with splitting a single dataset, where one part\n\t\t# is qualitiative and the other quantitative\n\t\t\n\t\t# For qualitative, the self.cond_tuning array is numerical. Replace with record\n\t\t# see Initial Chrom Analysis. Keep conditions as strings, and convert to numerical\n\t\t# for plotting (?). Where qualitative, only use bar plot, where mixed, split.\n\t\t## Add parameters to parameters dictionary\n\n\t\tself.parameters['biphase_split_point'] = biphase_split_point\n\t\tself.parameters['biphase_select_resp'] = biphase_select_resp\n\t\t\n\t\t# Organising source selection - raw and mov_avg not develoepd fully yet.\n\t\tsources = {'sdf': (self.spike_dens_func, self.CI_pos, self.CI_neg), \n\t\t\t\t 'mov_avg': 'doesnt exist yet, call it self.spike_mov_avg', \n\t\t\t\t 'raw': (self.conditions_hist_mean, \n\t\t\t\t\t\t self.conditions_hist_mean + 2*self.conditions_hist_stderr, \n\t\t\t\t\t\t self.conditions_hist_mean - 2*self.conditions_hist_stderr)}\n\t\t\t\t \n\t\tassert source.lower() in sources.keys(), ('Tuning source data \"%s\" is invalid '\n\t\t\t\t\t\t\t\t\t\t\t\t\t'select one of %s' %(source, sources.keys())) \n\t \n\t\t## Need to expand this functionality to the mean and CI_pos and CI_neg. 
Doing so for\n\t # raw and moving average is not a priority, using sdf and bootstrap is pretty good.\n\t # overall aim is to clean this function up to accomadte a number of tuning functinos\n\t # in a clear and easy to use fasion.\n\t \n\t\tn_con = self.parameters['conditions']\n\t\t\n\t\t# values for transient bar responses\n\t\tif self.parameters['stimulus'] == 'bar':\n\t\t\t\n\t\t\tresp, CI_pos, CI_neg = sources[source.lower()]\n\t\t\t\n\t\t\t\n\t\t\tif self.parameters['biphasic']:\n\t\t\t\t\n\t\t\t\t# Take max response for each half of each PSTH, including Conf Intvls\n\t\t\t\thalf = int(self.bins.size * biphase_split_point)\n\n\t\t\t\tmax_val_arg = (resp[:, :half].argmax(axis=1),\n\t\t\t\t\t\t\t resp[:, half:].argmax(axis=1)+half)\n\t\t\t\t\t\t\t\t\t\n\t\t\t\tmax_val = (resp[:, :half].max(axis=1),\n\t\t\t\t\t\t resp[:, half:].max(axis=1))\n\t\t\t\t\t\t \n\t\t\t\t\t\t\t \n\t\t\t\tmax_val_CI_neg = (CI_neg[np.arange(n_con), max_val_arg[0]],\n\t\t\t\t\t\t\t\t CI_neg[np.arange(n_con), max_val_arg[1]])\n\t\t\t\t\t\t\t\t \n\t\t\t\tmax_val_CI_pos = (CI_pos[np.arange(n_con), max_val_arg[0]],\n\t\t\t\t\t\t\t\t CI_pos[np.arange(n_con), max_val_arg[1]])\n\n\t\t\t\t# encode which of the two responses the data is attached to\n\t\t\t\tbiphas_id = np.zeros_like(np.hstack((self.conditions, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.conditions2)))\n\t\t\t\tbiphas_id[:self.conditions.size] = 1\n\t\t\t\tbiphas_id[self.conditions2.size:] = 2\n\n\n\t\t\t\t\t\t\t\t \n\t\t\t\tself.cond_tuning = np.vstack((np.hstack((self.conditions, \n\t\t\t\t\t\t\t\t\t\t\t\t\t\tself.conditions2)),\n\t\t\t\t\t\t\t\t\t\t\t np.hstack(max_val),\n\t\t\t\t\t\t\t\t\t\t\t np.hstack(max_val_CI_neg),\n\t\t\t\t\t\t\t\t\t\t\t np.hstack(max_val_CI_pos),\n\t\t\t\t\t\t\t\t\t\t\t biphas_id))\n\t\t\t\t\t\t\t\t\t\t\t \n\t\t\t\t# Convert to Hertz - design choice is to keep all PSTH datasets as raw average spike\n\t\t\t\t# counts, with easy option of seeing frequency in the plotting, but converting to \n\t\t\t\t# Hertz for all condition tuning data.\n\t\t\t\tself.cond_tuning[1:-1,:] *= (1/self.bin_width)\n\t\t\t\t\t\t\t\t\t\t\t \n\t\t\t\t\t\t\t \n\t\t\t\t# Column labels for pd.dataframe of tuning data\n\t\t\t\t# Percentage of confidence intervals\n\t\t\t\t# ci_perc = (100 * (1 - self.parameters['sdf_alpha']))\n\t\t\t\t\n\t\t\t\t# Labels\n\t\t\t\tidx = ['condition', 'max_resp', 'neg_CI', 'pos_CI', 'biphas_id']\n\t\t\t\t\n\t\t\t\t# Pandas object, with transpose of tuning array to data frame object \n\t\t\t\tself.cond_tuning_pd = pd.DataFrame(self.cond_tuning.transpose(), columns=idx)\n\t\t\n\t\t\t#non biphasic version of above\n\t\t\tif not self.parameters['biphasic']:\n\n\t\t\t\tmax_val_arg = resp[:, :].argmax(axis=1)\n\t\t\t\t\t\t\t\t\t\n\t\t\t\tmax_val = resp[:, :].max(axis=1)\n\t\t\t\t\t\t \n\t\t\t\t\t\t\t \n\t\t\t\tmax_val_CI_neg = CI_neg[np.arange(n_con), max_val_arg]\n\t\t\t\t\t\t\t\t \n\t\t\t\tmax_val_CI_pos = CI_pos[np.arange(n_con), max_val_arg]\n\t\t\t\t\t\t\t\t \n\t\t\t\tself.cond_tuning = np.vstack((self.conditions,\n\t\t\t\t\t\t\t\t\t\t\t max_val,\n\t\t\t\t\t\t\t\t\t\t\t max_val_CI_neg,\n\t\t\t\t\t\t\t\t\t\t\t max_val_CI_pos))\n\t\t\t\t\t\t\t\t\t\t\t \n\t\t\t\t# Convert to Hertz - design choice is to keep all PSTH datasets as raw average spike\n\t\t\t\t# counts, with easy option of seeing frequency in the plotting, but converting to \n\t\t\t\t# Hertz for all condition tuning data.\n\t\t\t\tself.cond_tuning[1:,:] *= (1/self.bin_width)\n\t\t\t\t\n\t\t\t\t\n\t\t\t\t# Column labels for pd.dataframe of tuning data\n\t\t\t\t# ci_perc = (100 * (1 
- self.parameters['sdf_alpha']))\n\n\t\t\t\tidx = ['condition', 'max_resp', 'neg_CI', 'pos_CI']\n\n\t\t\t\t\t \n\t\t\t\t# transpose of tuning array to data frame object \n\t\t\t\tself.cond_tuning_pd = pd.DataFrame(self.cond_tuning.transpose(), columns=idx)\n\t\t\n\t\t\n\t\t# values for sinusoids/gratings\n\t\t## Note issue of temporal frequency tuning - need variable tf.\n\t\tif self.parameters['stimulus'] == 'grating':\n\t\t\t\n\t\t\tself.parameters['fft_alpha'] = alpha\n\t\t\tself.parameters['fft_number_bootstrap'] = n_bootstrap\n\t\t\t\n\t\t\tif source == 'sdf':\n\t\t\t\tprint ('WARNING, using a smoothed/filtered dataset will artificially increase'\n\t\t\t\t\t 'the amplitude of the DC component and decrease that of the F1') \n\t\t\t\n\t\t\tsources = {'sdf': self.conditions_trials_sdf,\n\t\t\t\t\t 'mov_avg': \"doesn't exist yet (?)\",\n\t\t\t\t\t 'raw': self.conditions_trials_hist}\n\t\t\t\n\t\t\tresp = sources[source]\n\t\t\t\n\t\t\ttemp_freq = self.parameters['temp_freq']\n\t\t\tstim_len = self.parameters['stimulus_length']\n\t\t\t\n\t\t\t# ensuring that the temp_freq is measured in the FFT whilst taking the maximum time.\n\t\t\t# on the basis of delt-f = 1 / n*del-t; stim_len*F1=factor; 1/(bin_width*F1)=min bins\n\t\t\t# number times greater than minimum can fit in stim_length \n\t\t\tfactor = np.floor(stim_len * temp_freq).astype('int')\n\t\t\t\n\t\t\t# number of bins to take - the window size necessary for temp_freq to be measured\n\t\t\tbins_take = np.floor(factor / (self.bin_width * temp_freq)).astype('int')\n\n\t\t\t# Frequency axis generation\n\t\t\tself.freq = fft.rfftfreq(bins_take, self.bin_width)\n\t\t\t\n\t\t\t#Checkign whether the temp_freq is in the FFT.\n\t\t\tassert self.freq[factor] == temp_freq, ('The calculated FFT F1 frequency (%s)'\n\t\t\t\t\t\t\t\t\t\t\t\t\t 'does not equal the Stimulus temp_freq (%s)'\n\t\t\t\t\t\t\t\t\t\t\t\t\t %(self.freq[bins_take], temp_freq))\n\n\t\t\t# Fourier Transform\n\t\t\tself.conditions_trials_fourier = fft.rfft(resp[:,:,:bins_take], axis=2)\n\t\t\t\n\t\t\t# Amplitude (peak-to-peak)\n\t\t\tself.conditions_trials_ampl = np.abs(self.conditions_trials_fourier)\n\t\t\t\n\t\t\t# normalising to dataset size, except the DC.\n\t\t\tself.conditions_trials_ampl[:,:,0] *= 1 / float(bins_take)\n\t\t\tself.conditions_trials_ampl[:,:,1:] *= 2 / float(bins_take)\n\t\t\t\n\t\t\t\n\t\t\t# Mean amplitudes and bootstrapped CI_intervals \n\t\t\tself.conditions_ampl_mean = np.mean(self.conditions_trials_ampl, axis=1)\n\t\t\t\n\t\t\tCI_pos, CI_neg = bootstrap(self.conditions_trials_ampl, alpha=alpha, \n\t\t\t\t\t\t\t\t\t n_bootstrap=n_bootstrap)\n\t\t\tself.conditions_ampl_CI_pos, self.conditions_ampl_CI_neg = CI_pos, CI_neg\n\t\t\t\n\t\t\t# isolating F0, F1, and F2 responses and compiling into a single table.\n\t\t\tconditions_f0 = self.conditions_ampl_mean[:,0]\n\t\t\tconditions_f1 = self.conditions_ampl_mean[:,factor]\n\t\t\tconditions_f2 = self.conditions_ampl_mean[:,2*factor]\n\t\t\t\n\t\t\t# Condition Tuning array\n\t\t\tself.cond_tuning = np.vstack((self.conditions,\n\t\t\t\t\t\t\t\t\t\t conditions_f0, CI_pos[:,0], CI_neg[:,0],\n\t\t\t\t\t\t\t\t\t\t conditions_f1, CI_pos[:,factor], CI_neg[:,factor],\n\t\t\t\t\t\t\t\t\t\t conditions_f2, CI_pos[:,2*factor], CI_neg[:,2*factor],\n\t\t\t\t\t\t\t\t\t\t conditions_f1/conditions_f0))\n\t\t\t\n\t\t\t# Convert to Hertz - design choice is to keep all PSTH datasets as raw average spike\n\t\t\t# counts, with easy option of seeing frequency in the plotting, but converting to \n\t\t\t# Hertz for all condition 
tuning data.\n\t\t\t\n\t\t\tself.cond_tuning[1:-1,:] *= (1/self.bin_width)\n\t\t\t\n\t\t\t# Column labels for pd.dataframe of tuning data\n\t\t\t# ci_perc = (100 * (1 - self.parameters['fft_alpha']))\n\t\t\tidx = ['conditions', \n\t\t\t\t 'F0', 'F0_pos_CI', 'F0_neg_CI', \n\t\t\t\t 'F1', 'F1_pos_CI', 'F1_neg_CI',\n\t\t\t\t 'F2', 'F2_pos_CI', 'F2_neg_CI',\n\t\t\t\t 'F1/F0_ratio']\n\t\t\t# transpose of tuning array to data frame object \n\t\t\tself.cond_tuning_pd = pd.DataFrame(self.cond_tuning.transpose(), columns=idx)\n\t\t\n\t\t\n\t\t\t \n\t\t# for orientation data, the orientation angles can get scrambled due to the circ() function\n\t\t# rotating the angles around. This orders them numerically in the final cond_tuning\n\t\t\n\t\tif self.parameters['condition_type'] == 'orientation':\n\t\t\tself.cond_tuning = self.cond_tuning[:,self.cond_tuning[0].argsort()]\n\t\t\tself.cond_tuning_pd.sort_values(self.cond_tuning_pd.columns[0], inplace=True)\n\n\n\t\t# \n\t\t# cond_tuning cleaning up and inserting important meta data / columns\n\t\t# \n\n\t\tif biphase_select_resp is not None:\n\t\t\tassert isinstance(biphase_select_resp, int) and biphase_select_resp in [1,2], \\\n\t\t\tf'biphase_select_resp ({biphase_select_resp}) must be an integer of 1 or 2'\n\n\t\t\tassert self.parameters['biphasic'], 'Stimulus not analysed as biphasic'\n\n\t\t\t# cond tuning array\n\t\t\tcond_tuning_biphase_mask = self.cond_tuning[4,:] == biphase_select_resp\n\t\t\tself.cond_tuning = self.cond_tuning[:, cond_tuning_biphase_mask]\n\n\t\t\t# cond tuning pandas dataframe\n\t\t\tself.cond_tuning_pd = self.cond_tuning_pd.query('biphas_id == @biphase_select_resp')\n\n\n\n\t\tassert hasattr(self, 'CELL_ID'), 'Make Cell ID first'\n\n\n\t\tself.cond_tuning_pd.insert(0, 'run_key', self.RUN_KEY)\n\t\tself.cond_tuning_pd.insert(0, 'cell_key', self.CELL_KEY)\n\t\tself.cond_tuning_pd.set_index(['cell_key', 'run_key'], inplace=True)\n\n\t\tself.cond_tuning_pd.insert(0, 'cond_type', self.parameters['condition_type'])\n\t\tself.cond_tuning_pd.insert(1, 'cond_unit', self.parameters['condition_unit'])", "def initializeData():\n\n # Read in the CSV\n allX = pd.read_csv('completeData.csv', keep_default_na=False)\n xValues = pd.read_csv('formattedXValues.csv')\n filename = \"completeData.csv and formattedXValues.csv\"\n\n # Separate the CSV columns into array variables and numpy vars to store new categorical variables\n mixNum = allX['Mix Number']\n mixP = allX['Mix Proportion']\n mixPFinal = np.empty(len(mixP))\n scm = allX['SCM']\n scmFinal = np.empty(len(scm))\n fineA = allX['Fine Aggregate']\n fineAFinal = np.empty(len(fineA))\n coarseA = allX['Coarse Aggregate']\n coarseAFinal = np.empty(len(coarseA))\n\n # Loop through every mix in the csv file\n # Not sure how to do 3 different variables\n for y in range(0, len(mixNum)):\n # Sort Mix Proportions\n if mixP[y] == \"A-F\":\n mixPFinal[y] = 2\n elif mixP[y] == \"A-S\":\n mixPFinal[y] = 1\n elif mixP[y] == \"A\":\n mixPFinal[y] = 0\n else:\n print('Unidentified Variable in mixP: ')\n print(mixP[y])\n\n # Sort SCM into slag or fly ash\n if scm[y] == 'N/A':\n scmFinal[y] = 1000\n elif scm[y] == 'Slag 1':\n scmFinal[y] = 0\n elif scm[y] == 'Slag 2':\n scmFinal[y] = 0\n elif scm[y] == 'Fly Ash 1':\n scmFinal[y] = 1\n elif scm[y] == 'Fly Ash 2':\n scmFinal[y] = 1\n elif scm[y] == 'Fly Ash 3':\n scmFinal[y] = 1\n else:\n print('Unidentified Variable in scm: ')\n print(scm[y])\n\n # Sort the fine aggregate\n if fineA[y] == 'Sand A':\n fineAFinal[y] = 0\n elif fineA[y] == 'Sand B':\n 
fineAFinal[y] = 1\n else:\n print('Unidentified Variable in fineA: ')\n print(fineA[y])\n\n # Sort the coarse aggregate\n if coarseA[y] == 'GG1':\n coarseAFinal[y] = 0\n elif coarseA[y] == 'GG2':\n coarseAFinal[y] = 0\n elif coarseA[y] == 'GG3':\n coarseAFinal[y] = 0\n elif coarseA[y] == 'GG4':\n coarseAFinal[y] = 0\n elif coarseA[y] == 'GG5':\n coarseAFinal[y] = 0\n elif coarseA[y] == 'GG6':\n coarseAFinal[y] = 0\n elif coarseA[y] == 'CS1':\n coarseAFinal[y] = 1\n elif coarseA[y] == 'CS2':\n coarseAFinal[y] = 1\n elif coarseA[y] == 'CS3':\n coarseAFinal[y] = 1\n elif coarseA[y] == 'CS4':\n coarseAFinal[y] = 1\n elif coarseA[y] == 'CS5':\n coarseAFinal[y] = 1\n elif coarseA[y] == 'CS6':\n coarseAFinal[y] = 1\n elif coarseA[y] == 'CS7':\n coarseAFinal[y] = 1\n elif coarseA[y] == 'CS8':\n coarseAFinal[y] = 1\n elif coarseA[y] == 'CS9':\n coarseAFinal[y] = 1\n else:\n print('Unidentified Variable in coarseA: ')\n print(coarseA[y])\n\n # One Hot Encode the sorted variables\n encodedMixP = pd.get_dummies(mixPFinal)\n encodedSCM = pd.get_dummies(scmFinal)\n encodedFineA = pd.get_dummies(fineAFinal)\n encodedCoarseA = pd.get_dummies(coarseAFinal)\n\n # Update the headers for onehotencoded variables\n # Get the current variable names\n encodedSCMlist = list(encodedSCM.columns.values)\n encodedFineAlist = list(encodedFineA.columns.values)\n encodedCoarseAlist = list(encodedCoarseA.columns.values)\n encodedMixPlist = list(encodedMixP.columns.values)\n # go through and replace the current names with the updated ones\n encodedSCM.rename(columns={encodedSCMlist[0]: 'SCM_0', encodedSCMlist[1]: 'SCM_1', encodedSCMlist[2]: 'SCM_1000'},\n inplace=True)\n encodedFineA.rename(columns={encodedFineAlist[0]: 'FineA_0', encodedFineAlist[1]: 'FineA_1'}, inplace=True)\n encodedCoarseA.rename(columns={encodedCoarseAlist[0]: 'CoarseA_0', encodedCoarseAlist[1]: 'CoarseA_1'},\n inplace=True)\n encodedMixP.rename(columns={encodedMixPlist[0]: 'MixP_0', encodedMixPlist[1]: 'MixP_1', encodedMixPlist[2]: 'MixP_2'},\n inplace=True)\n\n # Remake the dataframe to include the onehotencoded columns instead of the regular columns.\n firstHalf = allX.ix[:, :21]\n cte = allX.ix[:, 25]\n oneHotEncodedframe = pd.concat([encodedMixP, encodedSCM, encodedFineA, encodedCoarseA], axis=1)\n secondHalf = xValues.ix[:, 6:]\n completearray = pd.concat([firstHalf, cte, oneHotEncodedframe, secondHalf], axis=1)\n variablenames = list(completearray.columns.values)\n # convert to numpy array\n completenumpyarray = completearray.as_matrix()\n\n # remove the first 15 rows in the array to clear the NaN entries\n completenumpyarray = completenumpyarray[15:, :]\n # Also, remove the columns that include mix A as well as SCM_1000\n\n #####\n # Now, Ask whether or not to run decision trees on batch A data or batch B\n batch = input(\"which batch to run tests on (A or B)? \")\n\n if batch == \"A\":\n\n # break up the data into the batch A values\n batchAYcolumns = [0, 5, 6, 7, 8, 21]\n yvariables = np.transpose(completenumpyarray[:, batchAYcolumns])\n numyvariables = 6\n yvariablenames = [variablenames[x] for x in batchAYcolumns]\n batchAXcolumns = [23, 24, 25, 26, 28, 29, 30, 31, 32, 35, 38, 41]\n # normalize the x variables. 
Will normalize y variables in the main body\n # after a histogram of the data is created.\n xvariables = completenumpyarray[:, batchAXcolumns]\n # Normalize each of the x variables\n # get number of columns of x variables\n xVariablesShape = xvariables.shape\n # index through each of the columns and find the l2 norm\n for p in range(0, xVariablesShape[1]):\n x_mean = xvariables[:, p].mean()\n x_std = xvariables[:, p].std()\n # index through each value of the column (thus, go through each row) and divide by the l2 norm\n xvariables[:, p] = (xvariables[:, p] - x_mean) / x_std\n xvariablenames = [variablenames[x] for x in batchAXcolumns]\n\n elif batch == \"B\":\n\n # break up the data into the batch B values\n batchBYcolumns = [0, 1, 2, 3, 4, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]\n yvariables = np.transpose(completenumpyarray[:, batchBYcolumns])\n numyvariables = 17\n yvariablenames = [variablenames[x] for x in batchBYcolumns]\n batchBXcolumns = [23, 24, 25, 26, 28, 29, 30, 31, 33, 36, 39, 42]\n # normalize the x variables. Will normalize y variables in the main body\n # after a histogram of the data is created.\n xvariables = completenumpyarray[:, batchBXcolumns]\n # Normalize each of the x variables\n # get number of columns of x variables\n xVariablesShape = xvariables.shape\n # index through each of the columns and find the l2 norm\n for p in range(0, xVariablesShape[1]):\n x_mean = xvariables[:, p].mean()\n x_std = xvariables[:, p].std()\n # index through each value of the column (thus, go through each row) and divide by the l2 norm\n xvariables[:, p] = (xvariables[:, p] - x_mean) / x_std\n xvariablenames = [variablenames[x] for x in batchBXcolumns]\n\n else:\n print(\"Invalid Input.\")\n exit(0)\n\n return completenumpyarray, xvariables, filename, xvariablenames, yvariablenames, numyvariables, yvariables, batch", "def cAPM_Q_learning(df_train, df_test, MinRPT, MaxRPT_r1, MaxRPT, alpha,\n no_rounds):\n\n # number of periods in samples\n T = df_train.shape[0]\n T_test = df_test.shape[0]\n\n # number of agents\n K = df_test.shape[1]\n\n # individual forecasts\n F = df_train.iloc[:, 1:].values\n F_test = df_test.iloc[:, :].values\n\n # outcomes\n outcomes = df_train.iloc[:, 0].values\n\n # initialize matrices containing all predictionsa and bets\n pred_mat = np.full((no_rounds, K), np.nan, dtype=float)\n bet_mat = np.full((no_rounds, K), np.nan, dtype=float)\n\n # initialize vector containing estimated errors\n est_abs_error = np.full((no_rounds, K), np.nan, dtype=float)\n\n # initialize state identification matrix\n state_mat = np.full((no_rounds, K, 3), False, dtype=bool)\n\n # confidence in the crowd\n delta_mat = np.full((no_rounds, K, 3), 0, dtype=float)\n\n # budget initialization\n budgets = np.full(K, 1, dtype=float)\n start_budgets = np.full((no_rounds, K), 1, dtype=float)\n\n # market prediction vector\n market_pred = np.full(no_rounds, np.nan, dtype=float)\n\n # initialize vectors for storing error cluster means and counts\n # (small, medium, large)\n error_cl_mean = np.full(3, np.nan, dtype=float)\n error_cl_count = np.full(3, 0, dtype=float)\n no_errors = no_rounds*K\n\n # matrix containing the Q value for each action and state\n # dimensions: action, round, agent, state\n Q_val = np.full((2, no_rounds, K, 3), 0, dtype=float)\n\n # matrix containing starting predictions (before action) for Q-updates\n pred_mat_upd = np.full((2, no_rounds, K, 3), 0, dtype=float)\n\n # matrix containing market predictions shifted by one period for Q-updates\n market_pred_upd = 
np.full((no_rounds, K, 3), 0, dtype=float)\n\n ##############################\n # training the market - START#\n ##############################\n for i in range(T):\n\n # first round betting\n pred_mat[0, :] = F[i, :]\n current_bets = budgets * MaxRPT_r1\n bet_mat[0, :] = current_bets\n budgets -= current_bets\n market_pred[0] = np.dot(\n pred_mat[0, :], bet_mat[0, :]\n )/np.sum(bet_mat[0, :])\n\n # rest of rounds betting\n if i == 0:\n for j in range(1, no_rounds):\n\n pred_mat[j, :] = F[i, :]\n current_bets = budgets * MaxRPT\n bet_mat[j, :] = current_bets\n budgets -= current_bets\n\n else:\n for j in range(1, no_rounds):\n\n # screen shot start of round budgets for Q-val updates later\n np.copyto(start_budgets[j, :], budgets)\n\n # base for current invididual predictions are the last round\n # individual prediction\n np.copyto(pred_mat[j, :], pred_mat[j-1, :])\n\n # error estimates using market prediction of last round\n est_error = market_pred[j-1] - pred_mat[j, :]\n est_abs_error = np.abs(est_error)\n\n # identify in which state the agent is\n cl_dist = np.abs(error_cl_mean[:, np.newaxis] - est_abs_error)\n visited_state = np.swapaxes(np.amin(cl_dist, axis=0) == cl_dist,\n 0, 1)\n state_mat[j, :, :] = visited_state\n\n # choose action: 0-preserve, 1-change prediction\n decision = np.argmax(Q_val[:, j, visited_state], axis=0)\n pred_mat[j, :] += decision * delta_mat[j, visited_state] * est_error\n\n # reestime error after the actions are taken\n est_error = market_pred[j-1] - pred_mat[j, :]\n est_abs_error = np.abs(est_error)\n\n # estimate score\n accuracy_prime = np.maximum(100 * (1 - est_abs_error/oet), 1)\n score_prime = np.log(accuracy_prime)\n\n # bet according to estimated score\n worth_to_bet = score_prime >= 1\n current_bets = (\n worth_to_bet * MaxRPT + ~worth_to_bet * MinRPT\n ) * budgets\n bet_mat[j, :] = current_bets\n budgets -= current_bets\n\n # store the market prediction at the end of the round\n market_pred[j] = np.dot(\n pred_mat[j, :], bet_mat[j, :])/np.sum(bet_mat[j, :])\n\n # absolute errors from the current market\n abs_errors = np.abs(outcomes[i] - pred_mat)\n abs_errors_flat = abs_errors.flatten()\n\n # recompute error clusters (small, medium, large)\n error_order = np.random.choice(no_errors, no_errors, replace=False)\n\n if i == 0:\n # initialize error clusters\n for k in range(3):\n error_cl_mean[k] = abs_errors_flat[error_order[k]]\n error_cl_count[k] += 1\n for k in range(3, no_errors):\n inc_error = abs_errors_flat[error_order[k]]\n closest_cl = (np.abs(error_cl_mean - inc_error)).argmin()\n error_cl_mean[closest_cl] = (error_cl_count[closest_cl] * error_cl_mean[closest_cl] + inc_error) / (error_cl_count[closest_cl] + 1)\n error_cl_count[closest_cl] += 1\n # order: small, medium, large\n error_cl_mean = np.sort(error_cl_mean)\n # in case the cluster means do not differ (very low probability):\n # spread them\n if np.unique(error_cl_mean).size < 3:\n error_cl_mean += np.array([-1e-10, 1e-10, 2e-10])\n\n # initialize delta matrix (confidence in the crowd)\n\n else:\n for k in range(no_errors):\n inc_error = abs_errors_flat[error_order[k]]\n closest_cl = (np.abs(error_cl_mean - inc_error)).argmin()\n error_cl_mean[closest_cl] = (error_cl_count[closest_cl] * error_cl_mean[closest_cl] + inc_error) / (error_cl_count[closest_cl] + 1)\n error_cl_count[closest_cl] += 1\n # in case the cluster means do not differ (very low probability):\n # spread them\n if np.unique(error_cl_mean).size < 3:\n error_cl_mean += np.array([-1e-10, 1e-10, 2e-10])\n\n # use IQR to 
determine outlier error threshold\n Q3, Q1 = np.percentile(abs_errors, [75, 25])\n IQR = Q3 - Q1\n oet = Q3 + (IQR*1.5)\n\n # compute revenues of the agents based on accuracy of their predictions\n accuracy = np.maximum(100 * (1 - abs_errors/oet), 1)\n score = np.log(accuracy)\n revenue = np.multiply(score, bet_mat)\n\n # reward agents and rescale the budget (for computational reasons)\n budgets = budgets + np.sum(revenue, axis=0)\n budgets = budgets/np.sum(budgets)\n\n # update Q-values and deltas (possible after the first market)\n if i > 0:\n # update Q-values\n pred_mat_upd[:, 1:, :, :] = pred_mat[np.newaxis, :-1, :, np.newaxis]\n market_pred_upd[1:, :, :] = market_pred[:-1, np.newaxis, np.newaxis]\n pred_mat_upd[1, :, :, :] += delta_mat * (market_pred_upd - pred_mat_upd[1, :, :, :])\n abs_errors_upd = np.abs(outcomes[i] - pred_mat_upd)\n accuracy_upd = np.maximum(100 * (1 - abs_errors_upd/oet), 1)\n score_upd = np.log(accuracy_upd)\n worth_to_bet_upd = score_upd >= 1\n bet_mat_upd = np.multiply(\n worth_to_bet_upd * MaxRPT + ~worth_to_bet_upd * MinRPT,\n start_budgets[np.newaxis, :, :, np.newaxis]\n )\n potential_rev = np.multiply(score_upd, bet_mat_upd)\n Q_val += alpha*(potential_rev*state_mat[np.newaxis, :, :, :] - Q_val)\n # update deltas (confidence in the wisdom of the crowd)\n num = outcomes[i]-pred_mat\n denum = market_pred[:, np.newaxis]-pred_mat\n # prevent division by zero\n denum[denum == 0] = 1e-10\n experience = np.clip(num/denum, a_min=0, a_max=1)\n delta_mat += alpha*(experience[:, :, np.newaxis]*state_mat - delta_mat)\n\n ############################\n # training the market - END#\n ############################\n\n # using the market for the out-of-sample predictions\n pred = np.full(T_test, np.nan, dtype=float)\n for i in range(T_test):\n\n # reset to the trained market budget\n budgets_test = np.copy(budgets)\n\n # first round betting\n pred_mat[0, :] = F_test[i, :]\n current_bets = budgets_test * MaxRPT_r1\n bet_mat[0, :] = current_bets\n budgets_test -= current_bets\n market_pred[0] = np.dot(\n pred_mat[0, :], bet_mat[0, :]\n )/np.sum(bet_mat[0, :])\n\n # rest of rounds betting\n for j in range(1, no_rounds):\n\n # base for current invididual predictions are the last round\n # individual prediction\n np.copyto(pred_mat[j, :], pred_mat[j-1, :])\n\n # error estimates using market prediction of last round\n est_error = market_pred[j-1] - pred_mat[j, :]\n est_abs_error = np.abs(est_error)\n\n # identify in which state the agent is\n cl_dist = np.abs(error_cl_mean[:, np.newaxis] - est_abs_error)\n visited_state = np.swapaxes(\n np.amin(cl_dist, axis=0) == cl_dist, 0, 1)\n\n # choose action: 0-preserve, 1-change prediction\n decision = np.argmax(Q_val[:, j, visited_state], axis=0)\n pred_mat[j, :] += decision * delta_mat[j, visited_state]*est_error\n\n # reestime error after the actions are taken\n est_error = market_pred[j-1] - pred_mat[j, :]\n est_abs_error = np.abs(est_error)\n\n # estimate score\n accuracy_prime = np.maximum(100 * (1 - est_abs_error/oet), 1)\n score_prime = np.log(accuracy_prime)\n\n # bet according to estimated score\n worth_to_bet = score_prime >= 1\n current_bets = (\n worth_to_bet * MaxRPT + ~worth_to_bet * MinRPT\n ) * budgets_test\n bet_mat[j, :] = current_bets\n budgets_test -= current_bets\n\n # store the market prediction at the end of the round\n market_pred[j] = np.dot(\n pred_mat[j, :], bet_mat[j, :])/np.sum(bet_mat[j, :])\n\n # final round market prediction is the c-APM prediction\n pred[i] = market_pred[no_rounds-1]\n\n # output\n 
df_pred = pd.DataFrame(\n {\"c-APM (Q-learning)\": pred},\n index=df_test.index\n )\n\n return df_pred", "def correct_bad_chair(phases_dict):\n if len(phases_dict[\"instance_idx\"]) - 1 != phases_dict[\"n_objects\"]:\n # remove the empty object\n obj_points = []\n n_empty_obj = 0\n opt_ids = []\n for opt_id, opts in enumerate(phases_dict[\"obj_points\"]):\n if not opts.shape[0] == 0:\n obj_points.append(opts)\n opt_ids.append(opt_id)\n else:\n n_empty_obj += 1\n phases_dict[\"obj_points\"] = obj_points\n phases_dict[\"before_fix_n_objects\"] = phases_dict[\"n_objects\"]\n phases_dict[\"n_objects\"] = len(obj_points)\n phases_dict[\"bad_lamp\"] = True\n phases_dict[\"ok_obj_id\"] = opt_ids\n assert(len(phases_dict[\"instance_idx\"]) - 1 == phases_dict[\"n_objects\"])\n return True\n else:\n # there is empty mesh in drop\n\n if \"drop\" in phases_dict[\"trial_dir\"] and \"train/50\" in phases_dict[\"trial_dir\"]:\n\n n_empty_obj = 0\n opt_ids = []\n for opt_id, opts in enumerate(phases_dict[\"obj_points\"]):\n if not opts.shape[0] == 0:\n opt_ids.append(opt_id)\n else:\n n_empty_obj += 1\n if n_empty_obj > 0:\n\n\n list_items = [\"root_des_radius\", \"root_num\", \"clusters\", \"instance\", \"material\", \"obj_points\"]\n for item in list_items:\n phases_dict[item] = [phases_dict[item][a] for a in opt_ids]\n new_instance_idx = [0]\n for obj_pts in phases_dict[\"obj_points\"]:\n new_instance_idx.append(new_instance_idx[-1] + obj_pts.shape[0])\n\n phases_dict[\"instance_idx\"] = new_instance_idx\n phases_dict[\"n_objects\"] = len(phases_dict[\"obj_points\"])\n phases_dict[\"ok_obj_id\"] = opt_ids\n\n assert(phases_dict[\"n_particles\"] == new_instance_idx[-1])\n assert(len(phases_dict[\"instance_idx\"]) - 1 == phases_dict[\"n_objects\"])\n assert(len(phases_dict[\"root_num\"]) == phases_dict[\"n_objects\"])\n return True\n else:\n return False\n\n\n return False", "def do_califa(outfile='NGC4047.pipe3d.hdf5', gallist=['NGC4047'], \n fitsdir='fits_natv_edge', comomdir=None, colabel='co.smo7',\n ext='', nsm=2, ortpar='edge_leda.csv', distpar='edge_califa.csv',\n distcol='caDistP3d', hexgrid=False, allpix=False, debug=False, \n prob=True, discard_cdmatrix=False, append=True, overwrite=True):\n if allpix:\n stride = [1,1,1]\n else:\n stride = [3,3,1]\n\n if len(gallist) == 0:\n raise RuntimeError('Error: gallist is empty!')\n\n # cuts for when to apply BD correction\n hacut = 0.06 # 1e-16 erg / (cm2 s) - no longer used\n hbcut = 0.04 # 1e-16 erg / (cm2 s) - no longer used\n ahalo = 0 # mag\n ahahi = 6 # mag\n\n # FITS keywords important for astrometry\n wcskeys = ['CTYPE1', 'CTYPE2', 'CRVAL1', 'CRVAL2', 'CRPIX1', 'CRPIX2', \n 'CDELT1', 'CDELT2']\n cdkeys = ['CD1_1', 'CD1_2', 'CD2_1', 'CD2_2', 'CD1_3', 'CD2_3',\n 'CD3_1', 'CD3_2', 'CD3_3']\n dimkeys = ['NAXIS1', 'NAXIS2']\n\n # Get the orientation parameters from LEDA\n orttbl = EdgeTable(ortpar)\n orttbl.add_index('Name') \n\n # Get the distance from the CALIFA table\n disttbl = EdgeTable(distpar)\n disttbl.add_index('Name')\n\n # Read the FITS data\n # The columns to save are defined in fitsextract.py\n prodtype = ['ELINES', 'SFH', 'SSP', 'indices', 'flux_elines']\n leadstr = ['', '', '', 'indices.CS.', 'flux_elines.']\n tailstr = ['.ELINES', '.SFH', '.SSP', '', '']\n tailstr = [s+'.cube.fits.gz' for s in tailstr]\n\n for i_prod, prod in enumerate(prodtype):\n zsel, labels, units, nsel = getlabels(prod)\n default_len = len(zsel)\n tlist = []\n\n if prod == 'SFH':\n # Required file for SFH lum to mass conversion\n models = 
SSPModels('gsd01_156.fits')\n print('Number of model steps:',models.n_models)\n nlumcols = models.n_models\n\n for i_gal, gal in enumerate(gallist):\n print('\\nWorking on galaxy {} product {} nsel={}'.format(\n gal, prod, nsel))\n\n # Read in Pipe3D output\n cafile = os.path.join(fitsdir,leadstr[i_prod]+gal+tailstr[i_prod])\n if not os.path.exists(cafile):\n print('####### Cannot find',cafile)\n continue \n hdu = fits.open(cafile, ignore_missing_end=True)[0]\n cahd = hdu.header.copy()\n # Blanking of CTYPE3 so that fitsextract treats cubes as pseudocubes\n cahd['CTYPE3'] = ''\n # Set CDELT3 to 1 since that will be its value in template\n for key in ['CDELT3', 'CD3_3']:\n if key in cahd.keys():\n cahd[key] = 1.\n\n # Read in CO template\n if comomdir is not None:\n cofile = os.path.join(comomdir,gal+'.'+colabel+'_dil.snrpk.fits.gz')\n if not os.path.exists(cofile):\n print('####### Cannot find',cofile)\n continue\n cohd = fits.getheader(cofile)\n # Copy the CALIFA header and replace wcskeys with CO values\n for key in dimkeys+wcskeys:\n if key in cohd.keys():\n cahd[key] = cohd[key]\n # Need to discard CD matrix which would override the new wcskeys\n if 'CDELT1' in cohd.keys() and 'CDELT2' in cohd.keys():\n for key in cdkeys:\n if key in cahd.keys():\n del cahd[key]\n # Optionally discard CD matrix in CALIFA files and fall back on CDELTs\n if discard_cdmatrix:\n for key in cdkeys:\n if key in hdu.header.keys():\n del hdu.header[key]\n if debug:\n print('\\nINPUT',WCS(hdu.header))\n print('\\nCO data',WCS(cohd))\n print('\\nOUTPUT',WCS(cahd))\n newim = reproject_interp(hdu, cahd, order=0, return_footprint=False)\n if debug:\n fits.writeto(cafile.replace('.fits','.rg.fits'), newim, cahd, \n overwrite=True)\n else:\n newim = hdu.data\n\n # Set up output table\n nz = newim.shape[0]\n if debug:\n print('nz=',nz)\n col_lbl = [s+ext for s in labels]\n\n # Add smoothed Ha and Hb columns for extinction estimates\n if prod == 'ELINES' or prod == 'flux_elines':\n kernel = Gaussian2DKernel(nsm)\n if prod == 'ELINES':\n hb_idx = 5\n ha_idx = 6\n col_lbl += ['Hbeta_sm'+str(nsm)+ext, 'Halpha_sm'+str(nsm)+ext]\n cahd['DESC_20'] = ' Hbeta after {} pix smooth'.format(str(nsm))\n cahd['DESC_21'] = ' Halpha after {} pix smooth'.format(str(nsm))\n else:\n hb_idx = 28\n ha_idx = 45\n col_lbl += ['flux_Hbeta_sm'+str(nsm)+ext, 'flux_Halpha_sm'+str(nsm)+ext]\n hb_conv = convolve(newim[hb_idx,:,:], kernel, preserve_nan=True)\n ha_conv = convolve(newim[ha_idx,:,:], kernel, preserve_nan=True)\n newim = np.concatenate((newim, hb_conv[np.newaxis], ha_conv[np.newaxis]))\n if len(zsel) == default_len:\n zsel = list(zsel) + [nz, nz+1]\n if len(units) == default_len:\n units += ['10^-16 erg cm^-2 s^-1', '10^-16 erg cm^-2 s^-1']\n\n if i_prod == 0:\n print(\"RA, DEC, PA, INC:\",orttbl.loc[gal]['ledaRA'],\n orttbl.loc[gal]['ledaDE'], orttbl.loc[gal]['ledaPA'],\n orttbl.loc[gal]['ledaAxIncl'])\n tab0 = fitsextract(newim, header=cahd, keepnan=True, stride=stride, \n bunit=units, col_lbl=col_lbl, zselect=zsel, \n ra_gc=15*orttbl.loc[gal]['ledaRA'],\n dec_gc=orttbl.loc[gal]['ledaDE'], \n pa=orttbl.loc[gal]['ledaPA'],\n inc=orttbl.loc[gal]['ledaAxIncl'], \n ortlabel='LEDA', first=True, use_hexgrid=hexgrid)\n gname = Column([np.string_(gal)]*len(tab0), name='Name', \n description='Galaxy Name')\n tab0.add_column(gname, index=0)\n \n # Add additional columns\n if prod == 'ELINES' or prod == 'flux_elines':\n if prod == 'ELINES':\n prfx = ''\n else:\n prfx = 'flux_'\n # Provide labels for flux_elines columns\n for linecol in 
labels:\n if linecol.startswith('e_'):\n linetype = linecol.split('_')[1]\n linename = linecol.split('_')[2]\n prelbl = 'error in '\n else:\n linetype = linecol.split('_')[0]\n linename = linecol.split('_')[1]\n prelbl = ''\n if linetype == 'flux':\n suffix = 'intensity'\n elif linetype == 'vel':\n suffix = 'velocity'\n elif linetype == 'disp':\n suffix = 'velocity dispersion'\n elif linetype == 'EW':\n suffix = 'equivalent width'\n tab0[linecol+ext].description=prelbl+linename+' '+suffix\n tab0['flux_Hbeta_sm'+str(nsm)+ext].description=\\\n 'Hbeta intensity after {} pix smooth'.format(str(nsm))\n tab0['flux_Halpha_sm'+str(nsm)+ext].description=\\\n 'Halpha intensity after {} pix smooth'.format(str(nsm))\n\n # sfr0 is SFR from Halpha without extinction correction\n sfr0 = sfr_ha(tab0[prfx+'Halpha'+ext], imf='salpeter', \n name=prfx+'sigsfr0'+ext)\n e_sfr0 = Column(sfr0 *\n abs(tab0['e_'+prfx+'Halpha'+ext]/tab0[prfx+'Halpha'+ext]), \n name='e_'+prfx+'sigsfr0'+ext, dtype='f4', unit=sfr0.unit,\n description='error of uncorrected SFR surface density')\n tab0.add_columns([sfr0, e_sfr0])\n\n # Balmer decrement corrected SFR\n sfr_cor, A_Ha, e_sfr_cor, e_A_Ha = sfr_ha(\n tab0[prfx+'Halpha'+ext], \n flux_hb=tab0[prfx+'Hbeta'+ext], \n e_flux_ha=tab0['e_'+prfx+'Halpha'+ext],\n e_flux_hb=tab0['e_'+prfx+'Hbeta'+ext], \n imf='salpeter', \n name=prfx+'sigsfr_corr'+ext)\n # For negative extinction we assume A=0\n sfr_cor[A_Ha < ahalo] = sfr0[A_Ha < ahalo]\n e_sfr_cor[A_Ha < ahalo] = e_sfr0[A_Ha < ahalo]\n # For high extinction we blank the value\n sfr_cor[A_Ha > ahahi] = np.nan\n e_sfr_cor[A_Ha > ahahi] = np.nan\n tab0.add_columns([sfr_cor, e_sfr_cor, A_Ha, e_A_Ha])\n\n # Halpha extinction and SFR after smoothing and clipping\n A_Ha_smo = Column(get_AHa(tab0[prfx+'Halpha_sm'+str(nsm)+ext], \n tab0[prfx+'Hbeta_sm'+str(nsm)+ext], np.log10), \n name=prfx+'AHa_smooth'+str(nsm)+ext, dtype='f4', unit='mag',\n description='Ha extinction after {} pix smooth'.format(str(nsm)))\n sfr_smo = Column(sfr0 * 10**(0.4*A_Ha_smo),\n name=prfx+'sigsfr_adopt'+ext, dtype='f4', unit=sfr0.unit,\n description='smooth+clip BD corrected SFR surface density')\n # For negative extinction we assume A=0\n sfr_smo[A_Ha_smo < ahalo] = sfr0[A_Ha_smo < ahalo]\n # For high extinction we blank the value\n sfr_smo[A_Ha_smo > ahahi] = np.nan\n tab0.add_columns([A_Ha_smo, sfr_smo])\n\n # BPT requires flux_elines since EW(Ha) is part of classification\n if prod == 'flux_elines':\n if prob:\n BPT0, BPT0sf, p_BPT0 = bpt_type(tab0, ext=ext, name='BPT'+ext, \n prob=prob)\n tab0.add_columns([BPT0, p_BPT0, BPT0sf])\n else:\n BPT0, BPT0sf = bpt_type(tab0, ext=ext, name='BPT'+ext, \n prob=prob)\n tab0.add_columns([BPT0, BPT0sf])\n #\n zoh0, zoherr0 = ZOH_M13(tab0, ext=ext, name='ZOH'+ext, err=True)\n tab0.add_columns([zoh0, zoherr0])\n\n elif prod == 'SFH':\n if i_gal == 0:\n f_young = []\n # For star formation history also calculate mass fractions\n # Multiply the luminosity fraction by M/L ratio and re-normalize\n lumcols = Table(tab0.columns[9:nlumcols+9])\n df_lum = lumcols.to_pandas()\n df_mass = df_lum.multiply(models.mass_to_light, axis='columns')\n df_norm = df_mass.divide(df_mass.sum(axis=1), axis='index')\n df_norm.columns = [x.replace('lum','mass') for x in list(df_norm.columns)]\n # Add aggregated mass fraction columns to table\n agecols = [s.split('_')[2] for s in df_norm.columns.values]\n metcols = [s.split('_')[4] for s in df_norm.columns.values]\n df_age = df_norm.groupby(agecols, sort=False, axis=1).sum(min_count=1)\n df_age = 
df_age.reindex(sorted(df_age.columns, key=float), axis=1)\n df_age.columns = ['massfrac_age_'+x+ext for x in list(df_age.columns)]\n # Total the mass fractions < 32 Myr for later SFR calculation\n f_young.append(np.array(df_age[df_age.columns[:12]].sum(axis=1, \n min_count=1).astype(np.float32)))\n df_met = df_norm.groupby(metcols, axis=1).sum(min_count=1)\n df_met.columns = ['massfrac_met_'+x+ext for x in list(df_met.columns)]\n naggcols = len(df_age.columns) + len(df_met.columns)\n print('Number of aggregated columns:', naggcols)\n t_mass_age = Table.from_pandas(df_age.astype(np.float32))\n t_mass_met = Table.from_pandas(df_met.astype(np.float32))\n indexcols = Table(tab0.columns[:9])\n lumaggcols = Table(tab0.columns[nlumcols+9:nlumcols+naggcols+9])\n erraggcols = Table(tab0.columns[2*nlumcols+naggcols+9:])\n tab0 = hstack([indexcols, lumaggcols, erraggcols,\n t_mass_age.filled(np.nan), \n t_mass_met.filled(np.nan)], join_type='exact')\n tab0.add_column(f_young[i_gal], name='f_young')\n tab0['f_young'].description='total mass fraction < 32 Myr'\n for i_col in range(naggcols):\n newname=lumaggcols.columns[i_col].name.replace('lum','mass')\n newdesc=lumaggcols.columns[i_col].description.replace('Luminosity','Mass')\n tab0[newname].description = newdesc\n tab0[newname].unit = 'fraction'\n\n elif prod == 'SSP':\n # For stellar surface density we need distance\n star0 = stmass_pc2(tab0['mass_ssp'+ext], dz=tab0['cont_dezon'+ext],\n dist=disttbl.loc[gal][distcol], name='sigstar'+ext)\n avstar0 = stmass_pc2(tab0['mass_Avcor_ssp'+ext], dz=tab0['cont_dezon'+ext],\n dist=disttbl.loc[gal][distcol], name='sigstar_Avcor'+ext)\n avstar0.description += ' dust corrected'\n ferr0 = Column(abs(tab0['e_medflx_ssp'+ext]/tab0['medflx_ssp'+ext]), \n name='fe_medflx'+ext, dtype='f4', unit='fraction',\n description='fractional error in continuum flux')\n tab0.add_columns([star0, avstar0, ferr0])\n # Add the SSP-based SFR if SFH was run\n try:\n ssp_sfr = Column(f_young[i_gal] * star0 / (0.032*u.Gyr),\n name='sigsfr_ssp'+ext, dtype='f4',\n description='Sigma_SFR from < 32 Myr SSP')\n avssp_sfr = Column(f_young[i_gal] * avstar0 / (0.032*u.Gyr),\n name='sigsfr_Avcor_ssp'+ext, dtype='f4',\n description='Sigma_SFR Av-corrected from < 32 Myr SSP')\n tab0.add_columns([ssp_sfr, avssp_sfr])\n except NameError:\n pass\n\n tlist.append(tab0)\n\n if len(tlist) > 0:\n t_merge = vstack(tlist)\n t_merge.meta['date'] = datetime.today().strftime('%Y-%m-%d')\n if debug:\n print(t_merge.colnames)\n print('There are',len(t_merge),'rows in merged table')\n\n if prod == prodtype[0]:\n t_merge.write(outfile, path=prod+ext, overwrite=overwrite, \n append=append, serialize_meta=True, compression=True)\n else:\n t_merge.write(outfile, path=prod+ext, overwrite=overwrite, \n append=True, serialize_meta=True, compression=True)\n return", "def inputdata(self,maxnumcon_,maxnumvar_,c_,cfix_,aptrb_,aptre_,asub_,aval_,bkc_,blc_,buc_,bkx_,blx_,bux_):\n numcon_ = None\n if numcon_ is None:\n numcon_ = len(buc_)\n elif numcon_ != len(buc_):\n raise IndexError(\"Inconsistent length of array buc\")\n if numcon_ is None:\n numcon_ = len(blc_)\n elif numcon_ != len(blc_):\n raise IndexError(\"Inconsistent length of array blc\")\n if numcon_ is None:\n numcon_ = len(bkc_)\n elif numcon_ != len(bkc_):\n raise IndexError(\"Inconsistent length of array bkc\")\n numvar_ = None\n if numvar_ is None:\n numvar_ = len(c_)\n elif numvar_ != len(c_):\n raise IndexError(\"Inconsistent length of array c\")\n if numvar_ is None:\n numvar_ = len(bux_)\n elif numvar_ 
!= len(bux_):\n raise IndexError(\"Inconsistent length of array bux\")\n if numvar_ is None:\n numvar_ = len(blx_)\n elif numvar_ != len(blx_):\n raise IndexError(\"Inconsistent length of array blx\")\n if numvar_ is None:\n numvar_ = len(bkx_)\n elif numvar_ != len(bkx_):\n raise IndexError(\"Inconsistent length of array bkx\")\n if numvar_ is None:\n numvar_ = len(aptrb_)\n elif numvar_ != len(aptrb_):\n raise IndexError(\"Inconsistent length of array aptrb\")\n if numvar_ is None:\n numvar_ = len(aptre_)\n elif numvar_ != len(aptre_):\n raise IndexError(\"Inconsistent length of array aptre\")\n if isinstance(c_, numpy.ndarray) and c_.dtype is numpy.dtype(numpy.float64) and c_.flags.contiguous:\n _c_copyarray = False\n _c_tmp = ctypes.cast(c_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif c_ is not None:\n _c_copyarray = True\n _c_np_tmp = numpy.zeros(len(c_),numpy.dtype(numpy.float64))\n _c_np_tmp[:] = c_\n assert _c_np_tmp.flags.contiguous\n _c_tmp = ctypes.cast(_c_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _c_copyarray = False\n _c_tmp = None\n \n if aptrb_ is None:\n raise ValueError(\"Argument aptrb cannot be None\")\n if aptrb_ is None:\n raise ValueError(\"Argument aptrb may not be None\")\n if isinstance(aptrb_, numpy.ndarray) and aptrb_.dtype is numpy.dtype(numpy.int64) and aptrb_.flags.contiguous:\n _aptrb_copyarray = False\n _aptrb_tmp = ctypes.cast(aptrb_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif aptrb_ is not None:\n _aptrb_copyarray = True\n _aptrb_np_tmp = numpy.zeros(len(aptrb_),numpy.dtype(numpy.int64))\n _aptrb_np_tmp[:] = aptrb_\n assert _aptrb_np_tmp.flags.contiguous\n _aptrb_tmp = ctypes.cast(_aptrb_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _aptrb_copyarray = False\n _aptrb_tmp = None\n \n if aptre_ is None:\n raise ValueError(\"Argument aptre cannot be None\")\n if aptre_ is None:\n raise ValueError(\"Argument aptre may not be None\")\n if isinstance(aptre_, numpy.ndarray) and aptre_.dtype is numpy.dtype(numpy.int64) and aptre_.flags.contiguous:\n _aptre_copyarray = False\n _aptre_tmp = ctypes.cast(aptre_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n elif aptre_ is not None:\n _aptre_copyarray = True\n _aptre_np_tmp = numpy.zeros(len(aptre_),numpy.dtype(numpy.int64))\n _aptre_np_tmp[:] = aptre_\n assert _aptre_np_tmp.flags.contiguous\n _aptre_tmp = ctypes.cast(_aptre_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int64))\n else:\n _aptre_copyarray = False\n _aptre_tmp = None\n \n if asub_ is None:\n raise ValueError(\"Argument asub cannot be None\")\n if asub_ is None:\n raise ValueError(\"Argument asub may not be None\")\n if isinstance(asub_, numpy.ndarray) and asub_.dtype is numpy.dtype(numpy.int32) and asub_.flags.contiguous:\n _asub_copyarray = False\n _asub_tmp = ctypes.cast(asub_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif asub_ is not None:\n _asub_copyarray = True\n _asub_np_tmp = numpy.zeros(len(asub_),numpy.dtype(numpy.int32))\n _asub_np_tmp[:] = asub_\n assert _asub_np_tmp.flags.contiguous\n _asub_tmp = ctypes.cast(_asub_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _asub_copyarray = False\n _asub_tmp = None\n \n if aval_ is None:\n raise ValueError(\"Argument aval cannot be None\")\n if aval_ is None:\n raise ValueError(\"Argument aval may not be None\")\n if isinstance(aval_, numpy.ndarray) and aval_.dtype is numpy.dtype(numpy.float64) and aval_.flags.contiguous:\n _aval_copyarray = False\n _aval_tmp = 
ctypes.cast(aval_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif aval_ is not None:\n _aval_copyarray = True\n _aval_np_tmp = numpy.zeros(len(aval_),numpy.dtype(numpy.float64))\n _aval_np_tmp[:] = aval_\n assert _aval_np_tmp.flags.contiguous\n _aval_tmp = ctypes.cast(_aval_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _aval_copyarray = False\n _aval_tmp = None\n \n if bkc_ is None:\n raise ValueError(\"Argument bkc cannot be None\")\n if bkc_ is None:\n raise ValueError(\"Argument bkc may not be None\")\n if bkc_ is not None:\n _bkc_tmp = (ctypes.c_int32 * len(bkc_))(*bkc_)\n else:\n _bkc_tmp = None\n if blc_ is None:\n raise ValueError(\"Argument blc cannot be None\")\n if blc_ is None:\n raise ValueError(\"Argument blc may not be None\")\n if isinstance(blc_, numpy.ndarray) and blc_.dtype is numpy.dtype(numpy.float64) and blc_.flags.contiguous:\n _blc_copyarray = False\n _blc_tmp = ctypes.cast(blc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif blc_ is not None:\n _blc_copyarray = True\n _blc_np_tmp = numpy.zeros(len(blc_),numpy.dtype(numpy.float64))\n _blc_np_tmp[:] = blc_\n assert _blc_np_tmp.flags.contiguous\n _blc_tmp = ctypes.cast(_blc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _blc_copyarray = False\n _blc_tmp = None\n \n if buc_ is None:\n raise ValueError(\"Argument buc cannot be None\")\n if buc_ is None:\n raise ValueError(\"Argument buc may not be None\")\n if isinstance(buc_, numpy.ndarray) and buc_.dtype is numpy.dtype(numpy.float64) and buc_.flags.contiguous:\n _buc_copyarray = False\n _buc_tmp = ctypes.cast(buc_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif buc_ is not None:\n _buc_copyarray = True\n _buc_np_tmp = numpy.zeros(len(buc_),numpy.dtype(numpy.float64))\n _buc_np_tmp[:] = buc_\n assert _buc_np_tmp.flags.contiguous\n _buc_tmp = ctypes.cast(_buc_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _buc_copyarray = False\n _buc_tmp = None\n \n if bkx_ is None:\n raise ValueError(\"Argument bkx cannot be None\")\n if bkx_ is None:\n raise ValueError(\"Argument bkx may not be None\")\n if bkx_ is not None:\n _bkx_tmp = (ctypes.c_int32 * len(bkx_))(*bkx_)\n else:\n _bkx_tmp = None\n if blx_ is None:\n raise ValueError(\"Argument blx cannot be None\")\n if blx_ is None:\n raise ValueError(\"Argument blx may not be None\")\n if isinstance(blx_, numpy.ndarray) and blx_.dtype is numpy.dtype(numpy.float64) and blx_.flags.contiguous:\n _blx_copyarray = False\n _blx_tmp = ctypes.cast(blx_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif blx_ is not None:\n _blx_copyarray = True\n _blx_np_tmp = numpy.zeros(len(blx_),numpy.dtype(numpy.float64))\n _blx_np_tmp[:] = blx_\n assert _blx_np_tmp.flags.contiguous\n _blx_tmp = ctypes.cast(_blx_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _blx_copyarray = False\n _blx_tmp = None\n \n if bux_ is None:\n raise ValueError(\"Argument bux cannot be None\")\n if bux_ is None:\n raise ValueError(\"Argument bux may not be None\")\n if isinstance(bux_, numpy.ndarray) and bux_.dtype is numpy.dtype(numpy.float64) and bux_.flags.contiguous:\n _bux_copyarray = False\n _bux_tmp = ctypes.cast(bux_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif bux_ is not None:\n _bux_copyarray = True\n _bux_np_tmp = numpy.zeros(len(bux_),numpy.dtype(numpy.float64))\n _bux_np_tmp[:] = bux_\n assert _bux_np_tmp.flags.contiguous\n _bux_tmp = 
ctypes.cast(_bux_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _bux_copyarray = False\n _bux_tmp = None\n \n res = __library__.MSK_XX_inputdata64(self.__nativep,maxnumcon_,maxnumvar_,numcon_,numvar_,_c_tmp,cfix_,_aptrb_tmp,_aptre_tmp,_asub_tmp,_aval_tmp,_bkc_tmp,_blc_tmp,_buc_tmp,_bkx_tmp,_blx_tmp,_bux_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def inputdata(self,maxnumcon_,maxnumvar_,c,cfix_,aptrb,aptre,asub,aval,bkc,blc,buc,bkx,blx,bux): # 3\n numcon_ = None\n if numcon_ is None:\n numcon_ = len(buc)\n elif numcon_ != len(buc):\n raise IndexError(\"Inconsistent length of array buc\")\n if numcon_ is None:\n numcon_ = len(blc)\n elif numcon_ != len(blc):\n raise IndexError(\"Inconsistent length of array blc\")\n if numcon_ is None:\n numcon_ = len(bkc)\n elif numcon_ != len(bkc):\n raise IndexError(\"Inconsistent length of array bkc\")\n if numcon_ is None: numcon_ = 0\n numvar_ = None\n if numvar_ is None:\n numvar_ = len(c)\n elif numvar_ != len(c):\n raise IndexError(\"Inconsistent length of array c\")\n if numvar_ is None:\n numvar_ = len(bux)\n elif numvar_ != len(bux):\n raise IndexError(\"Inconsistent length of array bux\")\n if numvar_ is None:\n numvar_ = len(blx)\n elif numvar_ != len(blx):\n raise IndexError(\"Inconsistent length of array blx\")\n if numvar_ is None:\n numvar_ = len(bkx)\n elif numvar_ != len(bkx):\n raise IndexError(\"Inconsistent length of array bkx\")\n if numvar_ is None:\n numvar_ = len(aptrb)\n elif numvar_ != len(aptrb):\n raise IndexError(\"Inconsistent length of array aptrb\")\n if numvar_ is None:\n numvar_ = len(aptre)\n elif numvar_ != len(aptre):\n raise IndexError(\"Inconsistent length of array aptre\")\n if numvar_ is None: numvar_ = 0\n if c is None:\n c_ = None\n else:\n try:\n c_ = memoryview(c)\n except TypeError:\n try:\n _tmparr_c = array.array(\"d\",c)\n except TypeError:\n raise TypeError(\"Argument c has wrong type\")\n else:\n c_ = memoryview(_tmparr_c)\n \n else:\n if c_.format != \"d\":\n c_ = memoryview(array.array(\"d\",c))\n \n if aptrb is None: raise TypeError(\"Invalid type for argument aptrb\")\n if aptrb is None:\n aptrb_ = None\n else:\n try:\n aptrb_ = memoryview(aptrb)\n except TypeError:\n try:\n _tmparr_aptrb = array.array(\"q\",aptrb)\n except TypeError:\n raise TypeError(\"Argument aptrb has wrong type\")\n else:\n aptrb_ = memoryview(_tmparr_aptrb)\n \n else:\n if aptrb_.format != \"q\":\n aptrb_ = memoryview(array.array(\"q\",aptrb))\n \n if aptre is None: raise TypeError(\"Invalid type for argument aptre\")\n if aptre is None:\n aptre_ = None\n else:\n try:\n aptre_ = memoryview(aptre)\n except TypeError:\n try:\n _tmparr_aptre = array.array(\"q\",aptre)\n except TypeError:\n raise TypeError(\"Argument aptre has wrong type\")\n else:\n aptre_ = memoryview(_tmparr_aptre)\n \n else:\n if aptre_.format != \"q\":\n aptre_ = memoryview(array.array(\"q\",aptre))\n \n if asub is None: raise TypeError(\"Invalid type for argument asub\")\n if asub is None:\n asub_ = None\n else:\n try:\n asub_ = memoryview(asub)\n except TypeError:\n try:\n _tmparr_asub = array.array(\"i\",asub)\n except TypeError:\n raise TypeError(\"Argument asub has wrong type\")\n else:\n asub_ = memoryview(_tmparr_asub)\n \n else:\n if asub_.format != \"i\":\n asub_ = memoryview(array.array(\"i\",asub))\n \n if aval is None: raise TypeError(\"Invalid type for argument aval\")\n if aval is None:\n aval_ = None\n else:\n try:\n aval_ = memoryview(aval)\n except TypeError:\n try:\n 
_tmparr_aval = array.array(\"d\",aval)\n except TypeError:\n raise TypeError(\"Argument aval has wrong type\")\n else:\n aval_ = memoryview(_tmparr_aval)\n \n else:\n if aval_.format != \"d\":\n aval_ = memoryview(array.array(\"d\",aval))\n \n if bkc is None: raise TypeError(\"Invalid type for argument bkc\")\n if bkc is None:\n bkc_ = None\n else:\n try:\n bkc_ = memoryview(bkc)\n except TypeError:\n try:\n _tmparr_bkc = array.array(\"i\",bkc)\n except TypeError:\n raise TypeError(\"Argument bkc has wrong type\")\n else:\n bkc_ = memoryview(_tmparr_bkc)\n \n else:\n if bkc_.format != \"i\":\n bkc_ = memoryview(array.array(\"i\",bkc))\n \n if blc is None: raise TypeError(\"Invalid type for argument blc\")\n if blc is None:\n blc_ = None\n else:\n try:\n blc_ = memoryview(blc)\n except TypeError:\n try:\n _tmparr_blc = array.array(\"d\",blc)\n except TypeError:\n raise TypeError(\"Argument blc has wrong type\")\n else:\n blc_ = memoryview(_tmparr_blc)\n \n else:\n if blc_.format != \"d\":\n blc_ = memoryview(array.array(\"d\",blc))\n \n if buc is None: raise TypeError(\"Invalid type for argument buc\")\n if buc is None:\n buc_ = None\n else:\n try:\n buc_ = memoryview(buc)\n except TypeError:\n try:\n _tmparr_buc = array.array(\"d\",buc)\n except TypeError:\n raise TypeError(\"Argument buc has wrong type\")\n else:\n buc_ = memoryview(_tmparr_buc)\n \n else:\n if buc_.format != \"d\":\n buc_ = memoryview(array.array(\"d\",buc))\n \n if bkx is None: raise TypeError(\"Invalid type for argument bkx\")\n if bkx is None:\n bkx_ = None\n else:\n try:\n bkx_ = memoryview(bkx)\n except TypeError:\n try:\n _tmparr_bkx = array.array(\"i\",bkx)\n except TypeError:\n raise TypeError(\"Argument bkx has wrong type\")\n else:\n bkx_ = memoryview(_tmparr_bkx)\n \n else:\n if bkx_.format != \"i\":\n bkx_ = memoryview(array.array(\"i\",bkx))\n \n if blx is None: raise TypeError(\"Invalid type for argument blx\")\n if blx is None:\n blx_ = None\n else:\n try:\n blx_ = memoryview(blx)\n except TypeError:\n try:\n _tmparr_blx = array.array(\"d\",blx)\n except TypeError:\n raise TypeError(\"Argument blx has wrong type\")\n else:\n blx_ = memoryview(_tmparr_blx)\n \n else:\n if blx_.format != \"d\":\n blx_ = memoryview(array.array(\"d\",blx))\n \n if bux is None: raise TypeError(\"Invalid type for argument bux\")\n if bux is None:\n bux_ = None\n else:\n try:\n bux_ = memoryview(bux)\n except TypeError:\n try:\n _tmparr_bux = array.array(\"d\",bux)\n except TypeError:\n raise TypeError(\"Argument bux has wrong type\")\n else:\n bux_ = memoryview(_tmparr_bux)\n \n else:\n if bux_.format != \"d\":\n bux_ = memoryview(array.array(\"d\",bux))\n \n res = self.__obj.inputdata64(maxnumcon_,maxnumvar_,numcon_,numvar_,c_,cfix_,aptrb_,aptre_,asub_,aval_,bkc_,blc_,buc_,bkx_,blx_,bux_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def run_pca(data_file, rs, n_components, outfile1, outfile2):\n print('running PCA with n_components={}'.format(n_components))\n day_batcher = DayBatcher(data_file, skiprow=1, delimiter=' ')\n mat = day_batcher.next_batch()\n rst = []\n while mat is not None:\n if mat.shape[1] == 13:\n # use compact10d\n datadict = {'features': mat[:, 3:],\n 'red': mat[:, 2],\n 'user': mat[:, 1],\n 'day': mat[:, 0]}\n else:\n # use all_fixed\n datadict = {'features': mat[:, 14:],\n 'red': mat[:, 13],\n 'user': mat[:, 1],\n 'day': mat[:, 0]}\n batch = scale(datadict['features'])\n pca = PCA(n_components=n_components, random_state=rs)\n pca.fit(batch)\n data_reduced = 
np.dot(batch, pca.components_.T) # pca transform\n data_original = np.dot(data_reduced, pca.components_) # inverse_transform\n pointloss = np.mean(np.square(batch - data_original), axis=1)\n loss = np.mean(pointloss)\n for d, u, t, l, in zip(datadict['day'].tolist(),\n datadict['user'].tolist(),\n datadict['red'].tolist(),\n pointloss.flatten().tolist()):\n rst.append((u, d, l, t))\n mat = day_batcher.next_batch()\n train_rst, test_rst = split_train_test(rst)\n save_rst(train_rst, outfile1)\n save_rst(test_rst, outfile2)\n eval_cr(test_rst, 'pca')", "def main(argv=None):\n args = inputs(argv)\n site = args.site\n node = args.node\n sensor = args.sensor\n method = args.method\n stream = args.stream\n deploy = args.deploy\n start = args.start\n stop = args.stop\n\n # check if we are specifying a deployment or a specific date and time range\n if not deploy or (start and stop):\n return SyntaxError('You must specify either a deployment number or beginning and end dates of interest.')\n\n # if we are specifying a deployment number, then get the data from the Gold Copy THREDDS server\n if deploy:\n optaa = load_gc_thredds(site, node, sensor, method, stream, ('.*deployment%04d.*OPTAA.*\\\\.nc$' % deploy))\n cal_file = ('{}-{}-{}.deploy{:02d}.cal_coeffs.json'.format(site, node, sensor, deploy))\n\n # check to see if we downloaded any data\n if not optaa:\n exit_text = ('Data unavailable for %s-%s-%s, %s, %s, deployment %d.' % (site, node, sensor, method,\n stream, deploy))\n raise SystemExit(exit_text)\n\n else:\n # otherwise, request the data for download from OOINet via the M2M API using the specified dates\n r = m2m_request(site, node, sensor, method, stream, start, stop)\n if not r:\n exit_text = ('Request failed for %s-%s-%s, %s, %s, from %s to %s.' % (site, node, sensor, method,\n stream, start, stop))\n raise SystemExit(exit_text)\n\n # OPTAA data is different from other instruments. it needs to be processed on a per-deployment basis in order\n # to get the correct number of wavelengths before it can be merged into a single dataset. create a list of\n # all the files that were returned by the M2M request, and determine the deployments that are included in the\n # request\n files = list_files(r['allURLs'][0], '.+OPTAA.+\\\\.nc$')\n if not files:\n exit_text = ('Data unavailable for %s-%s-%s, %s, %s, from %s to %s.' % (site, node, sensor, method,\n stream, start, stop))\n raise SystemExit(exit_text)\n\n deployments = np.unique([int(sub.split('/')[3][10:14]) for sub in files])\n\n # loop through the deployments and download the data for each one\n optaa = []\n cal_file = []\n for deploy in deployments:\n # Valid M2M request, download the data on a per-deployment basis\n data = m2m_collect(r, ('.*deployment%04d.*OPTAA.*\\\\.nc$' % deploy))\n if data:\n optaa.append(data)\n cal_file.append('{}-{}-{}.deploy{:02d}.cal_coeffs.json'.format(site, node, sensor, deploy))\n\n # check to see if we downloaded any data (remove empty/none entries from the list)\n if not optaa:\n exit_text = ('Data unavailable for %s-%s-%s, %s, %s, from %s to %s.' 
% (site, node, sensor, method,\n stream, start, stop))\n raise SystemExit(exit_text)\n\n # set up the calibration file path and name(s)\n out_file = os.path.abspath(args.outfile)\n cal_path = os.path.dirname(out_file)\n if not os.path.exists(cal_path):\n os.makedirs(cal_path)\n\n # clean-up and reorganize the data\n multi = isinstance(optaa, list)\n if node in ['SF01A', 'SF01B', 'SF03A']:\n # this OPTAA is on a shallow profiler\n if multi:\n for i, ds in enumerate(optaa):\n cfile = os.path.join(cal_path, cal_file[i])\n optaa[i] = optaa_profiler(ds, cfile)\n optaa = xr.concat(optaa, dim='time')\n else:\n cal_file = os.path.join(cal_path, cal_file)\n optaa = optaa_profiler(optaa, cal_file)\n else:\n # this OPTAA is on one of the two benthic platforms\n if multi:\n for i, ds in enumerate(optaa):\n cfile = os.path.join(cal_path, cal_file[i])\n optaa[i] = optaa_benthic(ds, cfile)\n optaa = xr.concat(optaa, dim='time')\n else:\n optaa = optaa_benthic(optaa, cal_file)\n\n # get the vocabulary information for the site, node, and sensor and update the dataset attributes\n vocab = get_vocabulary(site, node, sensor)[0]\n optaa = optaa.sortby(['deployment', 'time'])\n optaa = update_dataset(optaa, vocab['maxdepth'])\n\n # save the data to disk\n if not os.path.exists(os.path.dirname(out_file)):\n os.makedirs(os.path.dirname(out_file))\n optaa.to_netcdf(out_file, mode='w', format='NETCDF4', engine='h5netcdf', encoding=ENCODINGS)", "def apply_manual_codes(cls, user, data, coda_input_dir):\n for plan in PipelineConfiguration.TEST_SHOWS_CODING_PLANS:\n test_pipeline_messages = [td for td in data if plan.raw_field in td]\n coda_input_path = path.join(coda_input_dir, plan.coda_filename)\n\n f = None\n try:\n if path.exists(coda_input_path):\n f = open(coda_input_path, 'r')\n TracedDataCodaV2IO.import_coda_2_to_traced_data_iterable_multi_coded(\n user, test_pipeline_messages, plan.id_field, {plan.coded_field: plan.code_scheme}, f)\n\n if plan.binary_code_scheme is not None:\n if f is not None:\n f.seek(0)\n TracedDataCodaV2IO.import_coda_2_to_traced_data_iterable(\n user, test_pipeline_messages, plan.id_field, {plan.binary_coded_field:plan.binary_code_scheme}, f)\n finally:\n if f is not None:\n f.close()\n\n # Merge manually coded survey files into cleaned dataset\n for plan in PipelineConfiguration.SURVEY_CODING_PLANS:\n f = None\n try:\n coda_input_path = path.join(coda_input_dir, plan.coda_filename)\n if path.exists(coda_input_path):\n f = open(coda_input_path, 'r')\n TracedDataCodaV2IO.import_coda_2_to_traced_data_iterable(\n user, data, plan.id_field, {plan.coded_field: plan.code_scheme}, f)\n finally:\n if f is not None:\n f.close()\n \n return data", "def control_variation(df, outDir, features_to_analyse, \n variables_to_analyse=[\"date_yyyymmdd\"], \n remove_outliers=True, \n p_value_threshold=0.05, \n PCs_to_keep=10):\n \n # Record non-data columns before dropping feature columns \n other_colnames = [col for col in df.columns if col not in features_to_analyse]\n \n # Drop columns that contain only zeros\n colnames_before = list(df.columns)\n AllZeroFeats = df[features_to_analyse].columns[(df[features_to_analyse] == 0).all()]\n df = df.drop(columns=AllZeroFeats)\n colnames_after = list(df.columns)\n zero_cols = [col for col in colnames_before if col not in colnames_after]\n if len(zero_cols) > 0:\n print(\"Dropped %d features with all-zero summaries:\\n%s\" % (len(zero_cols), zero_cols))\n \n # Record feature column names after dropping zero data\n features_to_analyse = [feat for feat in df.columns 
if feat not in other_colnames]\n \n # Remove outliers from the dataset \n if remove_outliers:\n df, indsOutliers = removeOutliersMahalanobis(df, features_to_analyse)\n remove_outliers = False \n # NB: Ensure Mahalanobis operation to remove outliers is performed only once!\n\n # Check for normality in features to analyse in order decide which \n # statistical test to use: one-way ANOVA (parametric) or Kruskal-Wallis \n # (non-parametric) test\n TEST = check_normality(df, features_to_analyse, p_value_threshold)\n\n # Record name of statistical test used (kruskal/f_oneway)\n test_name = str(TEST).split(' ')[1].split('.')[-1].split('(')[0].split('\\'')[0]\n\n # CONTROL VARIATION: STATS (ANOVAs)\n # - Does N2 worm behaviour on control vary across experiment days? \n # (worms are larger? Shorter L1 diapuase? Camera focus/FOV adjusted? Skewed by non-worm tracked objects?\n # Did not record time when worms were refed! Could be this. If so, worms will be bigger across all foods on that day) \n # - Perform ANOVA to see if features vary across imaging days for control\n # - Perform Tukey HSD post-hoc analyses for pairwise differences between imaging days\n # - Highlight outlier imaging days and investigate reasons why\n # - Save list of top significant features for outlier days - are they size-related features?\n for grouping_variable in variables_to_analyse:\n print(\"\\nTESTING: %s\\n\" % grouping_variable)\n \n if not len(df[grouping_variable].unique()) > 1:\n print(\"Need at least two groups for stats to investigate %s\" % grouping_variable)\n else:\n print(\"Performing %s tests for '%s'\" % (test_name, grouping_variable)) \n \n test_results_df, sigfeats_out = \\\n topfeats_ANOVA_by_group(df, \n grouping_variable, \n features_to_analyse,\n TEST,\n p_value_threshold)\n \n # Ensure directory exists to save results\n Path(outDir).mkdir(exist_ok=True, parents=True)\n \n # Define outpaths\n froot = 'control_variation_in_' + grouping_variable + '_' + test_name\n stats_outpath = outDir / (froot + \"_results.csv\")\n sigfeats_outpath = outDir / (froot + \"_significant_features.csv\")\n \n # Save test statistics + significant features list to file\n test_results_df.to_csv(stats_outpath)\n sigfeats_out.to_csv(sigfeats_outpath, header=False)\n\n # Box plots\n plotDir = outDir / \"Plots\"\n topfeats_boxplots_by_group(df, \n test_results_df, \n grouping_variable,\n plot_save_dir=plotDir, #save to plotDir\n p_value_threshold=p_value_threshold)\n \n # PCA (coloured by grouping variable, eg. 
experiment date)\n df = doPCA(df, \n grouping_variable, \n features_to_analyse,\n plot_save_dir = plotDir,\n PCs_to_keep = PCs_to_keep)", "def data_preparation(self) -> None:\n self.logger.info('data cleaning')\n self.logger.info('num of secs: {}, num of ipo_dates: {}, num of secs with prices: {}'.format(\n len(self.data),\n len(self.ipo_dates),\n len(self.prices)\n ))\n excluded = []\n excluded = [i.lower() for i in excluded]\n self.logger.info(f'number of excluded: {len(excluded)}')\n for i in excluded:\n self.data.pop(i)\n for s in self.data:\n # columns with empty assets sum (empty columns and other situations)\n self.data[s].dropna(axis='columns', how='any', subset=['A_0'], inplace=True)\n # columns with descriptions (polish and english names of values)\n self.data[s].drop(self.data[s].columns[[0, 1]], inplace=True, axis=1)\n\n self.logger.info(f'number of secs after cleaning: {len(self.data)}')\n data_list = [k for k in self.data.values()]\n self.uber_data = pd.concat(data_list, ignore_index=True, axis=1)\n self.uber_data = self.uber_data.transpose()\n self.uber_data = self.uber_data.loc[:, pd.notnull(self.uber_data.columns)]", "def parametersweep(basedir,configfile,acfdir='ACF',invtype='tik'):\n\n alpha_sweep=sp.logspace(-3.5,sp.log10(7),25)\n costdir = os.path.join(basedir,'Cost')\n ionoinfname=os.path.join(basedir,acfdir,'00lags.h5')\n ionoin=IonoContainer.readh5(ionoinfname)\n \n dirio = ('Spectrums','Mat','ACFMat')\n inputdir = os.path.join(basedir,dirio[0])\n \n dirlist = glob.glob(os.path.join(inputdir,'*.h5'))\n (listorder,timevector,filenumbering,timebeg,time_s) = IonoContainer.gettimes(dirlist)\n Ionolist = [dirlist[ikey] for ikey in listorder]\n \n RSTO = RadarSpaceTimeOperator(Ionolist,configfile,timevector,mattype='Sim')\n \n npts=RSTO.simparams['numpoints']\n \n ionospec=makeionocombined(dirlist)\n if npts==ionospec.Param_List.shape[-1]:\n tau,acfin=spect2acf(ionospec.Param_Names,ionospec.Param_List)\n nloc,ntimes=acfin.shape[:2]\n ambmat=RSTO.simparams['amb_dict']['WttMatrix']\n np=ambmat.shape[0]\n acfin_amb=sp.zeros((nloc,ntimes,np),dtype=acfin.dtype)\n # get the original acf\n \n \n ambmat=RSTO.simparams['amb_dict']['WttMatrix']\n np=ambmat.shape[0]\n \n for iloc,locarr in enumerate(acfin):\n for itime,acfarr in enumerate(locarr):\n acfin_amb[iloc,itime]=sp.dot(ambmat,acfarr)\n acfin_amb=acfin_amb[:,0]\n else:\n acfin_amb=ionospec.Param_List[:,0]\n \n if not os.path.isdir(costdir):\n os.mkdir(costdir)\n # pickle file stuff \n pname=os.path.join(costdir,'cost{0}-{1}.pickle'.format(acfdir,invtype))\n\n alpha_list=[]\n errorlist=[]\n errorlaglist=[]\n datadiflist=[]\n constlist=[]\n if 'perryplane' in basedir.lower() or 'SimpData':\n rbounds=[-500,500]\n else:\n rbounds=[0,500]\n\n alpha_list_new=alpha_sweep.tolist()\n for i in alpha_list:\n if i in alpha_list_new:\n alpha_list_new.remove(i)\n \n for i in alpha_list_new:\n ionoout,datadif,constdif=invertRSTO(RSTO,ionoin,alpha_list=i,invtype=invtype,rbounds=rbounds,Nlin=1)\n \n datadiflist.append(datadif)\n constlist.append(constdif)\n acfout=ionoout.Param_List[:,0]\n alpha_list.append(i)\n outdata=sp.power(sp.absolute(acfout-acfin_amb),2)\n aveerror=sp.sqrt(sp.nanmean(outdata,axis=0))\n errorlaglist.append(aveerror)\n errorlist.append(sp.nansum(aveerror))\n \n pickleFile = open(pname, 'wb')\n pickle.dump([alpha_list,errorlist,datadiflist,constlist,errorlaglist],pickleFile)\n pickleFile.close()\n mkalphalist(pname)\n alphaarr=sp.array(alpha_list)\n errorarr=sp.array(errorlist)\n errorlagarr=sp.array(errorlaglist)\n 
datadif=sp.array(datadiflist)\n constdif=sp.array(constlist)\n fig,axlist,axmain=plotalphaerror(alphaarr,errorarr,errorlagarr)\n fig.savefig(os.path.join(costdir,'cost{0}-{1}.png'.format(acfdir,invtype)))\n \n fig,axlist=plotLcurve(alphaarr,datadif,constdif)\n fig.savefig(os.path.join(costdir,'lcurve{0}-{1}.png'.format(acfdir,invtype)))", "def processing_inputs(data, mk_cols_list, link_cols_list, cross_cols_list, WIDE_COLS, arrival=True):\n print('*-'*40, processing_inputs)\n if arrival:\n mk_cols_list = mk_cols_list + ['lk_arrival_0_percent', 'lk_arrival_1_percent','lk_arrival_2_percent', 'lk_arrival_3_percent', 'lk_arrival_4_percent']\n mk_cols_list = mk_cols_list + ['zsl_link_arrival_status_mean','zsl_link_arrival_status_nunique','zsl_link_arrival_status0','zsl_link_arrival_status1','zsl_link_arrival_status2','zsl_link_arrival_status3']\n if 'lk_arrival_0_percent' in mk_cols_list:\n print('The lk_arrival_0_percent in the mk_cols_list')\n #print('*-' * 40, 'EXIT')\n #sys.exit(0)\n print('111'*40, 'HAVE FEATURES OF ARRIVAL')\n else:\n print('222'*40, 'HAVENOT FEATURES OF ARRIVAL')\n if 'ata' in mk_cols_list:\n print('The ata in the mk_cols_list')\n print('*-' * 40, 'EXIT')\n sys.exit(0)\n if 'ata' in link_cols_list:\n print('The ata in the link_cols_list')\n if 'ata' in cross_cols_list:\n print('The ata in the cross_cols_list')\n if 'ata' in WIDE_COLS:\n print('The ata in the WIDE_COLS')\n print('*-' * 40, 'EXIT')\n sys.exit(0)\n data_link_inputs = preprocess_inputs(data, cols=link_cols_list)\n data.drop(columns=link_cols_list, axis=1, inplace=True)\n gc.collect()\n print('drop the link_cols_list')\n # print(data_link_inputs[:, :, :1])\n # data['cross_id'] = data['cross_id'].str.replace('nan','0')\n data_cross_inputs = preprocess_inputs(data, cols=cross_cols_list)\n data.drop(columns=cross_cols_list, axis=1, inplace=True)\n gc.collect()\n print('drop the cross_cols_list')\n\n data_deep_input = data[mk_cols_list]\n data_wide_input = data[WIDE_COLS].values\n data_inputs_slice = data['slice_id'].values\n data_labels = data['ata']\n if arrival:\n arrival_col = ['lk_arrival_0_percent', 'lk_arrival_1_percent',\n 'lk_arrival_2_percent', 'lk_arrival_3_percent', 'lk_arrival_4_percent']\n data_arrival = data[arrival_col]\n print('*-'*40, 'data_arrival', data_arrival.shape)\n return data_link_inputs, data_cross_inputs, data_deep_input, data_wide_input, data_inputs_slice, data_labels, data_arrival\n else:\n return data_link_inputs, data_cross_inputs, data_deep_input, data_wide_input, data_inputs_slice, data_labels", "def main():\n\n # Get dataset and create pandas dataframe\n f_data = \"../data/dataset.xlsx\"\n df = pd.read_excel(f_data)\n\n # Get variables for indices\n years = list(set(df[\"Year\"][3:]))\n years_arr = df[\"Year\"][3:]\n\n # Get values from dataset\n population = df[\"Population.1\"][3:]\n auto_commuters = df[\"Auto\"][3:]\n free_traffic = df[\"Freeway\"][3:]\n arterial_traffic = df[\"Arterial Street\"][3:]\n general_time_value = df[\"Cost Components\"][3:]\n commercial_time_value = df[\"Unnamed: 12\"][3:]\n gasoline_cost = df[\"Unnamed: 13\"][3:]\n diesel_cost = df[\"Unnamed: 14\"][3:]\n excess_fuel_per_commuter = df[\"Unnamed: 20\"][3:]\n annual_hrs_of_delay = df[\"Unnamed: 24\"][3:]\n travel_time_index = df[\"Travel Time Index\"][3:]\n cost_per_autocommuter = df[\"Unnamed: 34\"][3:]\n uber = df[\"Uber Entry Dummies\"][3:]\n lyft = df[\"Lyft Entry Dummies\"][3:]\n both = df[\"UberXlyft\"][3:]\n unemployment = df[\"Unemployment Rate (%)\"][3:]\n\n # Get covariances\n filled_ump = 
copy.deepcopy(unemployment).fillna(value=0)\n print(\"Correlation of uber and ump: {}\".format(np.corrcoef(filled_ump, uber)))\n print(\"Correlation of lyft and ump: {}\".format(np.corrcoef(filled_ump, lyft)))\n print(\"Covariance of tti and ump: {}\".format(np.corrcoef(filled_ump,\n travel_time_index.astype(np.float32))))\n print(\"Covariance of cost and ump: {}\".format(np.corrcoef(filled_ump,\n cost_per_autocommuter.astype(np.float32))))\n print(\"Covariance of excess and ump: {}\".format(np.corrcoef(filled_ump,\n excess_fuel_per_commuter.astype(np.float32))))\n print(\"Covariance of delay and ump: {}\".format(np.corrcoef(filled_ump,\n annual_hrs_of_delay.astype(np.float32))))\n\n # Create output data structure\n year_dict = {years[i]: {\"pop\": [], \"auto\": [], \"free\": [], \"art\": [],\n \"gen_time\": [], \"comm_time\": [], \"gas\": [], \"diesel\":\n [], \"ann_delay\": [], \"travel_index\": [], \"cost\":\n [], \"ub\": [], \"ly\": [], \"bo\": [], \"ump\": [],\n \"excess_gas\": []} for i in range(len(years))}\n\n # Counter variable\n i = 0\n\n # Iterate through everything for plots\n for year, pop, auto, free, art, gen_time, comm_time, gas, diesel, excess_gas, \\\n ann_delay, travel_index, cost, ub, ly, bo, ump in \\\n zip(years_arr, population, auto_commuters, free_traffic,\n arterial_traffic, general_time_value, commercial_time_value,\n gasoline_cost, diesel_cost, excess_fuel_per_commuter,\n annual_hrs_of_delay, travel_time_index, cost_per_autocommuter,\n uber, lyft, both, unemployment):\n\n # Append values to dictionary for plotting\n year_dict[year][\"pop\"].append(pop)\n year_dict[year][\"auto\"].append(auto)\n year_dict[year][\"free\"].append(free)\n year_dict[year][\"art\"].append(art)\n year_dict[year][\"gen_time\"].append(gen_time)\n year_dict[year][\"comm_time\"].append(comm_time)\n year_dict[year][\"gas\"].append(gas)\n year_dict[year][\"diesel\"].append(diesel)\n year_dict[year][\"ann_delay\"].append(ann_delay)\n year_dict[year][\"travel_index\"].append(travel_index)\n year_dict[year][\"cost\"].append(cost)\n year_dict[year][\"ub\"].append(ub)\n year_dict[year][\"ly\"].append(ly)\n year_dict[year][\"bo\"].append(bo)\n year_dict[year][\"ump\"].append(ump)\n year_dict[year][\"excess_gas\"].append(excess_gas)\n\n # Average values according to year\n for key_i in list(year_dict.keys()):\n for key_j in list(year_dict[key_i].keys()):\n vals = copy.deepcopy(year_dict[key_i][key_j])\n year_dict[key_i][key_j] = np.mean(vals)\n\n # Now make arrays for time series data\n pop_by_year = [year_dict[years[i]][\"pop\"] for i in range(len(years))]\n auto_by_year = [year_dict[years[i]][\"auto\"] for i in range(len(years))]\n free_by_year = [year_dict[years[i]][\"free\"] for i in range(len(years))]\n art_by_year = [year_dict[years[i]][\"art\"] for i in range(len(years))]\n gen_time_by_year = [year_dict[years[i]][\"gen_time\"] for i in range(len(years))]\n comm_time_by_year = [year_dict[years[i]][\"comm_time\"] for i in range(len(\n years))]\n gas_by_year = [year_dict[years[i]][\"gas\"] for i in range(len(years))]\n diesel_by_year = [year_dict[years[i]][\"diesel\"] for i in range(len(years))]\n ann_delay_by_year = [year_dict[years[i]][\"ann_delay\"] for i in range(len(\n years))]\n travel_index_by_year = [year_dict[years[i]][\"travel_index\"] for i in\n range(len(years))]\n cost_by_year = [year_dict[years[i]][\"cost\"] for i in range(len(years))]\n ub_by_year = [year_dict[years[i]][\"ub\"] for i in range(len(years))]\n ly_by_year = [year_dict[years[i]][\"ly\"] for i in range(len(years))]\n 
bo_by_year = [year_dict[years[i]][\"bo\"] for i in range(len(years))]\n ump_by_year = [year_dict[years[i]][\"ump\"] for i in range(len(years))]\n excess_gas_per_year = [year_dict[years[i]][\"excess_gas\"] for i in range(len(\n years))]\n\n\n # Make plots\n plt.plot(years, pop_by_year)\n plt.xlabel(\"Year\")\n plt.ylabel(\"Average Population of UMR Urban Centers (1000s)\")\n plt.title(\"Average Population of Urban Mobility Report Urban Centers over Time\")\n plt.savefig(\"../graphs/pop_vs_time.png\")\n plt.clf()\n\n plt.plot(years, auto_by_year)\n plt.xlabel(\"Year\")\n plt.ylabel(\"Autocommuters (1000s)\")\n plt.title(\"Average Number of Autocommuters in UMI Urban Centers (1000s)\")\n plt.savefig(\"../graphs/auto_vs_time.png\")\n plt.clf()\n\n plt.plot(years, free_by_year, color=\"b\", label=\"Freeways\")\n plt.plot(years, art_by_year, color=\"r\", label=\"Arterial Roads\")\n plt.legend()\n plt.xlabel(\"Year\")\n plt.ylabel(\"Driving Distance (miles)\")\n plt.title(\"Average Net Freeway/Arterial Road Driving over Time (\"\n \"1000s of miles)\")\n plt.savefig(\"../graphs/dist_vs_time.png\")\n plt.clf()\n\n plt.plot(years, gen_time_by_year, color=\"b\", label=\"General Value\")\n plt.plot(years, comm_time_by_year, color=\"r\", label=\"Commercial Value\")\n plt.legend()\n plt.xlabel(\"Year\")\n plt.ylabel(\"Value ($/hr)\")\n plt.title(\"Average General and Commercial Values of Time over Time\")\n plt.savefig(\"../graphs/val_of_time_vs_time.png\")\n plt.clf()\n\n plt.plot(years, gas_by_year, color=\"b\", label=\"Gasoline\")\n plt.plot(years, diesel_by_year, color=\"r\", label=\"Diesel\")\n plt.legend()\n plt.xlabel(\"Year\")\n plt.ylabel(\"Cost ($/gallon)\")\n plt.title(\"Average Cost of Gasoline and Diesel Fuel over Time\")\n plt.savefig(\"../graphs/gas_vs_time.png\")\n plt.clf()\n\n plt.plot(years, ann_delay_by_year)\n plt.xlabel(\"Year\")\n plt.ylabel(\"Annual per-Commuter Traffic Delays (hrs)\")\n plt.title(\"Average Annual per-Commuter Traffic Delays over Time\")\n plt.savefig(\"../graphs/delay_vs_time.png\")\n plt.clf()\n\n plt.plot(years, travel_index_by_year)\n plt.xlabel(\"Year\")\n plt.ylabel(\"Travel Index\")\n plt.title(\"Average Travel Index over Time\")\n plt.savefig(\"../graphs/index_vs_time.png\")\n plt.clf()\n\n plt.plot(years, ump_by_year)\n plt.xlabel(\"Year\")\n plt.ylabel(\"Unemployment Rate (%)\")\n plt.title(\"Average Unemployment Rate over Time\")\n plt.savefig(\"../graphs/ump_vs_time.png\")\n plt.clf()\n\n plt.plot(years, cost_by_year)\n plt.xlabel(\"Year\")\n plt.ylabel(\"Cost ($)\")\n plt.title(\"Average Annual per-Capita Cost of Traffic Congestion over Time\")\n plt.savefig(\"../graphs/cost_vs_time.png\")\n plt.clf()\n\n plt.plot(years, excess_gas_per_year)\n plt.xlabel(\"Year\")\n plt.ylabel(\"Excess Fuel Consumed (Gallons)\")\n plt.title(\"Average Annual per-Capita Excess Fuel Consumed over Time\")\n plt.savefig(\"../graphs/extra_fuel_vs_time.png\")\n plt.clf()\n\n x = list(lyft) # Lyft data\n y = list(uber) # Uber data\n bins = [2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018]\n\n plt.hist([x, y], bins, label=['Lyft', 'Uber'])\n plt.legend(loc='upper right')\n plt.xlabel(\"Year\")\n plt.ylabel(\"Number of cities entered\")\n plt.title(\"Uber and Lyft Entry into Urban Mobility Report Cities\")\n plt.clf()", "def compute_absorption(self, atmosphere, grid):\n t = atmosphere[\"temperature\"]\n dims = list(t.dims) + [\"mechanism\", \"wavenumber\"]\n sizes = tuple([x for x in t.sizes.values()] + [4, grid.size])\n beta = {\"{}_absorption\".format(name): 
DataArray(zeros(sizes), dims=dims)\n for name in self.gas.keys()}\n for name, gas in self.gas.items():\n for i in range(t.data.size):\n vmr = {x: atmosphere[\"vmr_{}\".format(x)].data.flat[i]\n for x in self.gas.keys()}\n k = gas.absorption_coefficient(t.data.flat[i],\n atmosphere[\"pressure\"].data.flat[i],\n vmr, grid)\n i = unravel_index(i, t.data.shape)\n for j, (source, data) in enumerate(k.items()):\n indices = tuple(list(i) + [j, slice(None)])\n if source == \"cia\":\n bsum = zeros(grid.size)\n for values in data.values():\n bsum += values[:]\n beta[\"{}_absorption\".format(name)].values[indices] = bsum[:]\n elif source == \"continua\":\n bsum = zeros(grid.size)\n for values in data.values():\n bsum += values[:]\n beta[\"{}_absorption\".format(name)].values[indices] = bsum[:]\n else:\n beta[\"{}_absorption\".format(name)].values[indices] = data[:]\n return Dataset(beta)", "def baselineSolutions(filename):\n \n def getOtherSaCurrentAnts( ):\n \"\"\" Retrieve ants in other science subarrays. \"\"\"\n if subarrayNo == 1:\n otherSa = Subarray.getSubarrayRef( 2 )\n otherSaAntAssignments = otherSa.getAntennaAssignments( )\n elif subarrayNo == 2:\n otherSa = Subarray.getSubarrayRef( 1 )\n otherSaAntAssignments = otherSa.getAntennaAssignments( )\n else:\n otherSa1Ants = Subarray.getSubarrayRef(1).getAntennaAssignments()\n otherSa2Ants = Subarray.getSubarrayRef(2).getAntennaAssignments()\n otherSaAntAssignments = otherSa1Ants + otherSa2Ants\n\n otherSaAnts = [ i.carmaAntennaNo for i in otherSaAntAssignments ]\n return otherSaAnts\n\n def toosmall( val ):\n if abs( val ) < 0.0001:\n return True\n else:\n return False\n\n import fileIOPython as fIOP\n\n # First determine which antennas will receive updates.\n antPosVals = fIOP.fileToTable( filename, ignoreEmpty=True, comment=\"#\" )\n antsInThisSa = currentAntennaNumbers()\n antsInOtherScienceSa = getOtherSaCurrentAnts()\n antNo = 0\n validAnts = []\n invalidAnts = []\n validAntsInOtherScienceSa = []\n validAntsNotInThisSa = []\n for pos in antPosVals:\n antNo += 1\n x,y,z = float(pos[0]), float(pos[1]), float(pos[2])\n if toosmall(x) and toosmall(y) and toosmall(z):\n invalidAnts.append( antNo )\n else:\n if antNo in antsInOtherScienceSa:\n validAntsInOtherScienceSa.append( antNo )\n if antNo not in antsInThisSa:\n validAntsNotInThisSa.append( antNo )\n validAnts.append( antNo )\n\n if len( validAntsInOtherScienceSa ) > 0:\n rfValidAntsInOtherScienceSa = helpers.formatAsRanges( validAntsInOtherScienceSa )\n error = \"\"\n if len(rfValidAntsInOtherScienceSa) > 1:\n error += \"Ants %s currently belong to other science subarrays. \"%(rfValidAntsInOtherScienceSa)\n error += \"Please remove them prior to rerunning this command.\"\n else:\n error += \"Ant %s currently belongs to another science subarray. 
\"%(rfValidAntsInOtherScienceSa)\n error += \"Please remove it prior to rerunning this command.\"\n printError( error )\n return\n\n \n antNo = 0\n for pos in antPosVals:\n antNo += 1\n if antNo in invalidAnts:\n continue\n x,y,z = float(pos[0]), float(pos[1]), float(pos[2])\n baseOff = s.convertBaseline( x, y, z, antNo )\n\n if ( baseOff.antNo != antNo ): # Sanity check\n raise Exception,\"Mismatched ant no.\"\n \n padOffset( round( baseOff.east, 2 ), \n round( baseOff.north, 2 ),\n round( baseOff.up, 2 ),\n antNo )\n\n s.setAntPosFilename( filename )\n\n rfValidAnts = helpers.formatAsRanges( validAnts )\n if ( rfValidAnts > 1 ):\n print \"Solutions applied for ants %s.\"%(rfValidAnts)\n else:\n print \"Solutions applied for ant %s.\"%(rfValidAnts)\n \n if len( invalidAnts ) > 0:\n rfInvalidAnts = helpers.formatAsRanges( invalidAnts )\n if ( len( rfInvalidAnts ) > 1 ):\n printWarning( \"Antpos does NOT contain solutions for ants %s.\"%(rfInvalidAnts) )\n else:\n printWarning( \"Antpos does NOT contain a solution for ant %s.\"%(rfInvalidAnts) )\n\n if len( validAntsNotInThisSa ) > 0:\n rfValidAntsNotInThisSa = helpers.formatAsRanges( validAntsNotInThisSa )\n warning = \"Solutions were applied for \"\n if len(rfValidAntsNotInThisSa) > 1:\n warning += \"ants %s which do \"%rfValidAntsNotInThisSa\n else:\n warning += \"ant %s which does \"%rfValidAntsNotInThisSa\n warning += \"not belong to this subarray. The new solutions \"\n warning += \"may not be reflected in the monitor system until these \"\n warning += \"ants are added back into a science subarray.\" \n printWarning( warning )", "def explore_FAAM_aerosol_data():\n # -- PCASP\n dsPCASP = get_FAAM_mineral_dust_calibration(instrument='PCASP',\n rtn_values=False)\n # -- CDP\n dsCDP = get_FAAM_mineral_dust_calibration(instrument='CDP',\n rtn_values=False)\n # only consider \"potential dust\" above a certain size?\n # Use 100 um for now", "def fitDAC(fineThresholds, coarseThresholds, bias, adcs0, adcs1, limLow, limHigh, fitLim):\n \n # array to hold fit parameters\n \n mevs = numarray.zeros((calConstant.NUM_TEM, calConstant.NUM_ROW, calConstant.NUM_END,\n calConstant.NUM_FE, 2), numarray.Float32)\n \n # array to hold range info \n \n ranges = numarray.zeros((calConstant.NUM_TEM, calConstant.NUM_ROW, calConstant.NUM_END,\n calConstant.NUM_FE), numarray.Int8) \n \n adcs0 = adcs0 - bias\n adcs1 = adcs1 - bias\n adcs0 = adcs0[...,numarray.NewAxis]\n adcs1 = adcs1[...,numarray.NewAxis]\n\n # compare upper and lower limits against FINE range thresholds\n q = (fineThresholds > adcs0) & (fineThresholds < adcs1)\n \n for tem in range(calConstant.NUM_TEM):\n for row in range(calConstant.NUM_ROW):\n for end in range(calConstant.NUM_END):\n for fe in range(calConstant.NUM_FE):\n \n qx = q[tem,row,end,fe,:]\n rng = calConstant.CDAC_FINE\n \n # if there are not enough points in the FINE range or high point is out of range, \n # try the COARSE range\n\n if len(numarray.nonzero(qx)[0]) < 3 or qx[-1]:\n \n qx[...] 
= \\\n (coarseThresholds[tem,row,end,fe,:] > adcs0[tem,row,end,fe,:]) & \\\n (coarseThresholds[tem,row,end,fe,:] < adcs1[tem,row,end,fe,:])\n \n tholds = coarseThresholds[tem,row,end,fe,:]\n rng = calConstant.CDAC_COARSE\n \n else:\n \n tholds = fineThresholds[tem,row,end,fe,:] \n \n tholds = tholds + bias[tem,row,end,fe,numarray.NewAxis]\n d = numarray.compress(qx, D0)\n a = numarray.compress(qx, tholds)\n # try to fit the preferred range data\n \n fail = False\n \n try:\n import ROOTFit\n import ROOT\n (fitParms, fitErrs, chisq) = ROOTFit.ROOTFit(ROOT.TF1(\"p1\",\"pol1\"),\n d,\n a,\n P0)\n\n dnorm = (chisq / len(d))\n if dnorm > fitLim:\n log.warning(\"fit error > %0.2f on T%d,%s%s,%d\", dnorm, tem, \n calConstant.CROW[row], calConstant.CPM[end], fe)\n fail = True \n except ValueError, e:\n log.error(\"fit excep on T%d,%s%s,%d: %s,%s,%s\", tem, calConstant.CROW[row],\n calConstant.CPM[end], fe, e, d, a)\n fail = True\n \n # check slope parameter value for reasonableness\n \n if not fail and rng == calConstant.CDAC_FINE:\n m = fitParms[1]\n if m < limLow[tem,row,end,fe] or m > limHigh[tem,row,end,fe]:\n log.warning(\"bad slope %0.3f on T%d,%s%s,%d\", m, tem, \n calConstant.CROW[row], calConstant.CPM[end], fe)\n fail = True\n \n # if the range is FINE, and the fit fails, or the resulting slope is\n # out of limits, try to get fit parameters for the COARSE range\n \n if fail:\n \n log.warning(\"trying COARSE range for T%d,%s%s,%d\", tem, calConstant.CROW[row],\n calConstant.CPM[end], fe)\n \n qx[...] = \\\n (coarseThresholds[tem,row,end,fe,:] > adcs0[tem,row,end,fe,:]) & \\\n (coarseThresholds[tem,row,end,fe,:] < adcs1[tem,row,end,fe,:])\n \n tholds = coarseThresholds[tem,row,end,fe,:] \n rng = calConstant.CDAC_COARSE\n \n tholds = tholds + bias[tem,row,end,fe,numarray.NewAxis]\n d = numarray.compress(qx, D0)\n a = numarray.compress(qx, tholds)\n \n try:\n import ROOTFit\n import ROOT\n (fitParms, fitErrs, chisq) = ROOTFit.ROOTFit(ROOT.TF1(\"p1\",\"pol1\"),\n d,\n a,\n P0)\n \n fail = False\n except ValueError, e:\n log.error(\"fit excep on T%d,%s%s,%d: %s,%s,%s\", tem, calConstant.CROW[row],\n calConstant.CPM[end], fe, e, d, a)\n \n # save fit parameters (or substitute)\n mevs[tem,row,end,fe,0] = fitParms[1]\n mevs[tem,row,end,fe,1] = fitParms[0] \n ranges[tem,row,end,fe] = rng\n \n return (mevs, ranges)" ]
[ "0.65322304", "0.5709489", "0.56655955", "0.5427672", "0.5351284", "0.5349491", "0.53222907", "0.5319463", "0.53075916", "0.53012884", "0.52932554", "0.52810115", "0.5260214", "0.5258429", "0.5253238", "0.5209871", "0.5207418", "0.51765686", "0.5168372", "0.5157539", "0.5156934", "0.51475704", "0.5143649", "0.51221347", "0.5119748", "0.51154697", "0.51102453", "0.5096355", "0.5087698", "0.50833255" ]
0.7241779
0
Takes OPTAA data recorded by the Cabled Shallow Profiler system and cleans up the data set to make it more user-friendly. Primary task is renaming parameters and dropping some that are of limited use. Additionally, recalculate the intermediate products (e.g. absorption and attenuation) and add them to the data set. Finally, add the estimated chlorophyll and POC concentrations to the data set. Will test the data set to determine if more than one deployment is present. If so, will raise an exception with an error message. ACS processing requires that the data be processed one deployment at a time in order to properly assign calibration coefficients and pad wavelength arrays.
def optaa_profiler(ds, cal_file): # check to see if there is more than one deployment in the data set if len(np.unique(ds['deployment'].values)) > 1: raise ValueError('More than one deployment in the data set. Please structure processing request to process ' 'one deployment at a time.') # drop some of the variables: # internal_timestamp == time, redundant so can remove # pressure_counts == none of the OOI OPTAAs have a pressure sensor # serial_number == available in the global attributes # meter_type == always the same, not needed # packet_type == always the same, not needed # record_length == always the same, not needed # checksum == not needed, used in data parsing ds = ds.drop(['internal_timestamp', 'pressure_counts', 'serial_number', 'meter_type', 'packet_type', 'record_length', 'checksum']) # check for data from a co-located CTD, if not present create the variables using NaN's as the fill value if 'sea_water_temperature' not in ds.variables: ds['sea_water_temperature'] = ('time', ds['deployment'].data * np.nan) ds['sea_water_practical_salinity'] = ('time', ds['deployment'].data * np.nan) # pull out the number of wavelengths and serial number and then drop the variable (part of the metadata) num_wavelengths = ds.num_wavelengths.values[0].astype(int) serial_number = int(re.sub('[^0-9]', '', ds.attrs['SerialNumber'])) ds = ds.drop('num_wavelengths') # load the calibration coefficients uid = ds.attrs['AssetUniqueID'] start_time = ds['time'][0].values.astype(float) / 10 ** 9 cal = load_cal_coefficients(cal_file, uid, start_time) # check the calibration coefficients against the deployment data if cal.coeffs['serial_number'] != serial_number: raise Exception('Serial Number mismatch between ac-s data and the device file.') if cal.coeffs['num_wavelengths'] != num_wavelengths: raise Exception('Number of wavelengths mismatch between ac-s data and the device file.') # remove the units from the variable name rename = { 'a_signal_dark_counts': 'a_signal_dark', 'a_reference_dark_counts': 'a_reference_dark', 'a_signal_counts': 'a_signal', 'a_reference_counts': 'a_reference', 'c_signal_dark_counts': 'c_signal_dark', 'c_reference_dark_counts': 'c_reference_dark', 'c_signal_counts': 'c_signal', 'c_reference_counts': 'c_reference', 'int_ctd_pressure': 'sea_water_pressure', 'wavelength': 'wavelength_number' } ds = ds.rename(rename) # Delete the first 45 seconds of the data record per recommendation from the vendor. Note, originally the vendor # recommended deleting the first 45 seconds, then 60 seconds and then 120 seconds. They never provided a data # based reason for the change in recommendation. Within OOI, instruments were programmed to run for 60 seconds, # then 120 seconds and then 240 seconds ... and it is all mixed up across the various data sets. So, we are # going to use the 45-second recommendation and apply it to all data sets. If the vendor ever provides an analysis # justifying the change in recommendation, we can revisit this. 
ds.elapsed_run_time.values = ds.elapsed_run_time.where(ds.elapsed_run_time / 1000 > 45) ds = ds.dropna(dim='time', subset=['elapsed_run_time']) # convert internal and external temperature sensors from raw counts to degrees Celsius ds['internal_temp'] = opt_internal_temp(ds['internal_temp_raw']) ds['external_temp'] = opt_external_temp(ds['external_temp_raw']) # create a profile variable to uniquely identify profiles within the dataset...if the profiler moved print('Determining profiler movement ...') pks, dzdt = updown(ds['depth'].values, 10) if len(pks) == 2 and (pks[0] == 0 and pks[1] == len(dzdt) - 1): # the profiler never moved, so treat the whole data set as a time series print('Profiler was parked for the entire deployment, treating data as a time series using burst averaging.') ds['profile'] = ('time', np.zeros(len(ds['time'])).astype(int) - 1) # calculate the median of the remaining data per burst measurement (configured to run hourly for 3 minutes) print('Calculating burst averages ...') start_time = time.time() binned = ds.resample(time='3600s', base=1800, loffset='1800s', skipna=True).reduce(np.median, dim='time', keep_attrs=True) binned = binned.where(~np.isnan(binned.deployment), drop=True) stop_time = time.time() elapsed_time = stop_time - start_time print('... burst averaging complete. Elapsed time: %f seconds' % elapsed_time) else: # the profiler moved, so treat the data as a series of profiles print('Profiler moved during the deployment, treating data as a series of profiles.') print('Sub-selecting upcast data only from the data set ...') dzdt = xr.DataArray(dzdt, dims='time', coords={'time': ds['time']}) ds = ds.where(dzdt < 0, drop=True) print('Creating and adding a profile variable to the data set ...') ds = create_profile_id(ds) # group the data by profile number and bin the data into 25 cm depth bins (nominal ascent rate of the shallow # profiler is 5 cm/s, binning at 25 cm will help to reduce the noise in the data and speed up subsequent # processing). profiles = ds.groupby('profile') profiles = [profile[1] for profile in profiles] partial_binning = partial(bin_profiles, site_depth=200, bin_size=0.25) with ProcessPoolExecutor(max_workers=N_CORES) as executor: binned = list(tqdm(executor.map(partial_binning, profiles), total=len(profiles), desc='Smoothing and binning each profile into 25 cm depth bins ...', file=sys.stdout)) # reset the dataset now using binned profiles binned = [i[0] for i in binned if i is not None] binned = xr.concat(binned, 'time') binned = binned.sortby(['profile', 'time']) # confirm dimension order is correct for the wavelength arrays (sometimes the order gets flipped # during the binning process) binned['wavelength_a'] = binned.wavelength_a.transpose(*['time', 'wavelength_number']) binned['wavelength_c'] = binned.wavelength_c.transpose(*['time', 'wavelength_number']) # reclaim some memory del ds, pks, dzdt, profiles, partial_binning, executor # re-process the raw data in order to create the intermediate variables, correcting for the holographic # grating, applying the temperature and salinity corrections and applying a baseline scatter correction # to the absorption data. All intermediate processing outputs are added to the data set. 
print('Re-processing the raw data, creating intermediate data products ...') binned = apply_dev(binned, cal.coeffs) binned = apply_tscorr(binned, cal.coeffs, binned.sea_water_temperature, binned.sea_water_practical_salinity) binned = apply_scatcorr(binned, cal.coeffs) # estimate chlorophyll and POC and calculate select absorption ratios binned = estimate_chl_poc(binned, cal.coeffs) binned = calculate_ratios(binned) # create a xarray dataset of the 2D variables, padding the number of wavelengths to a consistent # length of 100 using fill values. wavelength_number = np.arange(100).astype(int) # used as a dimensional variable pad = 100 - num_wavelengths fill_nan = np.tile(np.ones(pad) * np.nan, (len(binned.time), 1)) fill_int = np.tile(np.ones(pad) * FILL_INT, (len(binned.time), 1)) wavelength_a = np.concatenate([binned.wavelength_a.values, fill_nan], axis=1) wavelength_c = np.concatenate([binned.wavelength_c.values, fill_nan], axis=1) ac = xr.Dataset({ 'wavelength_a': (['time', 'wavelength_number'], wavelength_a), 'a_signal': (['time', 'wavelength_number'], np.concatenate([binned.a_signal, fill_int], axis=1).astype(int)), 'a_reference': (['time', 'wavelength_number'], np.concatenate([binned.a_reference, fill_int], axis=1).astype(int)), 'optical_absorption': (['time', 'wavelength_number'], np.concatenate([binned.optical_absorption, fill_nan], axis=1)), 'apg': (['time', 'wavelength_number'], np.concatenate([binned.apg, fill_nan], axis=1)), 'apg_ts': (['time', 'wavelength_number'], np.concatenate([binned.apg_ts, fill_nan], axis=1)), 'apg_ts_s': (['time', 'wavelength_number'], np.concatenate([binned.apg_ts_s, fill_nan], axis=1)), 'wavelength_c': (['time', 'wavelength_number'], wavelength_c), 'c_signal': (['time', 'wavelength_number'], np.concatenate([binned.c_signal, fill_int], axis=1).astype(int)), 'c_reference': (['time', 'wavelength_number'], np.concatenate([binned.c_reference, fill_int], axis=1).astype(int)), 'beam_attenuation': (['time', 'wavelength_number'], np.concatenate([binned.beam_attenuation, fill_nan], axis=1)), 'cpg': (['time', 'wavelength_number'], np.concatenate([binned.cpg, fill_nan], axis=1)), 'cpg_ts': (['time', 'wavelength_number'], np.concatenate([binned.cpg_ts, fill_nan], axis=1)), }, coords={'time': (['time'], binned.time.values), 'wavelength_number': wavelength_number}) # drop the original 2D variables from the binned data set drop = binned.drop(['wavelength_number', 'wavelength_a', 'a_signal', 'a_reference', 'optical_absorption', 'apg', 'apg_ts', 'apg_ts_s', 'wavelength_c', 'c_signal', 'c_reference', 'beam_attenuation', 'cpg', 'cpg_ts']) # reset the data type for the 'a' and 'c' signal and reference dark values, and the other raw parameters int_arrays = ['a_signal_dark', 'a_reference_dark', 'c_signal_dark', 'c_reference_dark', 'internal_temp_raw', 'external_temp_raw', 'deployment', 'profile'] for k in drop.variables: if k in int_arrays: drop[k] = drop[k].astype(int) # recombine the two datasets optaa = xr.merge([drop, ac]) # reset the attributes, which the merging drops optaa.attrs = binned.attrs for v in optaa.variables: optaa[v].attrs = binned[v].attrs # reset some attributes for key, value in ATTRS.items(): for atk, atv in value.items(): if key in optaa.variables: optaa[key].attrs[atk] = atv # add the original variable name as an attribute, if renamed for key, value in rename.items(): optaa[value].attrs['ooinet_variable_name'] = key # add the actual number of wavelengths to the dataset as an attribute optaa['wavelength_number'].attrs['actual_wavelengths'] = 
num_wavelengths # if the filter index was used to adjust the spectral jumps, add that attribute to the data set if cal.coeffs['grate_index']: optaa['a_jump_offsets'].attrs['grate_index'] = cal.coeffs['grate_index'] optaa['c_jump_offsets'].attrs['grate_index'] = cal.coeffs['grate_index'] return optaa
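A minimal usage sketch for the optaa_profiler function above, assuming one deployment of shallow-profiler OPTAA data has already been downloaded as a netCDF file. The file names, and the use of xarray.open_dataset/to_netcdf for I/O, are assumptions for illustration only and are not part of the original workflow.

import xarray as xr

# hypothetical paths: one deployment of OPTAA data and its calibration file
ds = xr.open_dataset('deployment0007_optaa_sample.nc')
cal_file = 'OPTAA_calibration_coefficients.csv'  # placeholder name

# clean up, re-process and depth-bin the deployment; this raises a ValueError if the
# input file mixes deployments, per the check at the top of optaa_profiler
optaa = optaa_profiler(ds, cal_file)

# save the cleaned data set for later analysis
optaa.to_netcdf('optaa_profiler_cleaned.nc')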
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def optaa_benthic(ds, cal_file):\n # check to see if there is more than one deployment in the data set\n if len(np.unique(ds['deployment'].values)) > 1:\n raise ValueError('More than one deployment in the data set. Please structure processing request to process '\n 'one deployment at a time.')\n\n # drop some of the variables:\n # internal_timestamp == time, redundant so can remove\n # pressure_counts == none of the OOI OPTAAs have a pressure sensor\n # serial_number == available in the global attributes\n # meter_type == always the same, not needed\n # packet_type == always the same, not needed\n # record_length == always the same, not needed\n # checksum == not needed, used in data parsing\n ds = ds.drop(['internal_timestamp', 'pressure_counts', 'serial_number', 'meter_type', 'packet_type',\n 'record_length', 'checksum'])\n\n # check for data from a co-located CTD, if not present create the variables using NaN's as the fill value\n if 'sea_water_temperature' not in ds.variables:\n ds['sea_water_temperature'] = ('time', ds['deployment'].data * np.nan)\n ds['sea_water_practical_salinity'] = ('time', ds['deployment'].data * np.nan)\n\n # pull out the number of wavelengths and serial number and then drop the variables (part of the metadata)\n num_wavelengths = ds.num_wavelengths.values[0].astype(int)\n serial_number = int(re.sub('[^0-9]', '', ds.attrs['SerialNumber']))\n ds = ds.drop('num_wavelengths')\n\n # load the calibration coefficients\n uid = ds.attrs['AssetUniqueID']\n start_time = ds['time'][0].values.astype(float) / 10 ** 9\n cal = load_cal_coefficients(cal_file, uid, start_time)\n\n # check the calibration coefficients against the deployment data\n if cal.coeffs['serial_number'] != serial_number:\n raise Exception('Serial Number mismatch between ac-s data and the device file.')\n if cal.coeffs['num_wavelengths'] != num_wavelengths:\n raise Exception('Number of wavelengths mismatch between ac-s data and the device file.')\n\n # remove the units from the variable names\n rename = {\n 'a_signal_dark_counts': 'a_signal_dark',\n 'a_reference_dark_counts': 'a_reference_dark',\n 'a_signal_counts': 'a_signal',\n 'a_reference_counts': 'a_reference',\n 'c_signal_dark_counts': 'c_signal_dark',\n 'c_reference_dark_counts': 'c_reference_dark',\n 'c_signal_counts': 'c_signal',\n 'c_reference_counts': 'c_reference',\n 'wavelength': 'wavelength_number'\n }\n ds = ds.rename(rename)\n\n # Delete the first 45 seconds of the data record per recommendation from the vendor. Note, originally the vendor\n # recommended deleting the first 45 seconds, then 60 seconds and then 120 seconds. They never provided a data\n # based reason for the change in recommendation. Within OOI, instruments were programmed to run for 60 seconds,\n # then 120 seconds and then 240 seconds ... and it is all mixed up across the various data sets. So, we are\n # going to use the 45-second recommendation and apply it to all data sets. 
If the vendor ever provides an analysis\n # justifying the change in recommendation, we can revisit this.\n ds.elapsed_run_time.values = ds.elapsed_run_time.where(ds.elapsed_run_time / 1000 > 45)\n ds = ds.dropna(dim='time', subset=['elapsed_run_time'])\n\n # convert internal and external temperature sensors from raw counts to degrees Celsius\n ds['internal_temp'] = opt_internal_temp(ds['internal_temp_raw'])\n ds['external_temp'] = opt_external_temp(ds['external_temp_raw'])\n\n # calculate the median of the remaining data per burst measurement (configured to run hourly for 3 minutes)\n # calculate the median of the remaining data per burst measurement\n print('Calculating burst averages ...')\n start_time = time.time()\n burst = ds.resample(time='3600s', base=1800, loffset='1800s', skipna=True).reduce(np.median, dim='time',\n keep_attrs=True)\n burst = burst.where(~np.isnan(burst.deployment), drop=True)\n stop_time = time.time()\n elapsed_time = stop_time - start_time\n print('... burst averaging complete. Elapsed time: %f seconds' % elapsed_time)\n\n # re-process the raw data in order to create the intermediate variables, correcting for the holographic\n # grating, applying the temperature and salinity corrections and applying a baseline scatter correction\n # to the absorption data. All intermediate processing outputs are added to the data set.\n burst = apply_dev(burst, cal.coeffs)\n burst = apply_tscorr(burst, cal.coeffs, burst.sea_water_temperature, burst.sea_water_practical_salinity)\n burst = apply_scatcorr(burst, cal.coeffs)\n\n # add the jump offsets as NaN's if the grating index correction was not used\n if 'a_jump_offsets' not in ds.variables:\n ds['a_jump_offsets'] = ('time', ds['deployment'].data * np.nan)\n ds['c_jump_offsets'] = ('time', ds['deployment'].data * np.nan)\n\n # estimate chlorophyll and POC and calculate select absorption ratios\n burst = estimate_chl_poc(burst, cal.coeffs)\n burst = calculate_ratios(burst)\n\n # create a xarray dataset of the 2D variables, padding the number of wavelengths to a consistent\n # length of 100 using fill values.\n wavelength_number = np.arange(100).astype(int) # used as a dimensional variable\n pad = 100 - num_wavelengths\n fill_nan = np.tile(np.ones(pad) * np.nan, (len(burst.time), 1))\n fill_int = np.tile(np.ones(pad) * FILL_INT, (len(burst.time), 1))\n\n wavelength_a = np.concatenate([burst.wavelength_a.values, fill_nan], axis=1)\n wavelength_c = np.concatenate([burst.wavelength_c.values, fill_nan], axis=1)\n\n ac = xr.Dataset({\n 'wavelength_a': (['time', 'wavelength_number'], wavelength_a),\n 'a_signal': (['time', 'wavelength_number'], np.concatenate([burst.a_signal, fill_int], axis=1).astype(int)),\n 'a_reference': (['time', 'wavelength_number'], np.concatenate([burst.a_reference, fill_int],\n axis=1).astype(int)),\n 'optical_absorption': (['time', 'wavelength_number'], np.concatenate([burst.optical_absorption, fill_nan],\n axis=1)),\n 'apg': (['time', 'wavelength_number'], np.concatenate([burst.apg, fill_nan], axis=1)),\n 'apg_ts': (['time', 'wavelength_number'], np.concatenate([burst.apg_ts, fill_nan], axis=1)),\n 'apg_ts_s': (['time', 'wavelength_number'], np.concatenate([burst.apg_ts_s, fill_nan], axis=1)),\n 'wavelength_c': (['time', 'wavelength_number'], wavelength_c),\n 'c_signal': (['time', 'wavelength_number'], np.concatenate([burst.c_signal, fill_int], axis=1).astype(int)),\n 'c_reference': (['time', 'wavelength_number'], np.concatenate([burst.c_reference, fill_int],\n axis=1).astype(int)),\n 'beam_attenuation': 
(['time', 'wavelength_number'], np.concatenate([burst.beam_attenuation, fill_nan], axis=1)),\n 'cpg': (['time', 'wavelength_number'], np.concatenate([burst.cpg, fill_nan], axis=1)),\n 'cpg_ts': (['time', 'wavelength_number'], np.concatenate([burst.cpg_ts, fill_nan], axis=1)),\n }, coords={'time': (['time'], burst.time.values), 'wavelength_number': wavelength_number})\n\n # drop the original 2D variables from the burst data set\n drop = burst.drop(['wavelength_number', 'wavelength_a', 'a_signal', 'a_reference',\n 'optical_absorption', 'apg', 'apg_ts', 'apg_ts_s',\n 'wavelength_c', 'c_signal', 'c_reference',\n 'beam_attenuation', 'cpg', 'cpg_ts'])\n\n # reset the data type for the 'a' and 'c' signal and reference dark values, and the other raw parameters\n int_arrays = ['a_signal_dark', 'a_reference_dark', 'c_signal_dark', 'c_reference_dark',\n 'internal_temp_raw', 'external_temp_raw', 'deployment']\n for k in drop.variables:\n if k in int_arrays:\n drop[k] = drop[k].astype(int)\n\n # recombine the two datasets\n optaa = xr.merge([drop, ac])\n\n # reset the attributes, which the merging drops\n optaa.attrs = burst.attrs\n for v in optaa.variables:\n optaa[v].attrs = burst[v].attrs\n\n # reset some attributes\n for key, value in ATTRS.items():\n for atk, atv in value.items():\n if key in optaa.variables:\n optaa[key].attrs[atk] = atv\n\n # add the original variable name as an attribute, if renamed\n for key, value in rename.items():\n optaa[value].attrs['ooinet_variable_name'] = key\n\n # add the actual number of wavelengths to the dataset as an attribute\n optaa['wavelength_number'].attrs['actual_wavelengths'] = num_wavelengths\n\n # if the filter index was used to adjust the spectral jumps, add that attribute to the data set\n if cal.coeffs['grate_index']:\n optaa['a_jump_offsets'].attrs['grate_index'] = cal.coeffs['grate_index']\n optaa['c_jump_offsets'].attrs['grate_index'] = cal.coeffs['grate_index']\n\n return optaa", "def sanitize(self):\n # Early versions of CASU catalogues chave multiple columns 'Blank'\n # Numpy will throw an exception if multiple columns have the same\n # name, so we need to rename these columns.\n n_columns = len(self.fits[self.ccd].columns)\n for col in range(26, n_columns, 1):\n name = self.fits[self.ccd].columns[col].name\n if name == 'Blank':\n self.fits[self.ccd].columns[col].name = 'Blank%d' % col\n\n # The headers contain a combination of old- and modern-\n # style WCS parameters for the ZPN projection coefficients, which\n # confuses libwcs. Moreover, in a few cases the keyword values\n # are plainly wrong. 
Hence we remove the keywords.\n for kw in ['PV1_0', 'PV1_1', 'PV1_2', 'PV1_3',\n 'PV2_0', 'PV2_1', 'PV2_2', 'PV2_3',\n 'PV3_0', 'PV3_1', 'PV3_3', 'PV3_3',\n 'PROJP1', 'PROJP3', 'WAT1_001', 'WAT2_001',\n 'RADECSYS']:\n del self.fits[self.ccd].header[kw]\n\n # ..and enforce the parameters wich have been used by the pipeline\n self.fits[self.ccd].header['EQUINOX'] = 2000.0\n self.fits[self.ccd].header['PV2_1'] = 1.0\n self.fits[self.ccd].header['PV2_3'] = 220.0\n self.fits[self.ccd].header['CUNIT1'] = 'deg'\n self.fits[self.ccd].header['CUNIT2'] = 'deg'\n self.fits[self.ccd].header['RADESYSa'] = 'ICRS'", "def cleanup(adata, del_prediction=False, del_2nd_moments=False):\n\n if \"pca_fit\" in adata.uns_keys():\n adata.uns[\"pca_fit\"] = None\n if \"velocyto_SVR\" in adata.uns_keys():\n adata.uns[\"velocyto_SVR\"][\"SVR\"] = None\n if \"umap_fit\" in adata.uns_keys():\n adata.uns[\"umap_fit\"][\"fit\"] = None\n if \"velocity_pca_fit\" in adata.uns_keys():\n adata.uns[\"velocity_pca_fit\"] = None\n if \"kmc\" in adata.uns_keys():\n adata.uns[\"kmc\"] = None\n if \"kinetics_heatmap\" in adata.uns_keys():\n adata.uns.pop(\"kinetics_heatmap\")\n if \"hdbscan\" in adata.uns_keys():\n adata.uns.pop(\"hdbscan\")\n\n VF_keys = [i if i.startswith(\"VecFld\") else None for i in adata.uns_keys()]\n for i in VF_keys:\n if i is not None and \"VecFld2D\" in adata.uns[i].keys():\n del adata.uns[i][\"VecFld2D\"]\n\n fate_keys = [i if i.startswith(\"fate\") else None for i in adata.uns_keys()]\n for i in fate_keys:\n if i is not None:\n if adata.uns[i][\"init_cells\"] is not None:\n adata.uns[i][\"init_cells\"] = list(adata.uns[i][\"init_cells\"])\n if \"prediction\" in adata.uns[i].keys():\n if del_prediction:\n del adata.uns[i][\"prediction\"]\n if \"VecFld_true\" in adata.uns[i].keys():\n if adata.uns[i][\"VecFld_true\"] is not None:\n del adata.uns[i][\"VecFld_true\"]\n\n if del_2nd_moments:\n from .tools.utils import remove_2nd_moments\n\n remove_2nd_moments(adata)\n\n return adata", "def clean():\n filter_phase_data()\n combine_phase_data()\n remove_duplicates_phase_data()", "def control_variation(df, outDir, features_to_analyse, \n variables_to_analyse=[\"date_yyyymmdd\"], \n remove_outliers=True, \n p_value_threshold=0.05, \n PCs_to_keep=10):\n \n # Record non-data columns before dropping feature columns \n other_colnames = [col for col in df.columns if col not in features_to_analyse]\n \n # Drop columns that contain only zeros\n colnames_before = list(df.columns)\n AllZeroFeats = df[features_to_analyse].columns[(df[features_to_analyse] == 0).all()]\n df = df.drop(columns=AllZeroFeats)\n colnames_after = list(df.columns)\n zero_cols = [col for col in colnames_before if col not in colnames_after]\n if len(zero_cols) > 0:\n print(\"Dropped %d features with all-zero summaries:\\n%s\" % (len(zero_cols), zero_cols))\n \n # Record feature column names after dropping zero data\n features_to_analyse = [feat for feat in df.columns if feat not in other_colnames]\n \n # Remove outliers from the dataset \n if remove_outliers:\n df, indsOutliers = removeOutliersMahalanobis(df, features_to_analyse)\n remove_outliers = False \n # NB: Ensure Mahalanobis operation to remove outliers is performed only once!\n\n # Check for normality in features to analyse in order decide which \n # statistical test to use: one-way ANOVA (parametric) or Kruskal-Wallis \n # (non-parametric) test\n TEST = check_normality(df, features_to_analyse, p_value_threshold)\n\n # Record name of statistical test used (kruskal/f_oneway)\n test_name = 
str(TEST).split(' ')[1].split('.')[-1].split('(')[0].split('\\'')[0]\n\n # CONTROL VARIATION: STATS (ANOVAs)\n # - Does N2 worm behaviour on control vary across experiment days? \n # (worms are larger? Shorter L1 diapuase? Camera focus/FOV adjusted? Skewed by non-worm tracked objects?\n # Did not record time when worms were refed! Could be this. If so, worms will be bigger across all foods on that day) \n # - Perform ANOVA to see if features vary across imaging days for control\n # - Perform Tukey HSD post-hoc analyses for pairwise differences between imaging days\n # - Highlight outlier imaging days and investigate reasons why\n # - Save list of top significant features for outlier days - are they size-related features?\n for grouping_variable in variables_to_analyse:\n print(\"\\nTESTING: %s\\n\" % grouping_variable)\n \n if not len(df[grouping_variable].unique()) > 1:\n print(\"Need at least two groups for stats to investigate %s\" % grouping_variable)\n else:\n print(\"Performing %s tests for '%s'\" % (test_name, grouping_variable)) \n \n test_results_df, sigfeats_out = \\\n topfeats_ANOVA_by_group(df, \n grouping_variable, \n features_to_analyse,\n TEST,\n p_value_threshold)\n \n # Ensure directory exists to save results\n Path(outDir).mkdir(exist_ok=True, parents=True)\n \n # Define outpaths\n froot = 'control_variation_in_' + grouping_variable + '_' + test_name\n stats_outpath = outDir / (froot + \"_results.csv\")\n sigfeats_outpath = outDir / (froot + \"_significant_features.csv\")\n \n # Save test statistics + significant features list to file\n test_results_df.to_csv(stats_outpath)\n sigfeats_out.to_csv(sigfeats_outpath, header=False)\n\n # Box plots\n plotDir = outDir / \"Plots\"\n topfeats_boxplots_by_group(df, \n test_results_df, \n grouping_variable,\n plot_save_dir=plotDir, #save to plotDir\n p_value_threshold=p_value_threshold)\n \n # PCA (coloured by grouping variable, eg. 
experiment date)\n df = doPCA(df, \n grouping_variable, \n features_to_analyse,\n plot_save_dir = plotDir,\n PCs_to_keep = PCs_to_keep)", "def opt_optical_absorption(aref, asig, traw, awl, aoff, tcal, tbins, ta_arr,\n cpd_ts, cwl, T, PS, rwlngth=715.):\n # reset shapes of input arguments\n # using np.array ndmin=# seems faster than using np.atleast_#d\n aref = np.array(aref, ndmin=2)\n asig = np.array(asig, ndmin=2)\n traw = np.array(traw, ndmin=1)\n awl = np.around(np.array(awl, ndmin=2), decimals=1)\n aoff = np.array(aoff, ndmin=2)\n tcal = np.array(tcal, ndmin=1)\n tbins = np.array(tbins, ndmin=2)\n # note, np.atleast_3d appends the extra dimension;\n # np.array using ndmin prepends the extra dimension.\n ta_arr = np.array(ta_arr, ndmin=3)\n cpd_ts = np.array(cpd_ts, ndmin=2)\n cwl = np.array(cwl, ndmin=2)\n T = np.array(T, ndmin=1)\n PS = np.array(PS, ndmin=1)\n\n # size up inputs\n npackets = awl.shape[0]\n nwavelengths = awl.shape[1]\n # initialize output array\n apd_ts_s = np.zeros([npackets, nwavelengths])\n\n for ii in range(npackets):\n\n # calculate the internal instrument temperature [deg_C]\n tintrn = opt_internal_temp(traw[ii])\n\n # calculate the uncorrected optical absorption coefficient [m^-1]\n apd, _ = opt_pd_calc(aref[ii, :], asig[ii, :], aoff[ii, :], tintrn,\n tbins[ii, :], ta_arr[ii, :, :])\n\n # correct the optical absorption coefficient for temperature and salinity.\n apd_ts = opt_tempsal_corr('a', apd, awl[ii, :], tcal[ii], T[ii], PS[ii])\n\n # correct the optical absorption coefficient for scattering effects\n apd_ts_s_row = opt_scatter_corr(apd_ts, awl[ii, :], cpd_ts[ii, :], cwl[ii, :], rwlngth)\n apd_ts_s[ii, :] = apd_ts_s_row\n\n # return the temperature, salinity and scattering corrected optical\n # absorption coefficient OPTABSN_L2 [m^-1]\n return apd_ts_s", "def data_preparation(self) -> None:\n self.logger.info('data cleaning')\n self.logger.info('num of secs: {}, num of ipo_dates: {}, num of secs with prices: {}'.format(\n len(self.data),\n len(self.ipo_dates),\n len(self.prices)\n ))\n excluded = []\n excluded = [i.lower() for i in excluded]\n self.logger.info(f'number of excluded: {len(excluded)}')\n for i in excluded:\n self.data.pop(i)\n for s in self.data:\n # columns with empty assets sum (empty columns and other situations)\n self.data[s].dropna(axis='columns', how='any', subset=['A_0'], inplace=True)\n # columns with descriptions (polish and english names of values)\n self.data[s].drop(self.data[s].columns[[0, 1]], inplace=True, axis=1)\n\n self.logger.info(f'number of secs after cleaning: {len(self.data)}')\n data_list = [k for k in self.data.values()]\n self.uber_data = pd.concat(data_list, ignore_index=True, axis=1)\n self.uber_data = self.uber_data.transpose()\n self.uber_data = self.uber_data.loc[:, pd.notnull(self.uber_data.columns)]", "def main():\n\n # Create argument parser\n parser = ArgumentParser()\n parser.add_argument('datadir', type=str, help='Directory of LC files')\n parser.add_argument('metatable', type=str,\n help='Metatable containing each object, redshift, peak time guess, mwebv, object type')\n parser.add_argument('--zpt', type=float, default=DEFAULT_ZPT, help='Zero point of LCs')\n parser.add_argument('--lm', type=float, default=DEFAULT_LIM_MAG, help='Survey limiting magnitude')\n parser.add_argument('--outdir', type=str, default='./products/',\n help='Path in which to save the LC data (single file)')\n args = parser.parse_args()\n\n objs, redshifts, obj_types, peaks, ebvs = read_in_meta_table(args.metatable)\n\n # Grab all 
the LC files in the input directory\n file_names = []\n for obj in objs:\n file_name = args.datadir + 'PS1_PS1MD_' + obj + '.snana.dat'\n file_names.append(file_name)\n\n # Create a list of LC objects from the data files\n lc_list = read_in_LC_files(file_names, objs)\n\n # This needs to be redone when retrained\n # TODO: Need to change this whenever you retrain...\n filt_dict = {'g': 0, 'r': 1, 'i': 2, 'z': 3}\n wvs = np.asarray([5460, 6800, 7450, 8700])\n\n # Update the LC objects with info from the metatable\n my_lcs = []\n for i, my_lc in enumerate(lc_list):\n my_lc.add_LC_info(zpt=args.zpt, mwebv=ebvs[i],\n redshift=redshifts[i], lim_mag=args.lm,\n obj_type=obj_types[i])\n my_lc.get_abs_mags()\n my_lc.sort_lc()\n pmjd = my_lc.find_peak(peaks[i])\n my_lc.shift_lc(pmjd)\n my_lc.correct_time_dilation()\n my_lc.filter_names_to_numbers(filt_dict)\n my_lc.correct_extinction(wvs)\n my_lc.cut_lc()\n my_lc.make_dense_LC(4)\n my_lcs.append(my_lc)\n save_lcs(my_lcs, args.outdir)", "def proforma_report(self, technologies, valuestreams, results, opt_years):\n proforma = super().proforma_report(technologies, valuestreams, results, opt_years)\n proforma_wo_yr_net = proforma.drop('Yearly Net Value', axis=1)\n proforma = self.replacement_costs(proforma_wo_yr_net, technologies)\n proforma = self.zero_out_dead_der_costs(proforma, technologies)\n proforma = self.update_capital_cost_construction_year(proforma, technologies)\n # check if there are are costs on CAPEX YEAR - if there arent, then remove it from proforma\n if not proforma.loc['CAPEX Year', :].any():\n proforma.drop('CAPEX Year', inplace=True)\n # add EOL costs to proforma\n der_eol = self.calculate_end_of_life_value(proforma, technologies, self.inflation_rate,\n opt_years)\n proforma = proforma.join(der_eol)\n if self.ecc_mode:\n for der_inst in technologies:\n if der_inst.tag == \"Load\":\n continue\n # replace capital cost columns with economic_carrying cost\n der_ecc_df, total_ecc = der_inst.economic_carrying_cost_report(\n self.inflation_rate, self.end_year, self.apply_rate)\n # drop original Capital Cost\n proforma.drop(columns=[der_inst.zero_column_name()], inplace=True)\n # drop any replacement costs\n if f\"{der_inst.unique_tech_id()} Replacement Costs\" in proforma.columns:\n proforma.drop(columns=[f\"{der_inst.unique_tech_id()} Replacement Costs\"], inplace=True)\n # add the ECC to the proforma\n proforma = proforma.join(total_ecc)\n # add ECC costs broken out by when initial cost occurs to complete DF\n self.ecc_df = pd.concat([self.ecc_df, der_ecc_df], axis=1)\n else:\n proforma = self.calculate_taxes(proforma, technologies)\n # sort alphabetically\n proforma.sort_index(axis=1, inplace=True)\n proforma.fillna(value=0, inplace=True)\n # recalculate the net (sum of the row's columns)\n proforma['Yearly Net Value'] = proforma.sum(axis=1)\n return proforma", "def execute():\r\n arcpy.AddMessage(\"START BCA Processing\")\r\n arcpy.env.workspace = config.temp_data_gdb\r\n arcpy.env.overwriteOutput = True\r\n sys.path.append(config.notif_system_script_folder)\r\n\r\n # Other Variables\r\n arcpy.AddMessage(\"Import toolbox\")\r\n arcpy.ImportToolbox(config.notif_toolbox)\r\n REGEX_FOR_INVALID_CHARS = re.compile(r'[^0-9a-zA-Z]+')\r\n todayDate = datetime.datetime.now().strftime(\"%Y%m%d\")\r\n logFile = file(\r\n config.report_processing_log + \"\\\\\" + todayDate + \"_NotificationSystemLog\" + \".txt\", \"a\")\r\n\r\n\r\n # get all unzipped files uploaded to shared folder\r\n configfiles = [os.path.join(dirpath, f)\r\n for dirpath, 
dirnames, files in os.walk(config.SharedFolder)\r\n for f in files if f.endswith('.csv') or f.endswith('.xls') or f.endswith('.xlsx') or f.endswith('.XLS')]\r\n\r\n correct_config_files = [f for f in configfiles if \"\\BCAWeeklyPermitReport\\\\\" in f]\r\n\r\n # PREPARE workspace\r\n arcpy.AddMessage(\"Preparing workspace...\")\r\n for BCAreport in correct_config_files:\r\n\r\n input_file_name = BCAreport.split(\"\\\\\")[-1]\r\n\r\n MukimConstruct = arcpy.SearchCursor(config.MukimConstructSource)\r\n PermitDateExists = False\r\n\r\n for row in MukimConstruct:\r\n aux = input_file_name[:8]\r\n if \"CORRECTED\" not in BCAreport.upper():\r\n filedate = datetime.datetime.strptime(aux, \"%Y%m%d\")\r\n else:\r\n clean_filename = input_file_name.split(\".\")[0]\r\n filedate = datetime.datetime.strptime(clean_filename[-8:], \"%Y%m%d\")\r\n if filedate == row.PERMIT_DATE and \"CORRECTED\" not in BCAreport.upper():\r\n PermitDateExists = True\r\n break\r\n if PermitDateExists and \"CORRECTED\" not in BCAreport.upper():\r\n PermitDateExistsLog = file(\r\n config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[0] +\r\n \" file's Permit Date already exists\" + \".log\",\r\n \"a\")\r\n PermitDateExistsLog.write(\r\n \"Permit Date for the file \" + input_file_name + \" already exists in Mukim Construct at \" + str(\r\n datetime.datetime.now()))\r\n logFile.writelines(\r\n \"Permit Date for the file \" + input_file_name + \" already exists in Mukim Construct at \" + str(\r\n datetime.datetime.now()) + \"\\n\")\r\n\r\n else:\r\n\r\n # 00. Creation of geodatabases that will serve as workspaces\r\n logFile.writelines(\"00 Creation of temp gdb starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n if arcpy.Exists(config.TempDataGDB):\r\n arcpy.Delete_management(config.TempDataGDB)\r\n arcpy.CreateFileGDB_management(config.Notification, \"Temp_data.gdb\")\r\n else:\r\n arcpy.CreateFileGDB_management(config.Notification, \"Temp_data.gdb\")\r\n\r\n if arcpy.Exists(config.SDEDataGDB):\r\n arcpy.Delete_management(config.SDEDataGDB)\r\n arcpy.CreateFileGDB_management(config.Notification, \"Source.gdb\")\r\n else:\r\n arcpy.CreateFileGDB_management(config.Notification, \"Source.gdb\")\r\n\r\n if arcpy.Exists(config.CurrentMukimConstructDataGDB):\r\n arcpy.Delete_management(config.CurrentMukimConstructDataGDB)\r\n arcpy.CreateFileGDB_management(config.Notification, \"Final_data.gdb\")\r\n else:\r\n arcpy.CreateFileGDB_management(config.Notification, \"Final_data.gdb\")\r\n\r\n logFile.writelines(\"00 Creation of temp gdb ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 01. 
Import the base data\r\n logFile.writelines(\"01 Import of base data starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.FeatureClassToFeatureClass_conversion(config.MukimConstructSource, config.CurrentMukimConstructDataGDB,\r\n \"MUKIM_CONSTRUCT\", \"\", \"\", \"\")\r\n arcpy.FeatureClassToFeatureClass_conversion(config.MukimConstructByProjSource, config.CurrentMukimConstructDataGDB,\r\n \"MUKIM_CONSTRUCT_BYPROJ\", \"\", \"\", \"\")\r\n arcpy.FeatureClassToFeatureClass_conversion(config.DepotSource, config.SDEDataGDB, \"DepotBoundary\", \"\", \"\", \"\")\r\n arcpy.FeatureClassToFeatureClass_conversion(config.CatchmentSource, config.SDEDataGDB, \"CatchmentBoundary\", \"\", \"\", \"\")\r\n arcpy.FeatureClassToFeatureClass_conversion(config.LandlotSource, config.TempDataGDB, \"Land_lot\", \"\", \"\", \"\")\r\n # Calculate the lot key without letter\r\n arcpy.AddField_management(config.LandLot, \"Lotkey_wo_letter\", \"TEXT\", \"\", \"\", \"10\", \"\", \"NULLABLE\", \"NON_REQUIRED\",\r\n \"\")\r\n arcpy.CalculateField_management(config.LandLot, \"Lotkey_wo_letter\", \"!lot_key![:10]\", \"PYTHON\", \"\")\r\n\r\n logFile.writelines(\"01 Import of base data ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n\r\n # START THE LOOP TO PROCESS ALL THE FILES\r\n clcounter = 0\r\n\r\n if len(correct_config_files) == 0:\r\n logFile.writelines(\"No BCA report to process at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n arcpy.AddMessage(\"Processing files...\")\r\n for BCAreport in configfiles:\r\n\r\n clcounter += 1\r\n arcpy.AddMessage(BCAreport)\r\n input_file_name = BCAreport.split(\"\\\\\")[-1]\r\n MukimConstruct = arcpy.SearchCursor(config.MukimConstructSource)\r\n PermitDateExists = False\r\n\r\n # CHEKC FILE DATE EXISTS\r\n for row in MukimConstruct:\r\n aux = input_file_name[:8]\r\n if \"CORRECTED\" not in BCAreport.upper():\r\n filedate = datetime.datetime.strptime(aux, \"%Y%m%d\")\r\n else:\r\n clean_filename = input_file_name.split(\".\")[0]\r\n filedate = datetime.datetime.strptime(clean_filename[-8:], \"%Y%m%d\")\r\n if filedate == row.PERMIT_DATE and \"CORRECTED\" not in input_file_name.upper():\r\n PermitDateExists = True\r\n break\r\n\r\n HEADERVALID = True\r\n with xlrd.open_workbook(BCAreport) as wb:\r\n sh = wb.sheet_by_index(0)\r\n for r in range(sh.nrows):\r\n colcount = 0\r\n if sh.row_values(r)[colcount] == 'Error_Message':\r\n HEADERVALID = True\r\n elif sh.row_values(r)[colcount] == 'Project Ref No' or sh.row_values(r)[colcount] == 'Project_Ref_No':\r\n HEADERVALID = True\r\n else:\r\n PermitDateExistsLog = file(config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[\r\n 0] + \" file's header format is not acceptable for processing\" + \".log\", \"a\")\r\n PermitDateExistsLog.write(\r\n \"The header format for the file \" + input_file_name + \" is not acceptable for processing at \" + str(\r\n datetime.datetime.now()))\r\n logFile.writelines(\r\n \"The header format for the file \" + input_file_name + \" is not acceptable for processing at \" + str(\r\n datetime.datetime.now()) + \"\\n\")\r\n HEADERVALID = False\r\n break\r\n\r\n if not PermitDateExists and HEADERVALID:\r\n logFile.writelines(\"Starts processing \" + BCAreport + \" at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # Status update to run/not run the SiteInspection Update\r\n Log_SiteInspectionUpdate = file(config.SiteInspectionUpdate, \"w\")\r\n Log_SiteInspectionUpdate.writelines(\"NO\")\r\n Log_SiteInspectionUpdate.close()\r\n\r\n # 02. 
Import the BCA report to a geodatabase table\r\n logFile.writelines(\"02 Import of table to gdb starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n if arcpy.Exists(config.TempDataGDB + \"\\\\ConvertedBCAreport_02\"):\r\n arcpy.Delete_management(config.TempDataGDB + \"\\\\ConvertedBCAreport_02\")\r\n arcpy.CreateTable_management(config.TempDataGDB, \"ConvertedBCAreport_02\", config.TemplateConvertedBCAreport)\r\n else:\r\n arcpy.CreateTable_management(config.TempDataGDB, \"ConvertedBCAreport_02\", config.TemplateConvertedBCAreport)\r\n if arcpy.Exists(BCAreport[:-5] + '_err' + '.csv'):\r\n # rename old error report\r\n os.remove(BCAreport[:-5] + '_err' + '.csv')\r\n else:\r\n result = \"Error file does not exist\"\r\n if BCAreport.endswith('.xls') or BCAreport.endswith('.xlsx') or BCAreport.endswith('.XLS'):\r\n rows_out = arcpy.InsertCursor(config.BCAReportGDBTable)\r\n fldlist = arcpy.ListFields(config.BCAReportGDBTable)\r\n fldlist.pop(0)\r\n with xlrd.open_workbook(BCAreport) as wb:\r\n sh = wb.sheet_by_index(0)\r\n for r in range(sh.nrows):\r\n colcount = 0\r\n if sh.row_values(r)[colcount] != 'Error_Message':\r\n colcount = 0\r\n else:\r\n colcount = 1\r\n break\r\n for r in range(sh.nrows):\r\n colcounter = colcount\r\n if r > 0:\r\n new_row_out = rows_out.newRow()\r\n for efld in fldlist:\r\n if efld.name <> 'OBJECTID' and efld.name <> 'ConcatFields':\r\n new_row_out.setValue(efld.name, sh.row_values(r)[colcounter])\r\n colcounter += 1\r\n\r\n logFile.writelines(\"Inserting: \" + str(new_row_out) + \"\\n\")\r\n rows_out.insertRow(new_row_out)\r\n del rows_out, new_row_out\r\n\r\n elif BCAreport.endswith('.csv'):\r\n\r\n BCAreportread = csv.DictReader(open(BCAreport, 'rb'), delimiter=',', quotechar='\"')\r\n rows_out = arcpy.InsertCursor(config.BCAReportGDBTable)\r\n for attribute in BCAreportread:\r\n new_row_out = rows_out.newRow()\r\n new_row_out.Project_Ref_No = attribute['Project_Ref_No']\r\n new_row_out.Project_Title = attribute['Project_Title']\r\n new_row_out.House_Blk_No = attribute['House_Blk_No']\r\n new_row_out.Road_Name = attribute['Road_Name']\r\n new_row_out.Level_No = attribute['Level_No']\r\n new_row_out.Unit_No = attribute['Unit_No']\r\n new_row_out.Building_Name = attribute['Building_Name']\r\n new_row_out.Postal_Code = attribute['Postal_Code']\r\n new_row_out.Project_Mukim_nos = attribute['Project_Mukim_nos']\r\n new_row_out.Project_Lot_nos = attribute['Project_Lot_nos']\r\n new_row_out.Permit_Type_of_Work = attribute['Permit_Type_of_Work']\r\n new_row_out.Type_of_Work = attribute['Type_of_Work']\r\n new_row_out.Owner_s_name = attribute['Owners_name']\r\n new_row_out.Owner_s_firm_name = attribute['Owners_firm_name']\r\n new_row_out.Owner_s_address = attribute['Owners_address']\r\n new_row_out.Owner_s_Tel_No = attribute['Owners_Tel_No']\r\n new_row_out.Owner_s_Email_address = attribute['Owners_Email_address']\r\n new_row_out.Builder_s_name = attribute['Builders_name']\r\n new_row_out.Builder_s_firm_name = attribute['Builders_firm_name']\r\n new_row_out.Builder_s_address = attribute['Builders_address']\r\n new_row_out.Builder_s_Tel_No = attribute['Builders_Tel_No']\r\n new_row_out.Builder_s_email_address = attribute['Builders_email_address']\r\n new_row_out.PE_s_name = attribute['PEs_name']\r\n new_row_out.PE_s_firm_name = attribute['PEs_firm_name']\r\n new_row_out.PE_s_address = attribute['PEs_address']\r\n new_row_out.PE_s_Tel_No = attribute['PEs_Tel_No']\r\n new_row_out.PE_s_Email_address = attribute['PEs_Email_address']\r\n 
new_row_out.Architect_s_name = attribute['Architects_name']\r\n new_row_out.Architect_s_firm_name = attribute['Architects_firm_name']\r\n new_row_out.Architect_s_address = attribute['Architects_address']\r\n new_row_out.Architect_s_Tel_No = attribute['Architects_Tel_No']\r\n new_row_out.Architect_s_Email_address = attribute['Architects_Email_address']\r\n new_row_out.Project_Cost = attribute['Project_Cost']\r\n new_row_out.Project_Duration = attribute['Project_Duration']\r\n new_row_out.Approval_Date_DD_MM_YYYY_ = attribute['Approval_Date']\r\n rows_out.insertRow(new_row_out)\r\n if new_row_out:\r\n del new_row_out\r\n if rows_out:\r\n del rows_out\r\n\r\n except:\r\n log_error(\"Error in 02 Import of table to gdb: \", logFile)\r\n logFile.writelines(\"02 Import of table to gdb ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 03. Remove spaces in key fields for the concatenation\r\n logFile.writelines(\"03 Removing of spaces starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n rowsSpace = arcpy.UpdateCursor(config.BCAReportGDBTable)\r\n\r\n for row in rowsSpace:\r\n ProjRef = row.Project_Ref_No.strip()\r\n ProjMukim = row.Project_Mukim_nos.strip()\r\n ProjLot = row.Project_Lot_nos.strip()\r\n BuilderN = row.Builder_s_name.strip()\r\n row.Project_Ref_No = ProjRef\r\n row.Project_Mukim_nos = ProjMukim\r\n row.Project_Lot_nos = ProjLot\r\n row.Builder_s_name = BuilderN\r\n rowsSpace.updateRow(row)\r\n if row:\r\n del row\r\n if rowsSpace:\r\n del rowsSpace\r\n except:\r\n log_error(\"Error in 03 Removing of spaces: \", logFile)\r\n logFile.writelines(\"03 Removing of spaces ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 04. Concatenate Project_Ref_No, Project_Mukim_nos, Project_Lot_nos, Builder_s_name\r\n logFile.writelines(\"04 Concatenate the three fields starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n rows = arcpy.UpdateCursor(config.BCAReportGDBTable)\r\n for row in rows:\r\n expression = str(row.Project_Ref_No) + \"-\" + str(row.Project_Mukim_nos) + \"-\" + str(\r\n row.Project_Lot_nos) + \"-\" + str(row.Builder_s_name)\r\n row.ConcatFields = expression\r\n rows.updateRow(row)\r\n if row:\r\n del row\r\n if rows:\r\n del rows\r\n\r\n except:\r\n log_error(\"Error in 04 Concatenate the three fields: \", logFile)\r\n logFile.writelines(\"04 Concatenate the three fields ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 05. Create temporary tables for Unique and Duplicate records\r\n logFile.writelines(\"05 Create temporary tables starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n if arcpy.Exists(config.TempDataGDB + \"\\\\Uniquerows\"):\r\n arcpy.Delete_management(config.TempDataGDB + \"\\\\Uniquerows\")\r\n arcpy.CreateTable_management(config.TempDataGDB, \"Uniquerows\", config.TemplateConcat, \"\")\r\n else:\r\n arcpy.CreateTable_management(config.TempDataGDB, \"Uniquerows\", config.TemplateConcat, \"\")\r\n\r\n if arcpy.Exists(config.TempDataGDB + \"\\\\Duplicaterows\"):\r\n arcpy.Delete_management(config.TempDataGDB + \"\\\\Duplicaterows\")\r\n arcpy.CreateTable_management(config.TempDataGDB, \"Duplicaterows\", config.TemplateConcat, \"\")\r\n else:\r\n arcpy.CreateTable_management(config.TempDataGDB, \"Duplicaterows\", config.TemplateConcat, \"\")\r\n except:\r\n log_error(\"Error in 05 Create temporary tables: \", logFile)\r\n logFile.writelines(\"05 Create temporary tables ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 06. 
Separate unique and duplicate records\r\n logFile.writelines(\"06 Separate unique and duplicate rows starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n print \"Start step 06\"\r\n rows_inCB02 = arcpy.UpdateCursor(config.BCAReportGDBTable)\r\n rows_outUnique = arcpy.InsertCursor(config.UniqueRecords)\r\n # print rows_outUnique\r\n rows_outDuplicate = arcpy.InsertCursor(config.DuplicateRecords)\r\n\r\n rows_unique = []\r\n rows_duplicates = []\r\n for row in rows_inCB02:\r\n if row.ConcatFields not in rows_unique:\r\n rows_unique = rows_unique + [row.ConcatFields]\r\n else:\r\n rows_duplicates = rows_duplicates + [row.ConcatFields]\r\n\r\n print \"Start step 06 1\"\r\n for item in rows_unique:\r\n print \"clcounter: \" + str(clcounter)\r\n print \"item: \" + str(item)\r\n newrow = rows_outUnique.newRow()\r\n newrow.Concat = item\r\n # print newrow\r\n rows_outUnique.insertRow(newrow)\r\n\r\n print \"Start step 06 2\"\r\n for item in rows_duplicates:\r\n print \"clcounter: \" + str(clcounter)\r\n print \"item: \" + str(item)\r\n newrow = rows_outDuplicate.newRow()\r\n newrow.Concat = item\r\n rows_outDuplicate.insertRow(newrow)\r\n\r\n print \"Start step 06 3\"\r\n\r\n if rows_inCB02:\r\n del rows_inCB02\r\n if rows_outUnique:\r\n del rows_outUnique\r\n if rows_outDuplicate:\r\n del rows_outDuplicate\r\n if row:\r\n del row\r\n except:\r\n log_error(\"Error in 06 Separate unique and duplicate rows: \", logFile)\r\n logFile.writelines(\"06 Separate unique and duplicate rows ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 07. Get the rest of the fields for Uniquerows table\r\n logFile.writelines(\r\n \"07 Get the rest of the fields for unique rows starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.env.workspace = config.TempDataGDB\r\n arcpy.AddMessage(\"Starting toolbox JoinUniqueRestofFields\")\r\n\r\n try:\r\n arcpy.JoinUniqueRestofFields()\r\n except:\r\n log_error(\"Error in 07 Get the rest of the fields for unique rows: \", logFile)\r\n logFile.writelines(\r\n \"07 Get the rest of the fields for unique rows ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 08. Get the rest of the fields for Duplicaterows table\r\n logFile.writelines(\r\n \"08 Get the rest of the fields for duplicate rows starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.AddMessage(\"START toolbox JoinDuplicateRestofFields\")\r\n try:\r\n arcpy.JoinDuplicateRestofFields()\r\n\r\n except:\r\n log_error(\"Error in 08 Get the rest of the fields for duplicate rows: \", logFile)\r\n\r\n logFile.writelines(\r\n \"08 Get the rest of the fields for duplicate rows ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 09. 
Log duplicate records\r\n logFile.writelines(\"09 Log duplicate records starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.AddMessage(\"Logging duplicate records\")\r\n try:\r\n # Initialize the error log\r\n wbk = xlwt.Workbook()\r\n sheet = wbk.add_sheet('Book 1')\r\n row_count = 0\r\n col_count = 0\r\n header = ['Error_Message', 'Project_Ref_No', 'Project_Title', 'House_Blk_No', 'Road_Name', 'Level_No',\r\n 'Unit_No', 'Building_Name', 'Postal_Code', 'Project_Mukim_nos', 'Project_Lot_nos',\r\n 'Permit_Type_of_Work', 'Type_of_Work', 'Owners_name', 'Owners_firm_name', 'Owners_address',\r\n 'Owners_Tel_No', 'Owners_Email_address', 'Builders_name', 'Builders_firm_name',\r\n 'Builders_address', 'Builders_Tel_No', 'Builders_email_address', 'PEs_name', 'PEs_firm_name',\r\n 'PEs_address', 'PEs_Tel_No', 'PEs_Email_address', 'Architects_name', 'Architects_firm_name',\r\n 'Architects_address', 'Architects_Tel_No', 'Architects_Email_address', 'Project_Cost',\r\n 'Project_Duration', 'Approval_Date']\r\n for fieldname in header:\r\n sheet.write(row_count, col_count, fieldname)\r\n col_count += 1\r\n wbk.save(config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \"_err\" + \".xls\")\r\n\r\n # Log duplicate records\r\n rows = arcpy.SearchCursor(config.DuplicateRows)\r\n\r\n row_count = 1\r\n col_count = 0\r\n row = None\r\n for row in rows:\r\n message = ['Duplicate record in the BCA report', row.Project_Ref_No, row.Project_Title,\r\n row.House_Blk_No, row.Road_Name, row.Level_No, row.Unit_No, row.Building_Name,\r\n row.Postal_Code, row.Project_Mukim_nos, row.Project_Lot_nos, row.Permit_Type_of_Work,\r\n row.Type_of_Work, row.Owner_s_name, row.Owner_s_firm_name, row.Owner_s_address,\r\n row.Owner_s_Tel_No, row.Owner_s_Email_address, row.Builder_s_name,\r\n row.Builder_s_firm_name, row.Builder_s_address, row.Builder_s_Tel_No,\r\n row.Builder_s_email_address, row.PE_s_name, row.PE_s_firm_name, row.PE_s_address,\r\n row.PE_s_Tel_No, row.PE_s_Email_address, row.Architect_s_name, row.Architect_s_firm_name,\r\n row.Architect_s_address, row.Architect_s_Tel_No, row.Architect_s_Email_address,\r\n row.Project_Cost, row.Project_Duration, row.Approval_Date_DD_MM_YYYY_]\r\n col_count = 0\r\n for element in message:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n wbk.save(config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \"_err\" + \".xls\")\r\n if row:\r\n del row\r\n if rows:\r\n del rows\r\n except:\r\n log_error(\"Error in 09 Log duplicate records: \", logFile)\r\n\r\n logFile.writelines(\"09 Log duplicate records ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 10. 
Split rows based on Mukim numbers\r\n logFile.writelines(\"10 Splitting of rows based on mukim starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n if arcpy.Exists(config.SplittedMukimRows):\r\n arcpy.Delete_management(config.SplittedMukimRows)\r\n arcpy.CreateTable_management(config.TempDataGDB, \"Splitted_rows_mukim_03\", config.TemplateBCAReport, \"\")\r\n else:\r\n arcpy.CreateTable_management(config.TempDataGDB, \"Splitted_rows_mukim_03\", config.TemplateBCAReport, \"\")\r\n\r\n if arcpy.Exists(config.SplittedProjLotRows):\r\n arcpy.Delete_management(config.SplittedProjLotRows)\r\n arcpy.CreateTable_management(config.TempDataGDB, \"Splitted_rows_projlot_04\", config.TemplateBCAReport, \"\")\r\n else:\r\n arcpy.CreateTable_management(config.TempDataGDB, \"Splitted_rows_projlot_04\", config.TemplateBCAReport, \"\")\r\n\r\n rows_in = arcpy.SearchCursor(config.UniqueRows)\r\n rows_out = arcpy.InsertCursor(config.SplittedMukimRows)\r\n\r\n for row in rows_in:\r\n list_mukim_nos = row.Project_Mukim_nos.split(\",\")\r\n for proj_mukim_nos_id in list_mukim_nos:\r\n new_row_out = rows_out.newRow()\r\n new_row_out.Project_Mukim_nos = proj_mukim_nos_id\r\n new_row_out.PROJECTMUKIM_RAW = row.Project_Mukim_nos\r\n new_row_out.Project_Ref_No = row.Project_Ref_No\r\n new_row_out.Project_Title = row.Project_Title\r\n new_row_out.House_Blk_No = row.House_Blk_No\r\n new_row_out.Road_Name = row.Road_Name\r\n new_row_out.Level_No = row.Level_No\r\n new_row_out.Unit_No = row.Unit_No\r\n new_row_out.Building_Name = row.Building_Name\r\n new_row_out.Postal_Code = row.Postal_Code\r\n new_row_out.Project_Lot_nos = row.Project_Lot_nos\r\n new_row_out.Permit_Type_of_Work = row.Permit_Type_of_Work\r\n new_row_out.Type_of_Work = row.Type_of_Work\r\n new_row_out.Owner_s_name = row.Owner_s_name\r\n new_row_out.Owner_s_firm_name = row.Owner_s_firm_name\r\n new_row_out.Owner_s_address = row.Owner_s_address\r\n new_row_out.Owner_s_Tel_No = row.Owner_s_Tel_No\r\n new_row_out.Owner_s_Email_address = row.Owner_s_Email_address\r\n new_row_out.Builder_s_name = row.Builder_s_name\r\n new_row_out.Builder_s_firm_name = row.Builder_s_firm_name\r\n new_row_out.Builder_s_address = row.Builder_s_address\r\n new_row_out.Builder_s_Tel_No = row.Builder_s_Tel_No\r\n new_row_out.Builder_s_email_address = row.Builder_s_email_address\r\n new_row_out.PE_s_name = row.PE_s_name\r\n new_row_out.PE_s_firm_name = row.PE_s_firm_name\r\n new_row_out.PE_s_address = row.PE_s_address\r\n new_row_out.PE_s_Tel_No = row.PE_s_Tel_No\r\n new_row_out.PE_s_Email_address = row.PE_s_Email_address\r\n new_row_out.Architect_s_name = row.Architect_s_name\r\n new_row_out.Architect_s_firm_name = row.Architect_s_firm_name\r\n new_row_out.Architect_s_address = row.Architect_s_address\r\n new_row_out.Architect_s_Tel_No = row.Architect_s_Tel_No\r\n new_row_out.Architect_s_Email_address = row.Architect_s_Email_address\r\n new_row_out.Project_Cost = row.Project_Cost\r\n new_row_out.Project_Duration = row.Project_Duration\r\n new_row_out.Approval_Date_DD_MM_YYYY_ = row.Approval_Date_DD_MM_YYYY_\r\n rows_out.insertRow(new_row_out)\r\n if row:\r\n del row\r\n if new_row_out:\r\n del new_row_out\r\n if rows_in:\r\n del rows_in\r\n if rows_out:\r\n del rows_out\r\n except:\r\n log_error(\"Error in 10 Splitting of rows based on mukim: \", logFile)\r\n\r\n logFile.writelines(\"10 Splitting of rows based on mukim ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 11.Split rows based on Project lot numbers\r\n arcpy.AddMessage(\"Splitting rows 
based on project lots\")\r\n\r\n logFile.writelines(\r\n \"11 Splitting of rows based on project lot starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n rows_in03 = arcpy.SearchCursor(config.SplittedMukimRows)\r\n rows_out04 = arcpy.InsertCursor(config.SplittedProjLotRows)\r\n\r\n for row in rows_in03:\r\n list_proj_lot_nos = row.Project_Lot_nos.split(\",\")\r\n print list_proj_lot_nos\r\n for proj_lot_nos_id in list_proj_lot_nos:\r\n print proj_lot_nos_id\r\n new_row_out = rows_out04.newRow()\r\n new_row_out.Project_Lot_nos = proj_lot_nos_id\r\n new_row_out.PROJECTMUKIM_RAW = row.PROJECTMUKIM_RAW\r\n new_row_out.PROJECTLOT_RAW = row.Project_Lot_nos\r\n new_row_out.Project_Ref_No = row.Project_Ref_No\r\n new_row_out.Project_Title = row.Project_Title\r\n new_row_out.House_Blk_No = row.House_Blk_No\r\n new_row_out.Road_Name = row.Road_Name\r\n new_row_out.Level_No = row.Level_No\r\n new_row_out.Unit_No = row.Unit_No\r\n new_row_out.Building_Name = row.Building_Name\r\n new_row_out.Postal_Code = row.Postal_Code\r\n new_row_out.Project_Mukim_nos = row.Project_Mukim_nos\r\n new_row_out.Permit_Type_of_Work = row.Permit_Type_of_Work\r\n new_row_out.Type_of_Work = row.Type_of_Work\r\n new_row_out.Owner_s_name = row.Owner_s_name\r\n new_row_out.Owner_s_firm_name = row.Owner_s_firm_name\r\n new_row_out.Owner_s_address = row.Owner_s_address\r\n new_row_out.Owner_s_Tel_No = row.Owner_s_Tel_No\r\n new_row_out.Owner_s_Email_address = row.Owner_s_Email_address\r\n new_row_out.Builder_s_name = row.Builder_s_name\r\n new_row_out.Builder_s_firm_name = row.Builder_s_firm_name\r\n new_row_out.Builder_s_address = row.Builder_s_address\r\n new_row_out.Builder_s_Tel_No = row.Builder_s_Tel_No\r\n new_row_out.Builder_s_email_address = row.Builder_s_email_address\r\n new_row_out.PE_s_name = row.PE_s_name\r\n new_row_out.PE_s_firm_name = row.PE_s_firm_name\r\n new_row_out.PE_s_address = row.PE_s_address\r\n new_row_out.PE_s_Tel_No = row.PE_s_Tel_No\r\n new_row_out.PE_s_Email_address = row.PE_s_Email_address\r\n new_row_out.Architect_s_name = row.Architect_s_name\r\n new_row_out.Architect_s_firm_name = row.Architect_s_firm_name\r\n new_row_out.Architect_s_address = row.Architect_s_address\r\n new_row_out.Architect_s_Tel_No = row.Architect_s_Tel_No\r\n new_row_out.Architect_s_Email_address = row.Architect_s_Email_address\r\n new_row_out.Project_Cost = row.Project_Cost\r\n new_row_out.Project_Duration = row.Project_Duration\r\n new_row_out.Approval_Date_DD_MM_YYYY_ = row.Approval_Date_DD_MM_YYYY_\r\n rows_out04.insertRow(new_row_out)\r\n\r\n if row:\r\n del row\r\n if new_row_out:\r\n del new_row_out\r\n if rows_in03:\r\n del rows_in03\r\n if rows_out04:\r\n del rows_out04\r\n # print int(arcpy.GetCount_management(SplittedProjLotRows).getOutput(0))\r\n except:\r\n log_error(\"Error in 11 Splitting of rows based on project lot: \", logFile)\r\n logFile.writelines(\r\n \"11 Splitting of rows based on project lot ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 12. 
Remove spaces in Mukim and Project lot values\r\n logFile.writelines(\r\n \"12 Removing of spaces in mukim and project lot starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.AddMessage(\"Cleaning project lots\")\r\n try:\r\n\r\n rowsSpaces = arcpy.UpdateCursor(config.SplittedProjLotRows)\r\n\r\n for row in rowsSpaces:\r\n lot_no_spaces = row.Project_Lot_nos.strip()\r\n mukim_no_spaces = row.Project_Mukim_nos.strip()\r\n row.Project_Lot_nos = lot_no_spaces\r\n row.Project_Mukim_nos = mukim_no_spaces\r\n rowsSpaces.updateRow(row)\r\n if row:\r\n del row\r\n if rowsSpaces:\r\n del rowsSpaces\r\n except:\r\n log_error(\"Error in 12 Removing of spaces in mukim and project lot: \", logFile)\r\n logFile.writelines(\r\n \"12 Removing of spaces in mukim and project lot ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 13. Log empty Mukimlot or date fields\r\n logFile.writelines(\r\n \"13 Log empty mukim and project lot nos starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n rowsEmpty = arcpy.UpdateCursor(config.SplittedProjLotRows)\r\n\r\n for row in rowsEmpty:\r\n message = ['Missing Project lot or Mukim numbers', row.Project_Ref_No, row.Project_Title,\r\n row.House_Blk_No, row.Road_Name, row.Level_No, row.Unit_No, row.Building_Name,\r\n row.Postal_Code, row.Project_Mukim_nos, row.Project_Lot_nos, row.Permit_Type_of_Work,\r\n row.Type_of_Work, row.Owner_s_name, row.Owner_s_firm_name, row.Owner_s_address,\r\n row.Owner_s_Tel_No, row.Owner_s_Email_address, row.Builder_s_name,\r\n row.Builder_s_firm_name, row.Builder_s_address, row.Builder_s_Tel_No,\r\n row.Builder_s_email_address, row.PE_s_name, row.PE_s_firm_name, row.PE_s_address,\r\n row.PE_s_Tel_No, row.PE_s_Email_address, row.Architect_s_name, row.Architect_s_firm_name,\r\n row.Architect_s_address, row.Architect_s_Tel_No, row.Architect_s_Email_address,\r\n row.Project_Cost, row.Project_Duration, row.Approval_Date_DD_MM_YYYY_]\r\n message2 = ['Missing Project duration or Approval date', row.Project_Ref_No, row.Project_Title,\r\n row.House_Blk_No, row.Road_Name, row.Level_No, row.Unit_No, row.Building_Name,\r\n row.Postal_Code, row.Project_Mukim_nos, row.Project_Lot_nos, row.Permit_Type_of_Work,\r\n row.Type_of_Work, row.Owner_s_name, row.Owner_s_firm_name, row.Owner_s_address,\r\n row.Owner_s_Tel_No, row.Owner_s_Email_address, row.Builder_s_name,\r\n row.Builder_s_firm_name, row.Builder_s_address, row.Builder_s_Tel_No,\r\n row.Builder_s_email_address, row.PE_s_name, row.PE_s_firm_name, row.PE_s_address,\r\n row.PE_s_Tel_No, row.PE_s_Email_address, row.Architect_s_name,\r\n row.Architect_s_firm_name, row.Architect_s_address, row.Architect_s_Tel_No,\r\n row.Architect_s_Email_address, row.Project_Cost, row.Project_Duration,\r\n row.Approval_Date_DD_MM_YYYY_]\r\n if row.Project_Mukim_nos is None or (len(row.Project_Mukim_nos) < 4):\r\n col_count = 0\r\n for element in message:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n rowsEmpty.deleteRow(row)\r\n elif row.Project_Lot_nos is None or (len(row.Project_Lot_nos) == 0):\r\n col_count = 0\r\n for element in message:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n rowsEmpty.deleteRow(row)\r\n if row.Project_Duration is None or (len(row.Project_Duration) < 1):\r\n col_count = 0\r\n for element in message2:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n rowsEmpty.deleteRow(row)\r\n\r\n elif row.Approval_Date_DD_MM_YYYY_ is None or 
(len(row.Approval_Date_DD_MM_YYYY_) < 1):\r\n col_count = 0\r\n for element in message2:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n rowsEmpty.deleteRow(row)\r\n wbk.save(config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \"_err\" + \".xls\")\r\n if row:\r\n del row\r\n if rowsEmpty:\r\n del rowsEmpty\r\n except:\r\n log_error(\"Error in 13 Log for empty mukim and project lot nos: \", logFile)\r\n logFile.writelines(\"13 Log empty mukim and project lot nos ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 14. Error log for those with bad values\r\n arcpy.AddMessage(\"14 Logging bad values\")\r\n logFile.writelines(\"14 Log if bad values exist starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n rowsBadValues = arcpy.UpdateCursor(config.SplittedProjLotRows)\r\n\r\n for row in rowsBadValues:\r\n message = ['Mukim or Project lot numbers have bad values', row.Project_Ref_No, row.Project_Title,\r\n row.House_Blk_No, row.Road_Name, row.Level_No, row.Unit_No, row.Building_Name,\r\n row.Postal_Code, row.Project_Mukim_nos, row.Project_Lot_nos, row.Permit_Type_of_Work,\r\n row.Type_of_Work, row.Owner_s_name, row.Owner_s_firm_name, row.Owner_s_address,\r\n row.Owner_s_Tel_No, row.Owner_s_Email_address, row.Builder_s_name,\r\n row.Builder_s_firm_name, row.Builder_s_address, row.Builder_s_Tel_No,\r\n row.Builder_s_email_address, row.PE_s_name, row.PE_s_firm_name, row.PE_s_address,\r\n row.PE_s_Tel_No, row.PE_s_Email_address, row.Architect_s_name, row.Architect_s_firm_name,\r\n row.Architect_s_address, row.Architect_s_Tel_No, row.Architect_s_Email_address,\r\n row.Project_Cost, row.Project_Duration, row.Approval_Date_DD_MM_YYYY_]\r\n if len(REGEX_FOR_INVALID_CHARS.findall(row.Project_Lot_nos)) > 0:\r\n col_count = 0\r\n for element in message:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n rowsBadValues.deleteRow(row)\r\n elif len(REGEX_FOR_INVALID_CHARS.findall(row.Project_Mukim_nos)) > 0:\r\n col_count = 0\r\n for element in message:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n rowsBadValues.deleteRow(row)\r\n elif len(uptodigit(row.Project_Lot_nos)) > 0:\r\n col_count = 0\r\n for element in message:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n rowsBadValues.deleteRow(row)\r\n wbk.save(config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \"_err\" + \".xls\")\r\n\r\n if row:\r\n del row\r\n if rowsBadValues:\r\n del rowsBadValues\r\n except:\r\n log_error(\"Error in 14 Log if bad values exist: \", logFile)\r\n logFile.writelines(\"14 Log if bad values exist ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 15. Add zeros for Project Lot numbers\r\n logFile.writelines(\"15 Add zeros starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n rowsZeros = arcpy.UpdateCursor(config.SplittedProjLotRows)\r\n letters = string.ascii_letters\r\n for row in rowsZeros:\r\n letter_count = len(filter(functools.partial(operator.contains, letters), row.Project_Lot_nos))\r\n filled_string = row.Project_Lot_nos.zfill(5 + letter_count)\r\n row.Project_Lot_nos = filled_string\r\n rowsZeros.updateRow(row)\r\n if row:\r\n del row\r\n if rowsZeros:\r\n del rowsZeros\r\n except:\r\n log_error(\"Error in 15 Add zeros: \", logFile)\r\n logFile.writelines(\"15 Add zeros ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 16. 
Add and populate fields Mukim_Lot_No, Mukimlot_wo_letter, and Permit_date\r\n logFile.writelines(\"16 Add and populate fields starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n rowsPop = arcpy.UpdateCursor(config.SplittedProjLotRows)\r\n for row in rowsPop:\r\n expression = str(row.Project_Mukim_nos) + \"-\" + str(row.Project_Lot_nos)\r\n row.Mukim_Lot_No = expression\r\n date = filedate.strftime(\"%Y%m%d\")\r\n year = int(date[:4])\r\n month = int(date[4:6])\r\n day = int(date[6:8])\r\n permit_date = datetime.datetime(year, month, day)\r\n row.Permit_date = permit_date\r\n rowsPop.updateRow(row)\r\n if row:\r\n del row\r\n if rowsPop:\r\n del rowsPop\r\n # Calculate Mukimlot_wo_letter\r\n arcpy.CalculateField_management(config.SplittedProjLotRows, \"Mukimlot_wo_letter\", \"!Mukim_Lot_No![:10]\",\r\n \"PYTHON_9.3\", \"\")\r\n\r\n except:\r\n log_error(\"Error in 16 Add and populate fields: \", logFile)\r\n logFile.writelines(\"16 Add and populate fields ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 17.Match mukim lot and land lot\r\n logFile.writelines(\"17 Match mukim lot with landlot starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n arcpy.MatchMukimLandLot()\r\n except:\r\n log_error(\"Error in 17 Match mukim lot with landlot: \", logFile)\r\n logFile.writelines(\"17 Match mukim lot with landlot ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 18.Get unmatched mukim lot with land lot\r\n logFile.writelines(\"18 Get unmatched mukim lot starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.AddMessage(\"18 Get unmatched mukim lot\")\r\n try:\r\n arcpy.GetUnmatchedMukimLot()\r\n\r\n except:\r\n log_error(\"Error in 18 Get unmatched mukim lot: \", logFile)\r\n\r\n logFile.writelines(\"18 Get unmatched mukim lot ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 19. 
Log errors for unmatched mukim lots\r\n logFile.writelines(\"19 Log unmatched mukim lot starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n rowsUnmatched = arcpy.SearchCursor(config.UnmatchedMukimLot)\r\n row = None\r\n\r\n for row in rowsUnmatched:\r\n message = ['Unmatched mukim lot with the land lot', row.Project_Ref_No, row.Project_Title,\r\n row.House_Blk_No, row.Road_Name, row.Level_No, row.Unit_No, row.Building_Name,\r\n row.Postal_Code, row.Project_Mukim_nos, row.Project_Lot_nos, row.Permit_Type_of_Work,\r\n row.Type_of_Work, row.Owner_s_name, row.Owner_s_firm_name, row.Owner_s_address,\r\n row.Owner_s_Tel_No, row.Owner_s_Email_address, row.Builder_s_name,\r\n row.Builder_s_firm_name, row.Builder_s_address, row.Builder_s_Tel_No,\r\n row.Builder_s_email_address, row.PE_s_name, row.PE_s_firm_name, row.PE_s_address,\r\n row.PE_s_Tel_No, row.PE_s_Email_address, row.Architect_s_name, row.Architect_s_firm_name,\r\n row.Architect_s_address, row.Architect_s_Tel_No, row.Architect_s_Email_address,\r\n row.Project_Cost, row.Project_Duration, row.Approval_Date_DD_MM_YYYY_]\r\n col_count = 0\r\n for element in message:\r\n sheet.write(row_count, col_count, element)\r\n col_count += 1\r\n row_count += 1\r\n wbk.save(config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \"_err\" + \".xls\")\r\n if row:\r\n del row\r\n if rowsUnmatched:\r\n del rowsUnmatched\r\n\r\n with xlrd.open_workbook(config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \"_err\" + \".xls\") as wb:\r\n sh = wb.sheet_by_index(0)\r\n if sh.nrows == 1:\r\n os.remove(config.ErrorLogFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \"_err\" + \".xls\")\r\n\r\n except arcpy.ExecuteError:\r\n log_error(\"Error in 19 Log unmatched mukim lot: \", logFile)\r\n logFile.writelines(\"19 Log unmatched mukim lot ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 20. Prepare the table for MukimConstruct matching (add required fields)\r\n logFile.writelines(\"20 Add fields to be used for matching starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n if arcpy.Exists(config.MUKIMCONSTRUCTImport):\r\n arcpy.Delete_management(config.MUKIMCONSTRUCTImport)\r\n arcpy.FeatureClassToFeatureClass_conversion(config.MukimConstructSource, config.TempDataGDB,\r\n \"MUKIM_CONSTRUCT_Import\")\r\n else:\r\n arcpy.FeatureClassToFeatureClass_conversion(config.MukimConstructSource, config.TempDataGDB,\r\n \"MUKIM_CONSTRUCT_Import\")\r\n\r\n arcpy.AddField_management(config.MatchedMukimLot, \"Concat_4fields\", \"Text\", \"\", \"\", \"\")\r\n arcpy.AddField_management(config.MUKIMCONSTRUCTImport, \"Concat_4fields\", \"Text\", \"\", \"\", \"\")\r\n arcpy.AddField_management(config.MatchedMukimLot, \"PROJ_DURATION_MTHS2\", \"Double\", \"\", \"\", \"\")\r\n except:\r\n log_error(\"Error in 20 Add fields to be used for matching: \", logFile)\r\n logFile.writelines(\"20 Add fields to be used for matching ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 21. 
Calculate Project Duration as months\r\n logFile.writelines(\"21 Calculate PROJ_DURATION as months starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n rowsProjDur = arcpy.UpdateCursor(config.MatchedMukimLot)\r\n\r\n for row in rowsProjDur:\r\n durationstr = row.PROJ_DURATION_MTHS\r\n if \"Month\" in row.PROJ_DURATION_MTHS:\r\n durationintmth = int(durationstr.split(' ')[0])\r\n row.PROJ_DURATION_MTHS2 = durationintmth\r\n elif \"Year\" in row.PROJ_DURATION_MTHS:\r\n durationintyr = int(durationstr.split(' ')[0]) * 12\r\n row.PROJ_DURATION_MTHS2 = durationintyr\r\n rowsProjDur.updateRow(row)\r\n if rowsProjDur:\r\n del rowsProjDur\r\n if row:\r\n del row\r\n\r\n arcpy.DeleteField_management(config.MatchedMukimLot, \"PROJ_DURATION_MTHS\")\r\n arcpy.AddField_management(config.MatchedMukimLot, \"PROJ_DURATION_MTHS\", \"Double\")\r\n arcpy.CalculateField_management(config.MatchedMukimLot, \"PROJ_DURATION_MTHS\", \"[PROJ_DURATION_MTHS2]\")\r\n except:\r\n log_error(\"Error in 21 Calculate PROJ_DURATION as months: \", logFile)\r\n logFile.writelines(\"21 Calculate PROJ_DURATION as months ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 22. Concatenate 4 fields to be used in checking if mukimlot already exists in MUKIMCONSTRUCT\r\n logFile.writelines(\"22 Concatenate 4 fields starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n rowsConcat1 = arcpy.UpdateCursor(config.MUKIMCONSTRUCTImport)\r\n\r\n for row in rowsConcat1:\r\n expression = str(row.PROJ_REF_NO) + \"-\" + str(row.BUILDER_NAME) + \"-\" + str(\r\n row.LOT_KEY) + \"-\" + str(row.PERMIT_DATE)\r\n row.Concat_4fields = expression\r\n rowsConcat1.updateRow(row)\r\n if row:\r\n del row\r\n if rowsConcat1:\r\n del rowsConcat1\r\n\r\n rowsConcat2 = arcpy.UpdateCursor(config.MatchedMukimLot)\r\n\r\n for row in rowsConcat2:\r\n expression = str(row.PROJ_REF_NO) + \"-\" + str(row.BUILDER_NAME) + \"-\" + str(\r\n row.LOT_KEY) + \"-\" + str(row.PERMIT_DATE)\r\n row.Concat_4fields = expression\r\n rowsConcat2.updateRow(row)\r\n if row:\r\n del row\r\n if rowsConcat2:\r\n del rowsConcat2\r\n except:\r\n log_error(\"Error in 22 Concatenate 4 fields: \", logFile)\r\n logFile.writelines(\"22 Concatenate 4 fields ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 23.Match mukim lot with mukim construct\r\n logFile.writelines(\"23 Match mukimlot with mukim construct at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.env.workspace = config.TempDataGDB # \"G:\\\\Project\\\\GERIUPGRADE\\\\GPTools\\\\NotificationSysTools\\\\BCAReportProcessing\\\\Temp_data.gdb\"\r\n try:\r\n arcpy.MatchedMukimlotMukimConstruct()\r\n except:\r\n log_error(\"Error in 23 Match mukimlot with mukim construct: \", logFile)\r\n logFile.writelines(\"23 Match mukimlot with mukim construct ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 24.Copy raw values to project lot and project mukim columns and delete the 2 fields\r\n logFile.writelines(\"24 Recalculate projlot and projmukim based on original values starts at \" + str(\r\n datetime.datetime.now()) + \"\\n\")\r\n try:\r\n rowsRaw = arcpy.UpdateCursor(config.MatchedMukimLot)\r\n\r\n for row in rowsRaw:\r\n row.PROJ_MUKIM_NOS = row.PROJECTMUKIM_RAW\r\n row.PROJ_LOT_NOS = row.PROJECTLOT_RAW\r\n rowsRaw.updateRow(row)\r\n if row:\r\n del row\r\n if rowsRaw:\r\n del rowsRaw\r\n except:\r\n log_error(\"Error in 24 Recalculate projlot and projmukim based on original values:\", logFile)\r\n logFile.writelines(\"24 Recalculate projlot and projmukim based on 
original values ends at \" + str(\r\n datetime.datetime.now()) + \"\\n\")\r\n\r\n # 25. Export Cleaned BCA Permit report for CWD\r\n logFile.writelines(\r\n \"25 Export of Cleaned BCA Permit report starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n # Initialize the file\r\n CleanedBCAPermitReport = xlwt.Workbook()\r\n book = CleanedBCAPermitReport.add_sheet('Book 1')\r\n countrow = 0\r\n countcol = 0\r\n fields = ['Project Ref No', 'Project Title', 'House Blk No', 'Road Name', 'Level No', 'Unit No',\r\n 'Building Name', 'Postal Code', 'Project Mukim nos', 'Project Lot nos', 'Permit Type of Work',\r\n 'Type of Work', \"Owner's name\", \"Owner's firm name\", \"Owner's address\", \"Owner's Tel No\",\r\n \"Owner's Email address\", \"Builder's name\", \"Builder's firm name\", \"Builder's address\",\r\n \"Builder's Tel No\", \"Builder's email address\", \"PE's name\", \"PE's firm name\", \"PE's address\",\r\n \"PE's Tel No\", \"PE's Email address\", \"Architect's name\", \"Architect's firm name\",\r\n \"Architect's address\", \"Architect's Tel No\", \"Architect's Email address\", 'Project Cost',\r\n 'Project Duration', 'Approval Date(DD/MM/YYYY)']\r\n for fieldname in fields:\r\n book.write(countrow, countcol, fieldname)\r\n countcol += 1\r\n CleanedBCAPermitReport.save(config.CleanedBCAPermitFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \".xls\")\r\n\r\n # Copy the data to Excel File\r\n data = arcpy.SearchCursor(config.MatchedMukimLot)\r\n\r\n countrow = 1\r\n countcol = 0\r\n for row in data:\r\n message = [row.PROJ_REF_NO, row.PROJ_TITLE, row.HOUSE_BLK_NO, row.ROAD_NAME, row.LEVEL_NO,\r\n row.UNIT_NO, row.BUILDING_NAME, row.POSTAL_CODE, row.PROJ_MUKIM_NOS, row.PROJ_LOT_NOS,\r\n row.PERMIT_WORK_TYPE, row.WORK_TYPE, row.OWNER_NAME, row.OWNER_FIRM_NAME, row.OWNER_ADDR,\r\n row.OWNER_TEL, row.OWNER_EMAIL, row.BUILDER_NAME, row.BUILDER_FIRM_NAME,\r\n row.BUILDER_ADDR, row.BUILDER_TEL, row.BUILDER_EMAIL, row.PE_NAME, row.PE_FIRM_NAME,\r\n row.PE_ADDR, row.PE_TEL, row.PE_EMAIL, row.ARCHITECT_NAME, row.ARCHITECT_FIRM_NAME,\r\n row.ARCHITECT_ADDR, row.ARCHITECT_TEL, row.ARCHITECT_EMAIL, row.PROJ_COST,\r\n row.PROJ_DURATION_MTHS, row.PROJ_APPROVAL_DATE]\r\n countcol = 0\r\n for element in message:\r\n book.write(countrow, countcol, element)\r\n countcol += 1\r\n countrow += 1\r\n CleanedBCAPermitReport.save(config.CleanedBCAPermitFolder + \"\\\\\" + input_file_name.split(\".\")[0] + \".xls\")\r\n if row:\r\n del row\r\n if data:\r\n del data\r\n except:\r\n log_error(\"Error in 25 Export of Cleaned BCA Permit Report: \", logFile)\r\n logFile.writelines(\"25 Export of Cleaned BCA Permit Report ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 26. Catchment calculation\r\n arcpy.env.workspace = config.TempDataGDB\r\n logFile.writelines(\"26 Catchment calculation starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n arcpy.CatchmentCalculation()\r\n except:\r\n log_error(\"Error in 26 Catchment calculation: \", logFile)\r\n logFile.writelines(\"26 Catchment calculation ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 27. Depot calculation\r\n logFile.writelines(\"27 Depot calculation starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n arcpy.DepotCalculation()\r\n except:\r\n log_error(\"Error in 27 Depot calculation: \", logFile)\r\n logFile.writelines(\"27 Depot calculation ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 28. 
Re-add date fields and populate\r\n logFile.writelines(\"28 Re-add date fields and populate starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n arcpy.AddField_management(config.MUKIMCONSTRUCT_Temp, \"PERMIT_DATE\", \"Date\")\r\n arcpy.AddField_management(config.MUKIMCONSTRUCT_Temp, \"PROJ_APPROVAL_DATE2\", \"Date\")\r\n arcpy.AddField_management(config.MUKIMCONSTRUCT_Temp, \"PROJ_END_DATE\", \"Date\")\r\n\r\n rows = arcpy.UpdateCursor(config.MUKIMCONSTRUCT_Temp)\r\n\r\n for row in rows:\r\n date = filedate.strftime(\"%Y%m%d\")\r\n year = int(date[:4])\r\n month = int(date[4:6])\r\n day = int(date[6:8])\r\n permit_date = datetime.datetime(year, month, day)\r\n row.PERMIT_DATE = permit_date\r\n row.PROJ_APPROVAL_DATE2 = datetime.datetime.strptime(row.PROJ_APPROVAL_DATE, '%d/%m/%Y')\r\n rows.updateRow(row)\r\n if row:\r\n del row\r\n if rows:\r\n del rows\r\n except:\r\n log_error(\"Error in 28 Re-add fields and populate: \", logFile)\r\n logFile.writelines(\"28 Re-add fields and populate ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 29. Calculate the end date field\r\n logFile.writelines(\"29 Calculate the end date field starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n\r\n rowsEndDate = arcpy.UpdateCursor(config.MUKIMCONSTRUCT_Temp)\r\n\r\n for row in rowsEndDate:\r\n sourcedate = row.PROJ_APPROVAL_DATE2\r\n # sourcedate = datetime.datetime.strptime(row.PROJ_APPROVAL_DATE2 , '%d/%m/%Y')\r\n months = int(row.PROJ_DURATION_MTHS)\r\n d = add_months(sourcedate, months)\r\n row.PROJ_END_DATE = d\r\n rowsEndDate.updateRow(row)\r\n if row:\r\n del row\r\n if rowsEndDate:\r\n del rowsEndDate\r\n except:\r\n log_error(\"Error in 29 Calculate the end date field: \", logFile)\r\n logFile.writelines(\"29 Calculate the end date field ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 30. Calculate Project Total Area\r\n logFile.writelines(\"30 Project total area calculation starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n arcpy.ProjectTotalArea()\r\n except:\r\n log_error(\"Error in 30 Project total area calculation: \", logFile)\r\n logFile.writelines(\"30 Project total area calculation ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 31. Calculate the BCA_CORRECTED_BY\r\n logFile.writelines(\"31 Calculate the BCA_CORRECTED_BY starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n rows_BCA_CB = arcpy.UpdateCursor(config.MUKIMCONSTRUCT_Temp)\r\n\r\n for row in rows_BCA_CB:\r\n if \"\\WSN\\\\\" in BCAreport:\r\n row.BCA_CORRECTED_BY = \"WSN\"\r\n elif \"\\WRN\\\\\" in BCAreport:\r\n row.BCA_CORRECTED_BY = \"WRN\"\r\n elif \"\\CWD\\\\\" in BCAreport:\r\n row.BCA_CORRECTED_BY = \"CWD\"\r\n rows_BCA_CB.updateRow(row)\r\n if row:\r\n del row\r\n if rows_BCA_CB:\r\n del rows_BCA_CB\r\n except:\r\n log_error(\"Error in 31 Calculate the BCA_CORRECTED_BY: \", logFile)\r\n\r\n # 32. 
Remove spaces in PROJ_REF_NO\r\n logFile.writelines(\r\n \"32 Removing of spaces in mukim and project lot starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n try:\r\n rowsSpaces = arcpy.UpdateCursor(config.MUKIMCONSTRUCT_Temp)\r\n\r\n for row in rowsSpaces:\r\n lot_no_spaces = row.PROJ_REF_NO.strip()\r\n row.PROJ_REF_NO = lot_no_spaces\r\n rowsSpaces.updateRow(row)\r\n if row:\r\n del row\r\n if rowsSpaces:\r\n del rowsSpaces\r\n except:\r\n log_error(\"Error in 32 Removing of spaces in mukim and project lot: \", logFile)\r\n logFile.writelines(\r\n \"32 Removing of spaces in mukim and project lot ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 33. Process the Mukim Construct by Project\r\n logFile.writelines(\r\n \"33 Process the Mukim Construct by Project starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.env.overwriteOutput = True\r\n try:\r\n MUKIM_CONSTRUCT_BYPROJ_IMPORT = config.TempDataGDB + \"\\\\MUKIM_CONSTRUCT_BYPROJ_IMPORT\"\r\n MUKIMCONBYPROJ_SORT = config.TempDataGDB + \"\\\\MUKIMCONBYPROJ_SORT\"\r\n MUKIM_CONSTRUCT_BYPROJ_DISS = config.TempDataGDB + \"\\\\MUKIM_CONSTRUCT_BYPROJ_DISS\"\r\n MUKIM_CONSTRUCT_BYPROJ_DISS__2_ = config.TempDataGDB + \"\\\\MUKIM_CONSTRUCT_BYPROJ_DISS\"\r\n\r\n if arcpy.Exists(MUKIM_CONSTRUCT_BYPROJ_IMPORT):\r\n arcpy.Delete_management(MUKIM_CONSTRUCT_BYPROJ_IMPORT)\r\n if arcpy.Exists(MUKIMCONBYPROJ_SORT):\r\n arcpy.Delete_management(MUKIMCONBYPROJ_SORT)\r\n if arcpy.Exists(MUKIM_CONSTRUCT_BYPROJ_DISS):\r\n arcpy.Delete_management(MUKIM_CONSTRUCT_BYPROJ_DISS)\r\n\r\n arcpy.MUKIMCONBYPROJ()\r\n # arcpy.MUKIMCONSTRUCTBYPROJProcess2()\r\n\r\n arcpy.Sort_management(MUKIM_CONSTRUCT_BYPROJ_IMPORT, MUKIMCONBYPROJ_SORT, \"PROJ_END_DATE DESCENDING\",\r\n \"UR\")\r\n arcpy.Dissolve_management(MUKIMCONBYPROJ_SORT, MUKIM_CONSTRUCT_BYPROJ_DISS, \"PROJ_REF_NO\",\r\n \"LOT_KEY FIRST;PROJ_REF_NO FIRST;PROJ_TITLE FIRST;HOUSE_BLK_NO FIRST;ROAD_NAME FIRST;POSTAL_CODE FIRST;LEVEL_NO FIRST;UNIT_NO FIRST;BUILDING_NAME FIRST;PROJ_MUKIM_NOS FIRST;PROJ_LOT_NOS FIRST;PERMIT_WORK_TYPE FIRST;WORK_TYPE FIRST;OWNER_NAME FIRST;OWNER_FIRM_NAME FIRST;OWNER_ADDR FIRST;OWNER_TEL FIRST;OWNER_EMAIL FIRST;BUILDER_NAME FIRST;BUILDER_FIRM_NAME FIRST;BUILDER_ADDR FIRST;BUILDER_TEL FIRST;BUILDER_EMAIL FIRST;PE_NAME FIRST;PE_FIRM_NAME FIRST;PE_ADDR FIRST;PE_TEL FIRST;PE_EMAIL FIRST;ARCHITECT_NAME FIRST;ARCHITECT_FIRM_NAME FIRST;ARCHITECT_ADDR FIRST;ARCHITECT_TEL FIRST;ARCHITECT_EMAIL FIRST;PROJ_TOT_AREA FIRST;PROJ_PARENT_CWDCATCHMENT FIRST;PROJ_PARENT_WSNDEPOT FIRST;PROJ_PARENT_WRPCATCHMENT FIRST;BCA_CORRECTED_BY FIRST;PROJ_DURATION_MTHS FIRST;PROJ_COST FIRST\",\r\n \"MULTI_PART\", \"DISSOLVE_LINES\")\r\n arcpy.JoinField_management(MUKIM_CONSTRUCT_BYPROJ_DISS, \"FIRST_PROJ_REF_NO\", MUKIMCONBYPROJ_SORT,\r\n \"PROJ_REF_NO\", \"PROJ_APPROVAL_DATE;PROJ_END_DATE;PERMIT_DATE\")\r\n arcpy.CalculateField_management(MUKIM_CONSTRUCT_BYPROJ_DISS__2_, \"FIRST_PROJ_TOT_AREA\",\r\n \"[Shape_Area]/10000\", \"VB\", \"\")\r\n\r\n except:\r\n log_error(\"Error in 33 Process the Mukim Construct by Project: \", logFile)\r\n logFile.writelines(\r\n \"33 Process the Mukim Construct by Project ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n arcpy.AddMessage(\"33 END process MUKIM CONSTRUCT\")\r\n\r\n # 34. 
Filter on-going projects\r\n\r\n logFile.writelines(\"34 Filter on-going projects starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n # TempDataGDB = \"G:\\\\Project\\\\GERIUPGRADE\\\\GPTools\\\\NotificationSysTools\\\\BCAReportProcessing\\\\Temp_data.gdb\"\r\n MUKIM_CONSTRUCT_BYPROJ_DISS = config.TempDataGDB + \"\\\\MUKIM_CONSTRUCT_BYPROJ_DISS\"\r\n rowsIn = arcpy.UpdateCursor(MUKIM_CONSTRUCT_BYPROJ_DISS)\r\n\r\n row = None\r\n for row in rowsIn:\r\n strdays = str(row.PROJ_END_DATE.date() - datetime.date.today())\r\n splitDays = strdays.split()\r\n if splitDays[0] == '0:00:00':\r\n result = \"On-going project (but will end today)\"\r\n else:\r\n if int(splitDays[0]) < 0:\r\n rowsIn.deleteRow(row)\r\n else:\r\n result = \"On-going project\"\r\n if rowsIn:\r\n del rowsIn\r\n if row:\r\n del row\r\n\r\n except:\r\n log_error(\"Error in 34 Filter on-going projects: \", logFile)\r\n logFile.writelines(\"34 Filter on-going projects ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # 35. Append the new data to MUKIM_CONSTRUCT\r\n logFile.writelines(\r\n \"35 Append the new data to MUKIM_CONSTRUCT starts at \" + str(datetime.datetime.now()) + \"\\n\")\r\n try:\r\n arcpy.AppendNewData()\r\n except:\r\n log_error(\"Error in 35 Append the new data to MUKIM_CONSTRUCT: \", logFile)\r\n logFile.writelines(\r\n \"35 Append the new data to MUKIM_CONSTRUCT ends at \" + str(datetime.datetime.now()) + \"\\n\")\r\n\r\n # Clean the memory and the schema lock\r\n arcpy.RefreshCatalog(config.Notification)\r\n arcpy.Compact_management(config.TempDataGDB)\r\n gc.collect()\r\n\r\n # Status update to run/not run the SiteInspection Update\r\n Log_SiteInspectionUpdate = file(config.SiteInspectionUpdate, \"w\")\r\n Log_SiteInspectionUpdate.writelines(\"YES\")\r\n Log_SiteInspectionUpdate.close()\r\n\r\n arcpy.AddMessage(\"END BCA Processing\")\r\n arcpy.AddMessage(\"Passing file date to other functions: \" + repr(filedate))\r\n\r\n # Generate Report\r\n import ReportGeneration_Adhoc_WithProjects as gen_report\r\n gen_report.run(filedate)\r\n #\r\n # # Send email to departments\r\n # import EmailGenerationCompletion_adhoc as send_dept_notification\r\n # if \"CORRECTED\" in BCAreport.upper():\r\n # send_dept_notification.run(filedate, corrected=True)\r\n # else:\r\n # send_dept_notification.run(filedate)\r\n\r\n # Generate advisory letters\r\n import LetterGeneration as letter_gen\r\n letter_gen.run(filedate)\r\n #\r\n # # Send letters to project team\r\n # import EmailGeneration as send_advisory_email\r\n # send_advisory_email.run(filedate)\r\n\r\n\r\n # 36. 
Move the BCAReport in the backup folder\r\n for BCAreport in correct_config_files:\r\n\r\n input_file_name = BCAreport.split(\"\\\\\")[-1]\r\n bk_file_path = os.path.join(config.BCAreportBackupFolder, input_file_name)\r\n\r\n # if the same file name exists in the backup folder, rename the new file with timestamp and move\r\n if os.path.exists(bk_file_path):\r\n\r\n new_filename = datetime.datetime.now().strftime(\"%Y%m%d-%H%M\") + input_file_name\r\n new_filepath = os.path.join(config.BCAreportBackupFolder, new_filename)\r\n shutil.copy(BCAreport, new_filepath)\r\n os.remove(BCAreport)\r\n\r\n # if the filename does not exist in the backup folder, move the file to backup\r\n else:\r\n shutil.move(BCAreport, config.BCAreportBackupFolder)\r\n\r\n logFile.writelines(\"Moved the BCA report to the backup folder at \" + str(datetime.datetime.now()) + \"\\n\")\r\n logFile.close()", "def cleanup(self):\n\n self.PLC['1'].set_plc_mode(0)\n self.PLC['1'].plc_clear('all')\n super(Test200SmartSanityClear005, self).cleanup()", "def parametersweep(basedir,configfile,acfdir='ACF',invtype='tik'):\n\n alpha_sweep=sp.logspace(-3.5,sp.log10(7),25)\n costdir = os.path.join(basedir,'Cost')\n ionoinfname=os.path.join(basedir,acfdir,'00lags.h5')\n ionoin=IonoContainer.readh5(ionoinfname)\n \n dirio = ('Spectrums','Mat','ACFMat')\n inputdir = os.path.join(basedir,dirio[0])\n \n dirlist = glob.glob(os.path.join(inputdir,'*.h5'))\n (listorder,timevector,filenumbering,timebeg,time_s) = IonoContainer.gettimes(dirlist)\n Ionolist = [dirlist[ikey] for ikey in listorder]\n \n RSTO = RadarSpaceTimeOperator(Ionolist,configfile,timevector,mattype='Sim')\n \n npts=RSTO.simparams['numpoints']\n \n ionospec=makeionocombined(dirlist)\n if npts==ionospec.Param_List.shape[-1]:\n tau,acfin=spect2acf(ionospec.Param_Names,ionospec.Param_List)\n nloc,ntimes=acfin.shape[:2]\n ambmat=RSTO.simparams['amb_dict']['WttMatrix']\n np=ambmat.shape[0]\n acfin_amb=sp.zeros((nloc,ntimes,np),dtype=acfin.dtype)\n # get the original acf\n \n \n ambmat=RSTO.simparams['amb_dict']['WttMatrix']\n np=ambmat.shape[0]\n \n for iloc,locarr in enumerate(acfin):\n for itime,acfarr in enumerate(locarr):\n acfin_amb[iloc,itime]=sp.dot(ambmat,acfarr)\n acfin_amb=acfin_amb[:,0]\n else:\n acfin_amb=ionospec.Param_List[:,0]\n \n if not os.path.isdir(costdir):\n os.mkdir(costdir)\n # pickle file stuff \n pname=os.path.join(costdir,'cost{0}-{1}.pickle'.format(acfdir,invtype))\n\n alpha_list=[]\n errorlist=[]\n errorlaglist=[]\n datadiflist=[]\n constlist=[]\n if 'perryplane' in basedir.lower() or 'SimpData':\n rbounds=[-500,500]\n else:\n rbounds=[0,500]\n\n alpha_list_new=alpha_sweep.tolist()\n for i in alpha_list:\n if i in alpha_list_new:\n alpha_list_new.remove(i)\n \n for i in alpha_list_new:\n ionoout,datadif,constdif=invertRSTO(RSTO,ionoin,alpha_list=i,invtype=invtype,rbounds=rbounds,Nlin=1)\n \n datadiflist.append(datadif)\n constlist.append(constdif)\n acfout=ionoout.Param_List[:,0]\n alpha_list.append(i)\n outdata=sp.power(sp.absolute(acfout-acfin_amb),2)\n aveerror=sp.sqrt(sp.nanmean(outdata,axis=0))\n errorlaglist.append(aveerror)\n errorlist.append(sp.nansum(aveerror))\n \n pickleFile = open(pname, 'wb')\n pickle.dump([alpha_list,errorlist,datadiflist,constlist,errorlaglist],pickleFile)\n pickleFile.close()\n mkalphalist(pname)\n alphaarr=sp.array(alpha_list)\n errorarr=sp.array(errorlist)\n errorlagarr=sp.array(errorlaglist)\n datadif=sp.array(datadiflist)\n constdif=sp.array(constlist)\n 
fig,axlist,axmain=plotalphaerror(alphaarr,errorarr,errorlagarr)\n fig.savefig(os.path.join(costdir,'cost{0}-{1}.png'.format(acfdir,invtype)))\n \n fig,axlist=plotLcurve(alphaarr,datadif,constdif)\n fig.savefig(os.path.join(costdir,'lcurve{0}-{1}.png'.format(acfdir,invtype)))", "def cleanup(self):\n fmin = self._minuit_problem.get_fmin()\n if self._status == 0:\n self.flag = 0\n elif fmin.has_reached_call_limit:\n self.flag = 1\n else:\n self.flag = 2\n\n self._popt = self._minuit_problem.np_values()\n self.final_params = self._popt", "def run_pca(data_file, rs, n_components, outfile1, outfile2):\n print('running PCA with n_components={}'.format(n_components))\n day_batcher = DayBatcher(data_file, skiprow=1, delimiter=' ')\n mat = day_batcher.next_batch()\n rst = []\n while mat is not None:\n if mat.shape[1] == 13:\n # use compact10d\n datadict = {'features': mat[:, 3:],\n 'red': mat[:, 2],\n 'user': mat[:, 1],\n 'day': mat[:, 0]}\n else:\n # use all_fixed\n datadict = {'features': mat[:, 14:],\n 'red': mat[:, 13],\n 'user': mat[:, 1],\n 'day': mat[:, 0]}\n batch = scale(datadict['features'])\n pca = PCA(n_components=n_components, random_state=rs)\n pca.fit(batch)\n data_reduced = np.dot(batch, pca.components_.T) # pca transform\n data_original = np.dot(data_reduced, pca.components_) # inverse_transform\n pointloss = np.mean(np.square(batch - data_original), axis=1)\n loss = np.mean(pointloss)\n for d, u, t, l, in zip(datadict['day'].tolist(),\n datadict['user'].tolist(),\n datadict['red'].tolist(),\n pointloss.flatten().tolist()):\n rst.append((u, d, l, t))\n mat = day_batcher.next_batch()\n train_rst, test_rst = split_train_test(rst)\n save_rst(train_rst, outfile1)\n save_rst(test_rst, outfile2)\n eval_cr(test_rst, 'pca')", "def cleanup_aai(cls):\n logger.info(\"####################### Start to clean up AAI settings\")\n aai = Customer.get_by_global_customer_id(\"5GCustomer\")\n aai.delete()", "def correct_bad_chair(phases_dict):\n if len(phases_dict[\"instance_idx\"]) - 1 != phases_dict[\"n_objects\"]:\n # remove the empty object\n obj_points = []\n n_empty_obj = 0\n opt_ids = []\n for opt_id, opts in enumerate(phases_dict[\"obj_points\"]):\n if not opts.shape[0] == 0:\n obj_points.append(opts)\n opt_ids.append(opt_id)\n else:\n n_empty_obj += 1\n phases_dict[\"obj_points\"] = obj_points\n phases_dict[\"before_fix_n_objects\"] = phases_dict[\"n_objects\"]\n phases_dict[\"n_objects\"] = len(obj_points)\n phases_dict[\"bad_lamp\"] = True\n phases_dict[\"ok_obj_id\"] = opt_ids\n assert(len(phases_dict[\"instance_idx\"]) - 1 == phases_dict[\"n_objects\"])\n return True\n else:\n # there is empty mesh in drop\n\n if \"drop\" in phases_dict[\"trial_dir\"] and \"train/50\" in phases_dict[\"trial_dir\"]:\n\n n_empty_obj = 0\n opt_ids = []\n for opt_id, opts in enumerate(phases_dict[\"obj_points\"]):\n if not opts.shape[0] == 0:\n opt_ids.append(opt_id)\n else:\n n_empty_obj += 1\n if n_empty_obj > 0:\n\n\n list_items = [\"root_des_radius\", \"root_num\", \"clusters\", \"instance\", \"material\", \"obj_points\"]\n for item in list_items:\n phases_dict[item] = [phases_dict[item][a] for a in opt_ids]\n new_instance_idx = [0]\n for obj_pts in phases_dict[\"obj_points\"]:\n new_instance_idx.append(new_instance_idx[-1] + obj_pts.shape[0])\n\n phases_dict[\"instance_idx\"] = new_instance_idx\n phases_dict[\"n_objects\"] = len(phases_dict[\"obj_points\"])\n phases_dict[\"ok_obj_id\"] = opt_ids\n\n assert(phases_dict[\"n_particles\"] == new_instance_idx[-1])\n assert(len(phases_dict[\"instance_idx\"]) - 
1 == phases_dict[\"n_objects\"])\n assert(len(phases_dict[\"root_num\"]) == phases_dict[\"n_objects\"])\n return True\n else:\n return False\n\n\n return False", "def modify_ca_data_df(p_df, ca_data_df):\r\n ca_data_df.at[0, 'T1'] = p_df['T1'].iloc[-1]\r\n ca_data_df.at[0, 'T2'] = p_df['T2'].iloc[-1]\r\n ca_data_df.at[0, 'T3'] = p_df['T3'].iloc[-1]\r\n ca_data_df.at[0, 'T4'] = p_df['T4'].iloc[-1]\r\n ca_data_df.at[0, 'T5'] = p_df['T5'].iloc[-1]\r\n ca_data_df.at[0, 'q'] = p_df['q'].iloc[-1]\r\n ca_data_df.at[0, 'setup_speed'] = p_df['setup_speed'].iloc[-1]\r\n ca_data_df.loc[0, 'purpose'] = ''\r\n ca_data_df.loc[0, 'request_type'] = ''\r\n return ca_data_df", "def run_pycma(self):\n\n self.mug_pipeline.set_folder_names(self.folder_name)\n self.mug_pipeline.set_optimizer_type(OptimizerType.PYCMA)\n\n self.mug_initial_poses = []\n\n for i in range(self.num_mugs):\n self.mug_initial_poses += \\\n RollPitchYaw(np.random.uniform(0.0, 2.0*np.pi, size=3)).ToQuaternion().wxyz().tolist() + \\\n [np.random.uniform(-0.1, 0.1), np.random.uniform(-0.1, 0.1), np.random.uniform(0.1, 0.2)]\n\n print(self.mug_initial_poses, flush=True)\n\n iter_num = 0\n\n start_time = time.time()\n elapsed_time = 0\n\n manager = Manager()\n self.all_probabilities = manager.list()\n all_probabilities_lock = manager.Lock()\n\n self.total_iterations = manager.Value('d', 0)\n self.num_counterexamples = manager.Value('d', 0)\n\n self.model_number = manager.Value('d', 0)\n model_number_lock = manager.Lock()\n\n counter_lock = manager.Lock()\n\n file_q = manager.Queue()\n\n filename = '{}/results.csv'.format(self.folder_name)\n watcher = Process(target=self.listener, args=(file_q, filename))\n watcher.start()\n\n # TODO: share GPU for inference using model.share_memory()\n\n es = cma.CMAEvolutionStrategy(self.mug_initial_poses, 1.0/3.0,\n {'bounds': [-1.0, 1.0], 'verb_disp': 1, 'popsize': self.num_processes})\n\n while not es.stop():\n try:\n ep = EvalParallel3(self.run_inference, number_of_processes=self.num_processes)\n lst = range(iter_num, iter_num + self.num_processes)\n X = es.ask()\n elapsed_time = time.time() - start_time\n jobs = ep(X, lst=lst, args=(self.mug_pipeline, self.all_probabilities,\n self.total_iterations, self.num_counterexamples,\n self.model_number, model_number_lock, counter_lock,\n all_probabilities_lock, file_q))\n except FoundCounterexample:\n print('FOUND COUNTEREXAMPLE EXCEPTION', flush=True)\n self.mug_initial_poses = []\n\n for i in range(self.num_mugs):\n self.mug_initial_poses += \\\n RollPitchYaw(np.random.uniform(0., 2.*np.pi, size=3)).ToQuaternion().wxyz().tolist() + \\\n [np.random.uniform(-0.1, 0.1), np.random.uniform(-0.1, 0.1), np.random.uniform(0.1, 0.2)]\n\n es = cma.CMAEvolutionStrategy(self.mug_initial_poses, 1.0/3.0,\n {'bounds': [-1.0, 1.0], 'verb_disp': 1, 'popsize': self.num_processes})\n # except torch.multiprocessing.context.TimeoutError:\n # print('timed out!', flush=True)\n # break\n except FoundMaxCounterexamples:\n print('found {} counterexamples!'.format(self.max_counterexamples), flush=True)\n break\n except:\n print(\"Unhandled unnamed exception in pycma\", flush=True)\n raise\n\n iter_num += self.num_processes\n torch.cuda.empty_cache()\n print('calling ep.terminate()', flush=True)\n ep.terminate()\n\n elapsed_time = time.time() - start_time\n print('ran for {} minutes! 
total number of iterations is {}, with {} sec/image'.format(\n elapsed_time/60.0, self.total_iterations.value, elapsed_time/self.total_iterations.value), flush=True)\n file_q.put('kill')\n print('probabilities:', self.all_probabilities, flush=True)\n es.result_pretty()\n\n sys.stdout.flush()", "def cAPM_Q_learning(df_train, df_test, MinRPT, MaxRPT_r1, MaxRPT, alpha,\n no_rounds):\n\n # number of periods in samples\n T = df_train.shape[0]\n T_test = df_test.shape[0]\n\n # number of agents\n K = df_test.shape[1]\n\n # individual forecasts\n F = df_train.iloc[:, 1:].values\n F_test = df_test.iloc[:, :].values\n\n # outcomes\n outcomes = df_train.iloc[:, 0].values\n\n # initialize matrices containing all predictionsa and bets\n pred_mat = np.full((no_rounds, K), np.nan, dtype=float)\n bet_mat = np.full((no_rounds, K), np.nan, dtype=float)\n\n # initialize vector containing estimated errors\n est_abs_error = np.full((no_rounds, K), np.nan, dtype=float)\n\n # initialize state identification matrix\n state_mat = np.full((no_rounds, K, 3), False, dtype=bool)\n\n # confidence in the crowd\n delta_mat = np.full((no_rounds, K, 3), 0, dtype=float)\n\n # budget initialization\n budgets = np.full(K, 1, dtype=float)\n start_budgets = np.full((no_rounds, K), 1, dtype=float)\n\n # market prediction vector\n market_pred = np.full(no_rounds, np.nan, dtype=float)\n\n # initialize vectors for storing error cluster means and counts\n # (small, medium, large)\n error_cl_mean = np.full(3, np.nan, dtype=float)\n error_cl_count = np.full(3, 0, dtype=float)\n no_errors = no_rounds*K\n\n # matrix containing the Q value for each action and state\n # dimensions: action, round, agent, state\n Q_val = np.full((2, no_rounds, K, 3), 0, dtype=float)\n\n # matrix containing starting predictions (before action) for Q-updates\n pred_mat_upd = np.full((2, no_rounds, K, 3), 0, dtype=float)\n\n # matrix containing market predictions shifted by one period for Q-updates\n market_pred_upd = np.full((no_rounds, K, 3), 0, dtype=float)\n\n ##############################\n # training the market - START#\n ##############################\n for i in range(T):\n\n # first round betting\n pred_mat[0, :] = F[i, :]\n current_bets = budgets * MaxRPT_r1\n bet_mat[0, :] = current_bets\n budgets -= current_bets\n market_pred[0] = np.dot(\n pred_mat[0, :], bet_mat[0, :]\n )/np.sum(bet_mat[0, :])\n\n # rest of rounds betting\n if i == 0:\n for j in range(1, no_rounds):\n\n pred_mat[j, :] = F[i, :]\n current_bets = budgets * MaxRPT\n bet_mat[j, :] = current_bets\n budgets -= current_bets\n\n else:\n for j in range(1, no_rounds):\n\n # screen shot start of round budgets for Q-val updates later\n np.copyto(start_budgets[j, :], budgets)\n\n # base for current invididual predictions are the last round\n # individual prediction\n np.copyto(pred_mat[j, :], pred_mat[j-1, :])\n\n # error estimates using market prediction of last round\n est_error = market_pred[j-1] - pred_mat[j, :]\n est_abs_error = np.abs(est_error)\n\n # identify in which state the agent is\n cl_dist = np.abs(error_cl_mean[:, np.newaxis] - est_abs_error)\n visited_state = np.swapaxes(np.amin(cl_dist, axis=0) == cl_dist,\n 0, 1)\n state_mat[j, :, :] = visited_state\n\n # choose action: 0-preserve, 1-change prediction\n decision = np.argmax(Q_val[:, j, visited_state], axis=0)\n pred_mat[j, :] += decision * delta_mat[j, visited_state] * est_error\n\n # reestime error after the actions are taken\n est_error = market_pred[j-1] - pred_mat[j, :]\n est_abs_error = np.abs(est_error)\n\n # estimate 
score\n accuracy_prime = np.maximum(100 * (1 - est_abs_error/oet), 1)\n score_prime = np.log(accuracy_prime)\n\n # bet according to estimated score\n worth_to_bet = score_prime >= 1\n current_bets = (\n worth_to_bet * MaxRPT + ~worth_to_bet * MinRPT\n ) * budgets\n bet_mat[j, :] = current_bets\n budgets -= current_bets\n\n # store the market prediction at the end of the round\n market_pred[j] = np.dot(\n pred_mat[j, :], bet_mat[j, :])/np.sum(bet_mat[j, :])\n\n # absolute errors from the current market\n abs_errors = np.abs(outcomes[i] - pred_mat)\n abs_errors_flat = abs_errors.flatten()\n\n # recompute error clusters (small, medium, large)\n error_order = np.random.choice(no_errors, no_errors, replace=False)\n\n if i == 0:\n # initialize error clusters\n for k in range(3):\n error_cl_mean[k] = abs_errors_flat[error_order[k]]\n error_cl_count[k] += 1\n for k in range(3, no_errors):\n inc_error = abs_errors_flat[error_order[k]]\n closest_cl = (np.abs(error_cl_mean - inc_error)).argmin()\n error_cl_mean[closest_cl] = (error_cl_count[closest_cl] * error_cl_mean[closest_cl] + inc_error) / (error_cl_count[closest_cl] + 1)\n error_cl_count[closest_cl] += 1\n # order: small, medium, large\n error_cl_mean = np.sort(error_cl_mean)\n # in case the cluster means do not differ (very low probability):\n # spread them\n if np.unique(error_cl_mean).size < 3:\n error_cl_mean += np.array([-1e-10, 1e-10, 2e-10])\n\n # initialize delta matrix (confidence in the crowd)\n\n else:\n for k in range(no_errors):\n inc_error = abs_errors_flat[error_order[k]]\n closest_cl = (np.abs(error_cl_mean - inc_error)).argmin()\n error_cl_mean[closest_cl] = (error_cl_count[closest_cl] * error_cl_mean[closest_cl] + inc_error) / (error_cl_count[closest_cl] + 1)\n error_cl_count[closest_cl] += 1\n # in case the cluster means do not differ (very low probability):\n # spread them\n if np.unique(error_cl_mean).size < 3:\n error_cl_mean += np.array([-1e-10, 1e-10, 2e-10])\n\n # use IQR to determine outlier error threshold\n Q3, Q1 = np.percentile(abs_errors, [75, 25])\n IQR = Q3 - Q1\n oet = Q3 + (IQR*1.5)\n\n # compute revenues of the agents based on accuracy of their predictions\n accuracy = np.maximum(100 * (1 - abs_errors/oet), 1)\n score = np.log(accuracy)\n revenue = np.multiply(score, bet_mat)\n\n # reward agents and rescale the budget (for computational reasons)\n budgets = budgets + np.sum(revenue, axis=0)\n budgets = budgets/np.sum(budgets)\n\n # update Q-values and deltas (possible after the first market)\n if i > 0:\n # update Q-values\n pred_mat_upd[:, 1:, :, :] = pred_mat[np.newaxis, :-1, :, np.newaxis]\n market_pred_upd[1:, :, :] = market_pred[:-1, np.newaxis, np.newaxis]\n pred_mat_upd[1, :, :, :] += delta_mat * (market_pred_upd - pred_mat_upd[1, :, :, :])\n abs_errors_upd = np.abs(outcomes[i] - pred_mat_upd)\n accuracy_upd = np.maximum(100 * (1 - abs_errors_upd/oet), 1)\n score_upd = np.log(accuracy_upd)\n worth_to_bet_upd = score_upd >= 1\n bet_mat_upd = np.multiply(\n worth_to_bet_upd * MaxRPT + ~worth_to_bet_upd * MinRPT,\n start_budgets[np.newaxis, :, :, np.newaxis]\n )\n potential_rev = np.multiply(score_upd, bet_mat_upd)\n Q_val += alpha*(potential_rev*state_mat[np.newaxis, :, :, :] - Q_val)\n # update deltas (confidence in the wisdom of the crowd)\n num = outcomes[i]-pred_mat\n denum = market_pred[:, np.newaxis]-pred_mat\n # prevent division by zero\n denum[denum == 0] = 1e-10\n experience = np.clip(num/denum, a_min=0, a_max=1)\n delta_mat += alpha*(experience[:, :, np.newaxis]*state_mat - delta_mat)\n\n 
############################\n # training the market - END#\n ############################\n\n # using the market for the out-of-sample predictions\n pred = np.full(T_test, np.nan, dtype=float)\n for i in range(T_test):\n\n # reset to the trained market budget\n budgets_test = np.copy(budgets)\n\n # first round betting\n pred_mat[0, :] = F_test[i, :]\n current_bets = budgets_test * MaxRPT_r1\n bet_mat[0, :] = current_bets\n budgets_test -= current_bets\n market_pred[0] = np.dot(\n pred_mat[0, :], bet_mat[0, :]\n )/np.sum(bet_mat[0, :])\n\n # rest of rounds betting\n for j in range(1, no_rounds):\n\n # base for current invididual predictions are the last round\n # individual prediction\n np.copyto(pred_mat[j, :], pred_mat[j-1, :])\n\n # error estimates using market prediction of last round\n est_error = market_pred[j-1] - pred_mat[j, :]\n est_abs_error = np.abs(est_error)\n\n # identify in which state the agent is\n cl_dist = np.abs(error_cl_mean[:, np.newaxis] - est_abs_error)\n visited_state = np.swapaxes(\n np.amin(cl_dist, axis=0) == cl_dist, 0, 1)\n\n # choose action: 0-preserve, 1-change prediction\n decision = np.argmax(Q_val[:, j, visited_state], axis=0)\n pred_mat[j, :] += decision * delta_mat[j, visited_state]*est_error\n\n # reestime error after the actions are taken\n est_error = market_pred[j-1] - pred_mat[j, :]\n est_abs_error = np.abs(est_error)\n\n # estimate score\n accuracy_prime = np.maximum(100 * (1 - est_abs_error/oet), 1)\n score_prime = np.log(accuracy_prime)\n\n # bet according to estimated score\n worth_to_bet = score_prime >= 1\n current_bets = (\n worth_to_bet * MaxRPT + ~worth_to_bet * MinRPT\n ) * budgets_test\n bet_mat[j, :] = current_bets\n budgets_test -= current_bets\n\n # store the market prediction at the end of the round\n market_pred[j] = np.dot(\n pred_mat[j, :], bet_mat[j, :])/np.sum(bet_mat[j, :])\n\n # final round market prediction is the c-APM prediction\n pred[i] = market_pred[no_rounds-1]\n\n # output\n df_pred = pd.DataFrame(\n {\"c-APM (Q-learning)\": pred},\n index=df_test.index\n )\n\n return df_pred", "def pca_pubdev_4167_OOM():\n h2o.remove_all()\n transform_types = [\"NONE\", \"STANDARDIZE\", \"NORMALIZE\", \"DEMEAN\", \"DESCALE\"] # make sure we check all tranforms\n transformN = transform_types[randint(0, len(transform_types)-1)]\n print(\"transform used on dataset is {0}.\\n\".format(transformN))\n\n training_data = h2o.import_file(path=pyunit_utils.locate(\"/Users/wendycwong/gitBackup/SDatasets/pubdev_4167_Avkash/m120K.tar\")) # Nidhi: import may not work\n\n gramSVDPCA = H2OPCA(k=training_data.ncols, transform=transformN)\n gramSVDPCA.train(x=list(range(0, training_data.ncols)), training_frame=training_data)\n\n powerSVDPCA = H2OPCA(k=training_data.ncols, transform=transformN, pca_method=\"Power\")\n powerSVDPCA.train(x=list(range(0, training_data.ncols)), training_frame=training_data)\n\n # compare singular values and stuff between power and GramSVD methods\n print(\"@@@@@@ Comparing eigenvalues between GramSVD and Power...\\n\")\n pyunit_utils.assert_H2OTwoDimTable_equal(gramSVDPCA._model_json[\"output\"][\"importance\"],\n powerSVDPCA._model_json[\"output\"][\"importance\"],\n [\"Standard deviation\", \"Cumulative Proportion\", \"Cumulative Proportion\"],\n tolerance=1e-5, check_all=False)\n print(\"@@@@@@ Comparing eigenvectors between GramSVD and Power...\\n\")\n # compare singular vectors\n pyunit_utils.assert_H2OTwoDimTable_equal(gramSVDPCA._model_json[\"output\"][\"eigenvectors\"],\n 
powerSVDPCA._model_json[\"output\"][\"eigenvectors\"],\n powerSVDPCA._model_json[\"output\"][\"names\"], tolerance=1e-1,\n check_sign=True)", "def process(data, cluster_criteria, method = \"PP\", \\\n min_height = 0, pixel_size = 0, \\\n relax = 0, stop = 0, \\\n verbose = True, interactive = False,\n n_jobs = 1, nsteps = 1 ):\n\n#==============================================================================#\n \"\"\"\n Initial prep of key variables\n \"\"\"\n\n self = Acorns()\n start = time.time()\n\n # User input information\n self.cluster_criteria = cluster_criteria\n\n if np.size(relax) == 1:\n self.relax = relax if (relax != 0) else -1.0\n relaxcond = True if (relax != 0) else False\n else:\n self.relax = relax\n relaxcond = True\n\n if method == \"PP\":\n self.method = 0\n elif method == \"PPV\":\n self.method = 1\n elif method == \"PPP\":\n self.method = 2\n else:\n raise ValueError('method {0:s} unknown'.format(method))\n method = str(method)\n\n # Generate some important information:\n self.minnpix_cluster = get_minnpix(self, pixel_size, self.cluster_criteria[0])\n self.min_height = min_height\n self.max_dist = get_maxdist(self, pixel_size)\n self.cluster_criteria[0] = self.max_dist\n self.min_sep = 2.*self.cluster_criteria[0]\n self.nsteps = nsteps\n # Prime the acorns information:\n # cluster_arr will be updated with the indices of new clusters\n self.cluster_arr = gen_cluster_arr(self, data, stop)\n self.clusters = {}\n self.forest = {}\n\n#==============================================================================#\n \"\"\"\n Main controlling routine for acorns\n \"\"\"\n\n # Get the unassigned data array\n find_unassigned_data(self, data, stop)\n\n # Gen KDTree\n tree = generate_kdtree(self)\n\n # Generate the unassigned data array\n unassigned_array_length = len(self.unassigned_data[0,:])\n\n count= 0.0\n if verbose:\n progress_bar = print_to_terminal(self, 0, data, count, \\\n unassigned_array_length, method)\n\n # Cycle through the unassigned array\n starthierarchy = time.time()\n for i in range(0, unassigned_array_length):\n\n if verbose and (count % 1 == 0):\n progress_bar + 1\n progress_bar.show_progress()\n\n # Extract the current data point\n data_point = np.array(self.unassigned_data[:,i])\n # Retrieve this data point's location in the data array\n data_idx = get_data_index(self, data, data_point)\n self.cluster_arr[0,i] = int(data_idx)\n\n # Every data point begins as a new cluster\n self.cluster_idx = i\n bud_cluster = Cluster(data_point, data_idx, idx=self.cluster_idx, acorns=self)\n\n # Calculate distances between all data points\n link = get_links(self, i, i, tree, n_jobs)\n\n # Find clusters that are closely associated with the current data\n # point\n linked_clusters = find_linked_clusters(self, data, i, bud_cluster, link)\n\n if (self.method==1) & (len(linked_clusters) >= 1):\n linked_clusters = check_other_components(self, i, i, data_idx, data, linked_clusters, bud_cluster, tree, n_jobs, re=False)\n\n \"\"\"\n\n Notes\n -----\n\n Now try and merge this cluster with surrounding linked_clusters.\n From this point on there are three options for that data_point:\n\n 1. If no linked clusters are found - add the bud cluster to the\n cluster dictionary.\n 2. If a single linked cluster is found - merge the two.\n 3. If multiple linked clusters are found, check the validity of each\n cluster and either merge non-independent clusters or form a\n branch.\n\n This philosophy follows that of agglomerative hierarchical\n clustering techniques. 
The basic principle is discussed here:\n http://scikit-learn.org/stable/modules/clustering.html under\n \"2.3.6. Hierarchical Clustering\".\n\n A single link measure is used to connect clusters. The strategy is\n adapted from the general methods of:\n\n astrodendro:\n https://github.com/dendrograms/astrodendro\n Copyright (c) 2013 Thomas P. Robitaille, Chris Beaumont, Braden\n MacDonald, and Erik Rosolowsky\n quickclump:\n https://github.com/vojtech-sidorin/quickclump\n Copyright (c) 2016 Vojtech Sidorin\n\n When linking using the \"PPV\" methodology, single link measures may\n be insufficient and additional connectivity constraints are applied.\n Specifically - it is imposed that no two spectral features extracted\n from the same location can be merged into the same cluster.\n\n Additionally, an additional linking strategy is implemented which\n takes into account of the variance in the properties of the linked\n clusters (specifically those selected by the user). This is only\n implemented when trying to resolve ambiguities and is used as a way\n of establishing the \"strongest\" links when multiple spectral\n features have been detected.\n\n \"\"\"\n\n if not linked_clusters:\n add_to_cluster_dictionary(self, bud_cluster)\n elif len(linked_clusters) == 1:\n merge_into_cluster(self, data, linked_clusters[0], bud_cluster)\n else:\n resolve_ambiguity(self, data, linked_clusters, bud_cluster)\n\n if verbose:\n progress_bar.progress = 100\n progress_bar.show_progress()\n print('')\n print('')\n\n # Remove insignificant clusters from the clusters dictionary and update\n # the unassigned array\n cluster_list, cluster_indices = update_clusters(self, data)\n\n # Take a second pass at the data without relaxing the linking criteria\n # to pick up any remaining stragglers not linked during the first pass\n if (np.size(self.unassigned_data_updated)>1):\n cluster_list, cluster_indices = relax_steps(self, 0, data, method, verbose, tree, n_jobs, second_pass=True)\n endhierarchy = time.time()-starthierarchy\n\n#==============================================================================#\n \"\"\"\n Secondary controlling routine for acorns implemented if the linking\n criteria are relaxed by the user\n\n \"\"\"\n\n if relaxcond and (not interactive) and (np.size(self.unassigned_data_updated)>1):\n startrelax = time.time()\n inc = self.relax/self.nsteps\n cluster_criteria_original = cluster_criteria\n for j in range(1, self.nsteps+1):\n self.cluster_criteria = get_relaxed_cluster_criteria(j*inc, cluster_criteria_original)\n cluster_list, cluster_indices = relax_steps(self, j, data, method, verbose, tree, n_jobs, second_pass=False)\n endrelax = time.time()-startrelax\n\n elif interactive and (np.size(self.unassigned_data_updated)>1):\n startrelax = time.time()\n cluster_criteria_original = cluster_criteria\n #plotting.plot_scatter(self)\n stop = True\n while (not stop): #stop != False:\n self.relax = np.array(eval(input(\"Please enter relax values in list format: \")))\n print('')\n self.cluster_criteria = get_relaxed_cluster_criteria(self.relax, cluster_criteria_original)\n cluster_list, cluster_indices = relax_steps(self, j, data, method, verbose, tree, n_jobs, second_pass=False)\n #plotting.plot_scatter(self)\n s = str(input(\"Would you like to continue? 
\"))\n print('')\n stop = s in ['True', 'T', 'true', '1', 't', 'y', 'yes', 'Y', 'Yes']\n endrelax = time.time()-startrelax\n\n else:\n startrelax = time.time()\n endrelax = time.time()-startrelax\n\n#==============================================================================#\n \"\"\"\n Tidy everything up for output\n\n \"\"\"\n\n cluster_list, cluster_indices = update_clusters(self, data)\n io.reshape_cluster_array(self, data)\n get_forest(self, verbose)\n\n end = time.time()-start\n\n if verbose:\n print('acorns took {0:0.1f} seconds for completion.'.format(end))\n print('Primary clustering took {0:0.1f} seconds for completion.'.format(endhierarchy))\n if relaxcond==True:\n print('Secondary clustering took {0:0.1f} seconds for completion.'.format(endrelax))\n print('')\n print('acorns found a total of {0} clusters.'.format(len(self.clusters)))\n print('')\n print('A total of {0} data points were used in the search.'.format(len(self.unassigned_data[0,:])))\n print('A total of {0} data points were assigned to clusters.'.format(num_links(self)))\n if (np.size(self.unassigned_data_relax)>1):\n print('A total of {0} data points remain unassigned to clusters.'.format(len(self.unassigned_data_relax[0,:])))\n else:\n print('A total of 0 data points remain unassigned to clusters.')\n print('')\n\n io.housekeeping(self)\n\n return self", "def clean(self):\n # Perform the standard ACE cleaning\n max_status = mm_ace.clean(self)\n\n # Replace bad values with NaN and remove times with no valid data\n ecols = ['eflux_38-53', 'eflux_175-315']\n\n # Evaluate the electron flux data\n self[self.data['status_e'] > max_status, ecols] = np.nan\n\n # Evaluate the proton flux data\n pcols = ['pflux_47-68', 'pflux_115-195', 'pflux_310-580',\n 'pflux_795-1193', 'pflux_1060-1900']\n self[self.data['status_p'] > max_status, pcols] = np.nan\n\n # Include both fluxes and the anisotropy index in the removal eval\n eval_cols = ecols + pcols\n eval_cols.append('anis_ind')\n\n # Remove lines without any good data\n good_cols = (np.isfinite(self.data.loc[:, eval_cols])).sum(axis=1)\n bad_index = good_cols[good_cols == 0].index\n self.data = self.data.drop(index=bad_index)\n\n return", "def split_inputs(self):\n\n lca = self.lca\n inputs = self.inputs\n\n inputs_dict = {} # Only store exchanges with uncertainty\n\n # Keep track of which tech_params and bio_params are already included to the analysis\n # Needed to avoid running sa indices computations twice for the same tech or bio params. 
\n # Initialize with parameterized exchanges\n if self.parameters != None and self.ParametersModel != None:\n indices_tech_all = self.parameters_dict['tech_params_where']\n indices_bio_all = self.parameters_dict['bio_params_where']\n else:\n indices_tech_all = np.array([], dtype=int)\n indices_bio_all = np.array([], dtype=int)\n\n for input_ in inputs:\n\n if input_ == 'biosphere':\n continue\n\n inputs_dict[input_] = {}\n\n indices_tech = np.array([], dtype=int)\n indices_bio = np.array([], dtype=int)\n\n if input_ == 'technosphere':\n indices_tech = np.where(lca.tech_params['uncertainty_type']!=0)[0]\n if 'biosphere' in inputs:\n indices_bio = np.where(lca.bio_params['uncertainty_type']!=0)[0]\n\n elif input_ == 'demand_exc':\n # Select all products that pertain to activities in the given demand vector\n for act_index in np.nonzero(lca.demand_array)[0]:\n mask_tech = np.all([lca.tech_params['uncertainty_type']!=0, lca.tech_params['col']==act_index], axis=0)\n indices_tech = np.concatenate([indices_tech, np.where(mask_tech)[0]])\n if 'biosphere' in inputs:\n mask_bio = np.all([lca.bio_params['uncertainty_type']!=0, lca.bio_params['col']==act_index], axis=0)\n indices_bio = np.concatenate([indices_bio, np.where(mask_bio)[0]])\n\n elif input_ in self.databases:\n # Select all products and flows that are linked to the given database\n # Indices corresponding to exchanges in the tech_params depending on the given database\n db_act_indices_tech = [val for key,val in lca.activity_dict.items() if key[0]==input_]\n if len(db_act_indices_tech) > 0:\n db_act_index_min_tech = db_act_indices_tech[0]\n db_act_index_max_tech = db_act_indices_tech[-1]\n mask = lambda i : np.all( [lca.tech_params['uncertainty_type']!=0, \n lca.tech_params['col']==i,\n lca.tech_params['amount']!=0], axis=0 )\n indices_tech = [ np.where( mask(i) ) [0] for i in range(db_act_index_min_tech, db_act_index_max_tech+1) ]\n indices_tech = np.concatenate(indices_tech)\n\n # Indices corresponding to flows in the biosphere params depending on the given database\n if 'biosphere' in inputs:\n mask = lambda j : np.all( [lca.bio_params['uncertainty_type']!=0, lca.bio_params['col']==j], axis=0 )\n indices_bio = [ np.where(mask(j))[0] for j in range(db_act_index_min_tech, db_act_index_max_tech+1) ]\n indices_bio = np.concatenate(indices_bio)\n\n indices_tech = np.sort(indices_tech)\n indices_bio = np.sort(indices_bio)\n\n # Do not add indices_tech that are already in the indices_tech_all\n indices_tech_same = np.intersect1d(indices_tech, indices_tech_all)\n pos_tech = np.array([ np.where(indices_tech==s)[0] for s in indices_tech_same ]).flatten()\n indices_tech = np.delete(indices_tech, pos_tech)\n np.append(indices_tech_all, indices_tech)\n\n # Do not add indices_bio that are already in the indices_bio_all\n indices_bio_same = np.intersect1d(indices_bio, indices_bio_all)\n pos_bio = np.array([ np.where(indices_bio==s)[0] for s in indices_bio_same ]).flatten()\n indices_bio = np.delete(indices_bio, pos_bio)\n np.append(indices_bio_all, indices_bio)\n \n inputs_dict[input_]['tech_params'] = lca.tech_params[indices_tech] #TODO maybe remove later, indices should be sufficient\n inputs_dict[input_]['tech_params_where'] = indices_tech\n inputs_dict[input_]['tech_n_params'] = len(indices_tech) #TODO remove later\n\n inputs_dict[input_]['bio_params'] = lca.bio_params[indices_bio] #TODO maybe remove later\n inputs_dict[input_]['bio_params_where'] = indices_bio\n inputs_dict[input_]['bio_n_params'] = len(indices_bio)\n\n\n self.indices_tech_all = 
indices_tech_all #TODO remove later\n self.indices_bio_all = indices_bio_all\n self.inputs_dict = inputs_dict", "def main():\n\n # Get dataset and create pandas dataframe\n f_data = \"../data/dataset.xlsx\"\n df = pd.read_excel(f_data)\n\n # Get variables for indices\n years = list(set(df[\"Year\"][3:]))\n years_arr = df[\"Year\"][3:]\n\n # Get values from dataset\n population = df[\"Population.1\"][3:]\n auto_commuters = df[\"Auto\"][3:]\n free_traffic = df[\"Freeway\"][3:]\n arterial_traffic = df[\"Arterial Street\"][3:]\n general_time_value = df[\"Cost Components\"][3:]\n commercial_time_value = df[\"Unnamed: 12\"][3:]\n gasoline_cost = df[\"Unnamed: 13\"][3:]\n diesel_cost = df[\"Unnamed: 14\"][3:]\n excess_fuel_per_commuter = df[\"Unnamed: 20\"][3:]\n annual_hrs_of_delay = df[\"Unnamed: 24\"][3:]\n travel_time_index = df[\"Travel Time Index\"][3:]\n cost_per_autocommuter = df[\"Unnamed: 34\"][3:]\n uber = df[\"Uber Entry Dummies\"][3:]\n lyft = df[\"Lyft Entry Dummies\"][3:]\n both = df[\"UberXlyft\"][3:]\n unemployment = df[\"Unemployment Rate (%)\"][3:]\n\n # Get covariances\n filled_ump = copy.deepcopy(unemployment).fillna(value=0)\n print(\"Correlation of uber and ump: {}\".format(np.corrcoef(filled_ump, uber)))\n print(\"Correlation of lyft and ump: {}\".format(np.corrcoef(filled_ump, lyft)))\n print(\"Covariance of tti and ump: {}\".format(np.corrcoef(filled_ump,\n travel_time_index.astype(np.float32))))\n print(\"Covariance of cost and ump: {}\".format(np.corrcoef(filled_ump,\n cost_per_autocommuter.astype(np.float32))))\n print(\"Covariance of excess and ump: {}\".format(np.corrcoef(filled_ump,\n excess_fuel_per_commuter.astype(np.float32))))\n print(\"Covariance of delay and ump: {}\".format(np.corrcoef(filled_ump,\n annual_hrs_of_delay.astype(np.float32))))\n\n # Create output data structure\n year_dict = {years[i]: {\"pop\": [], \"auto\": [], \"free\": [], \"art\": [],\n \"gen_time\": [], \"comm_time\": [], \"gas\": [], \"diesel\":\n [], \"ann_delay\": [], \"travel_index\": [], \"cost\":\n [], \"ub\": [], \"ly\": [], \"bo\": [], \"ump\": [],\n \"excess_gas\": []} for i in range(len(years))}\n\n # Counter variable\n i = 0\n\n # Iterate through everything for plots\n for year, pop, auto, free, art, gen_time, comm_time, gas, diesel, excess_gas, \\\n ann_delay, travel_index, cost, ub, ly, bo, ump in \\\n zip(years_arr, population, auto_commuters, free_traffic,\n arterial_traffic, general_time_value, commercial_time_value,\n gasoline_cost, diesel_cost, excess_fuel_per_commuter,\n annual_hrs_of_delay, travel_time_index, cost_per_autocommuter,\n uber, lyft, both, unemployment):\n\n # Append values to dictionary for plotting\n year_dict[year][\"pop\"].append(pop)\n year_dict[year][\"auto\"].append(auto)\n year_dict[year][\"free\"].append(free)\n year_dict[year][\"art\"].append(art)\n year_dict[year][\"gen_time\"].append(gen_time)\n year_dict[year][\"comm_time\"].append(comm_time)\n year_dict[year][\"gas\"].append(gas)\n year_dict[year][\"diesel\"].append(diesel)\n year_dict[year][\"ann_delay\"].append(ann_delay)\n year_dict[year][\"travel_index\"].append(travel_index)\n year_dict[year][\"cost\"].append(cost)\n year_dict[year][\"ub\"].append(ub)\n year_dict[year][\"ly\"].append(ly)\n year_dict[year][\"bo\"].append(bo)\n year_dict[year][\"ump\"].append(ump)\n year_dict[year][\"excess_gas\"].append(excess_gas)\n\n # Average values according to year\n for key_i in list(year_dict.keys()):\n for key_j in list(year_dict[key_i].keys()):\n vals = copy.deepcopy(year_dict[key_i][key_j])\n 
year_dict[key_i][key_j] = np.mean(vals)\n\n # Now make arrays for time series data\n pop_by_year = [year_dict[years[i]][\"pop\"] for i in range(len(years))]\n auto_by_year = [year_dict[years[i]][\"auto\"] for i in range(len(years))]\n free_by_year = [year_dict[years[i]][\"free\"] for i in range(len(years))]\n art_by_year = [year_dict[years[i]][\"art\"] for i in range(len(years))]\n gen_time_by_year = [year_dict[years[i]][\"gen_time\"] for i in range(len(years))]\n comm_time_by_year = [year_dict[years[i]][\"comm_time\"] for i in range(len(\n years))]\n gas_by_year = [year_dict[years[i]][\"gas\"] for i in range(len(years))]\n diesel_by_year = [year_dict[years[i]][\"diesel\"] for i in range(len(years))]\n ann_delay_by_year = [year_dict[years[i]][\"ann_delay\"] for i in range(len(\n years))]\n travel_index_by_year = [year_dict[years[i]][\"travel_index\"] for i in\n range(len(years))]\n cost_by_year = [year_dict[years[i]][\"cost\"] for i in range(len(years))]\n ub_by_year = [year_dict[years[i]][\"ub\"] for i in range(len(years))]\n ly_by_year = [year_dict[years[i]][\"ly\"] for i in range(len(years))]\n bo_by_year = [year_dict[years[i]][\"bo\"] for i in range(len(years))]\n ump_by_year = [year_dict[years[i]][\"ump\"] for i in range(len(years))]\n excess_gas_per_year = [year_dict[years[i]][\"excess_gas\"] for i in range(len(\n years))]\n\n\n # Make plots\n plt.plot(years, pop_by_year)\n plt.xlabel(\"Year\")\n plt.ylabel(\"Average Population of UMR Urban Centers (1000s)\")\n plt.title(\"Average Population of Urban Mobility Report Urban Centers over Time\")\n plt.savefig(\"../graphs/pop_vs_time.png\")\n plt.clf()\n\n plt.plot(years, auto_by_year)\n plt.xlabel(\"Year\")\n plt.ylabel(\"Autocommuters (1000s)\")\n plt.title(\"Average Number of Autocommuters in UMI Urban Centers (1000s)\")\n plt.savefig(\"../graphs/auto_vs_time.png\")\n plt.clf()\n\n plt.plot(years, free_by_year, color=\"b\", label=\"Freeways\")\n plt.plot(years, art_by_year, color=\"r\", label=\"Arterial Roads\")\n plt.legend()\n plt.xlabel(\"Year\")\n plt.ylabel(\"Driving Distance (miles)\")\n plt.title(\"Average Net Freeway/Arterial Road Driving over Time (\"\n \"1000s of miles)\")\n plt.savefig(\"../graphs/dist_vs_time.png\")\n plt.clf()\n\n plt.plot(years, gen_time_by_year, color=\"b\", label=\"General Value\")\n plt.plot(years, comm_time_by_year, color=\"r\", label=\"Commercial Value\")\n plt.legend()\n plt.xlabel(\"Year\")\n plt.ylabel(\"Value ($/hr)\")\n plt.title(\"Average General and Commercial Values of Time over Time\")\n plt.savefig(\"../graphs/val_of_time_vs_time.png\")\n plt.clf()\n\n plt.plot(years, gas_by_year, color=\"b\", label=\"Gasoline\")\n plt.plot(years, diesel_by_year, color=\"r\", label=\"Diesel\")\n plt.legend()\n plt.xlabel(\"Year\")\n plt.ylabel(\"Cost ($/gallon)\")\n plt.title(\"Average Cost of Gasoline and Diesel Fuel over Time\")\n plt.savefig(\"../graphs/gas_vs_time.png\")\n plt.clf()\n\n plt.plot(years, ann_delay_by_year)\n plt.xlabel(\"Year\")\n plt.ylabel(\"Annual per-Commuter Traffic Delays (hrs)\")\n plt.title(\"Average Annual per-Commuter Traffic Delays over Time\")\n plt.savefig(\"../graphs/delay_vs_time.png\")\n plt.clf()\n\n plt.plot(years, travel_index_by_year)\n plt.xlabel(\"Year\")\n plt.ylabel(\"Travel Index\")\n plt.title(\"Average Travel Index over Time\")\n plt.savefig(\"../graphs/index_vs_time.png\")\n plt.clf()\n\n plt.plot(years, ump_by_year)\n plt.xlabel(\"Year\")\n plt.ylabel(\"Unemployment Rate (%)\")\n plt.title(\"Average Unemployment Rate over Time\")\n 
plt.savefig(\"../graphs/ump_vs_time.png\")\n plt.clf()\n\n plt.plot(years, cost_by_year)\n plt.xlabel(\"Year\")\n plt.ylabel(\"Cost ($)\")\n plt.title(\"Average Annual per-Capita Cost of Traffic Congestion over Time\")\n plt.savefig(\"../graphs/cost_vs_time.png\")\n plt.clf()\n\n plt.plot(years, excess_gas_per_year)\n plt.xlabel(\"Year\")\n plt.ylabel(\"Excess Fuel Consumed (Gallons)\")\n plt.title(\"Average Annual per-Capita Excess Fuel Consumed over Time\")\n plt.savefig(\"../graphs/extra_fuel_vs_time.png\")\n plt.clf()\n\n x = list(lyft) # Lyft data\n y = list(uber) # Uber data\n bins = [2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018]\n\n plt.hist([x, y], bins, label=['Lyft', 'Uber'])\n plt.legend(loc='upper right')\n plt.xlabel(\"Year\")\n plt.ylabel(\"Number of cities entered\")\n plt.title(\"Uber and Lyft Entry into Urban Mobility Report Cities\")\n plt.clf()", "def main(desc_key, fxyz, peratom, scale, pca_d, keep_raw=False, output=None, prefix='ASAP'):\n\n if output is None:\n output = prefix + \"-pca-d\" + str(pca_d) + '.xyz'\n peratom = bool(peratom)\n\n # read the xyz file\n frames = ase.io.read(fxyz, ':')\n n_frames = len(frames)\n print('load xyz file: ', fxyz, ', a total of ', str(n_frames), 'frames')\n\n # extract the descriptors from the file\n desc = []\n if n_frames == 1 and not peratom:\n raise RuntimeError('Per-config PCA not possible on a single frame')\n\n # retrieve the SOAP vectors --- both of these throw a ValueError if any are missing or are of wrong shape\n if peratom:\n desc = np.concatenate([a.get_array(desc_key) for a in frames])\n else:\n desc = np.row_stack([a.info[desc_key] for a in frames])\n\n # scale & center\n if scale:\n from sklearn.preprocessing import StandardScaler\n scaler = StandardScaler()\n print('DEBUG: {}'.format(desc.shape))\n print(scaler.fit(desc))\n desc = scaler.transform(desc) # normalizing the features\n\n # fit PCA\n proj, pvec = pca(desc, pca_d)\n # could do with sklearn as well\n # from sklearn.decomposition import PCA\n # pca_sklearn = PCA(n_components=4) # can set svd_solver\n # proj = pca_sklearn.fit_transform(desc)\n # pvec = pca_sklearn.components_\n\n # add coords to info/arrays\n if peratom:\n running_index = 0\n for at in frames:\n n_atoms = len(at)\n at.arrays['pca_coord'] = proj[running_index:running_index + n_atoms, :].copy()\n running_index += n_atoms\n\n if not keep_raw:\n for at in frames:\n del at.arrays[desc_key]\n else:\n for i, at in enumerate(frames):\n at.info['pca_coord'] = proj[i]\n\n if not keep_raw:\n for at in frames:\n del at.info[desc_key]\n\n # save\n ase.io.write(output, frames, write_results=False)", "def initializeData():\n\n # Read in the CSV\n allX = pd.read_csv('completeData.csv', keep_default_na=False)\n xValues = pd.read_csv('formattedXValues.csv')\n filename = \"completeData.csv and formattedXValues.csv\"\n\n # Separate the CSV columns into array variables and numpy vars to store new categorical variables\n mixNum = allX['Mix Number']\n mixP = allX['Mix Proportion']\n mixPFinal = np.empty(len(mixP))\n scm = allX['SCM']\n scmFinal = np.empty(len(scm))\n fineA = allX['Fine Aggregate']\n fineAFinal = np.empty(len(fineA))\n coarseA = allX['Coarse Aggregate']\n coarseAFinal = np.empty(len(coarseA))\n\n # Loop through every mix in the csv file\n # Not sure how to do 3 different variables\n for y in range(0, len(mixNum)):\n # Sort Mix Proportions\n if mixP[y] == \"A-F\":\n mixPFinal[y] = 2\n elif mixP[y] == \"A-S\":\n mixPFinal[y] = 1\n elif mixP[y] == \"A\":\n mixPFinal[y] = 0\n else:\n 
print('Unidentified Variable in mixP: ')\n print(mixP[y])\n\n # Sort SCM into slag or fly ash\n if scm[y] == 'N/A':\n scmFinal[y] = 1000\n elif scm[y] == 'Slag 1':\n scmFinal[y] = 0\n elif scm[y] == 'Slag 2':\n scmFinal[y] = 0\n elif scm[y] == 'Fly Ash 1':\n scmFinal[y] = 1\n elif scm[y] == 'Fly Ash 2':\n scmFinal[y] = 1\n elif scm[y] == 'Fly Ash 3':\n scmFinal[y] = 1\n else:\n print('Unidentified Variable in scm: ')\n print(scm[y])\n\n # Sort the fine aggregate\n if fineA[y] == 'Sand A':\n fineAFinal[y] = 0\n elif fineA[y] == 'Sand B':\n fineAFinal[y] = 1\n else:\n print('Unidentified Variable in fineA: ')\n print(fineA[y])\n\n # Sort the coarse aggregate\n if coarseA[y] == 'GG1':\n coarseAFinal[y] = 0\n elif coarseA[y] == 'GG2':\n coarseAFinal[y] = 0\n elif coarseA[y] == 'GG3':\n coarseAFinal[y] = 0\n elif coarseA[y] == 'GG4':\n coarseAFinal[y] = 0\n elif coarseA[y] == 'GG5':\n coarseAFinal[y] = 0\n elif coarseA[y] == 'GG6':\n coarseAFinal[y] = 0\n elif coarseA[y] == 'CS1':\n coarseAFinal[y] = 1\n elif coarseA[y] == 'CS2':\n coarseAFinal[y] = 1\n elif coarseA[y] == 'CS3':\n coarseAFinal[y] = 1\n elif coarseA[y] == 'CS4':\n coarseAFinal[y] = 1\n elif coarseA[y] == 'CS5':\n coarseAFinal[y] = 1\n elif coarseA[y] == 'CS6':\n coarseAFinal[y] = 1\n elif coarseA[y] == 'CS7':\n coarseAFinal[y] = 1\n elif coarseA[y] == 'CS8':\n coarseAFinal[y] = 1\n elif coarseA[y] == 'CS9':\n coarseAFinal[y] = 1\n else:\n print('Unidentified Variable in coarseA: ')\n print(coarseA[y])\n\n # One Hot Encode the sorted variables\n encodedMixP = pd.get_dummies(mixPFinal)\n encodedSCM = pd.get_dummies(scmFinal)\n encodedFineA = pd.get_dummies(fineAFinal)\n encodedCoarseA = pd.get_dummies(coarseAFinal)\n\n # Update the headers for onehotencoded variables\n # Get the current variable names\n encodedSCMlist = list(encodedSCM.columns.values)\n encodedFineAlist = list(encodedFineA.columns.values)\n encodedCoarseAlist = list(encodedCoarseA.columns.values)\n encodedMixPlist = list(encodedMixP.columns.values)\n # go through and replace the current names with the updated ones\n encodedSCM.rename(columns={encodedSCMlist[0]: 'SCM_0', encodedSCMlist[1]: 'SCM_1', encodedSCMlist[2]: 'SCM_1000'},\n inplace=True)\n encodedFineA.rename(columns={encodedFineAlist[0]: 'FineA_0', encodedFineAlist[1]: 'FineA_1'}, inplace=True)\n encodedCoarseA.rename(columns={encodedCoarseAlist[0]: 'CoarseA_0', encodedCoarseAlist[1]: 'CoarseA_1'},\n inplace=True)\n encodedMixP.rename(columns={encodedMixPlist[0]: 'MixP_0', encodedMixPlist[1]: 'MixP_1', encodedMixPlist[2]: 'MixP_2'},\n inplace=True)\n\n # Remake the dataframe to include the onehotencoded columns instead of the regular columns.\n firstHalf = allX.ix[:, :21]\n cte = allX.ix[:, 25]\n oneHotEncodedframe = pd.concat([encodedMixP, encodedSCM, encodedFineA, encodedCoarseA], axis=1)\n secondHalf = xValues.ix[:, 6:]\n completearray = pd.concat([firstHalf, cte, oneHotEncodedframe, secondHalf], axis=1)\n variablenames = list(completearray.columns.values)\n # convert to numpy array\n completenumpyarray = completearray.as_matrix()\n\n # remove the first 15 rows in the array to clear the NaN entries\n completenumpyarray = completenumpyarray[15:, :]\n # Also, remove the columns that include mix A as well as SCM_1000\n\n #####\n # Now, Ask whether or not to run decision trees on batch A data or batch B\n batch = input(\"which batch to run tests on (A or B)? 
\")\n\n if batch == \"A\":\n\n # break up the data into the batch A values\n batchAYcolumns = [0, 5, 6, 7, 8, 21]\n yvariables = np.transpose(completenumpyarray[:, batchAYcolumns])\n numyvariables = 6\n yvariablenames = [variablenames[x] for x in batchAYcolumns]\n batchAXcolumns = [23, 24, 25, 26, 28, 29, 30, 31, 32, 35, 38, 41]\n # normalize the x variables. Will normalize y variables in the main body\n # after a histogram of the data is created.\n xvariables = completenumpyarray[:, batchAXcolumns]\n # Normalize each of the x variables\n # get number of columns of x variables\n xVariablesShape = xvariables.shape\n # index through each of the columns and find the l2 norm\n for p in range(0, xVariablesShape[1]):\n x_mean = xvariables[:, p].mean()\n x_std = xvariables[:, p].std()\n # index through each value of the column (thus, go through each row) and divide by the l2 norm\n xvariables[:, p] = (xvariables[:, p] - x_mean) / x_std\n xvariablenames = [variablenames[x] for x in batchAXcolumns]\n\n elif batch == \"B\":\n\n # break up the data into the batch B values\n batchBYcolumns = [0, 1, 2, 3, 4, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]\n yvariables = np.transpose(completenumpyarray[:, batchBYcolumns])\n numyvariables = 17\n yvariablenames = [variablenames[x] for x in batchBYcolumns]\n batchBXcolumns = [23, 24, 25, 26, 28, 29, 30, 31, 33, 36, 39, 42]\n # normalize the x variables. Will normalize y variables in the main body\n # after a histogram of the data is created.\n xvariables = completenumpyarray[:, batchBXcolumns]\n # Normalize each of the x variables\n # get number of columns of x variables\n xVariablesShape = xvariables.shape\n # index through each of the columns and find the l2 norm\n for p in range(0, xVariablesShape[1]):\n x_mean = xvariables[:, p].mean()\n x_std = xvariables[:, p].std()\n # index through each value of the column (thus, go through each row) and divide by the l2 norm\n xvariables[:, p] = (xvariables[:, p] - x_mean) / x_std\n xvariablenames = [variablenames[x] for x in batchBXcolumns]\n\n else:\n print(\"Invalid Input.\")\n exit(0)\n\n return completenumpyarray, xvariables, filename, xvariablenames, yvariablenames, numyvariables, yvariables, batch", "def pca_data(train_data_lst, test_data_lst, data_anots):\r\n \r\n train_data_pca = []\r\n test_data_pca = []\r\n new_anots = []\r\n\r\n for idx in range(len(train_data_lst)):\r\n pca = PCA(n_components=0.985)\r\n X_train = pca.fit_transform(train_data_lst[idx])\r\n train_data_pca.append(X_train)\r\n \r\n X_test = pca.transform(test_data_lst[idx])\r\n test_data_pca.append(X_test)\r\n new_anots.append(data_anots[idx]+'_pca')\r\n return train_data_pca, test_data_pca, new_anots", "def main(argv=None):\n args = inputs(argv)\n site = args.site\n node = args.node\n sensor = args.sensor\n method = args.method\n stream = args.stream\n deploy = args.deploy\n start = args.start\n stop = args.stop\n\n # check if we are specifying a deployment or a specific date and time range\n if not deploy or (start and stop):\n return SyntaxError('You must specify either a deployment number or beginning and end dates of interest.')\n\n # if we are specifying a deployment number, then get the data from the Gold Copy THREDDS server\n if deploy:\n optaa = load_gc_thredds(site, node, sensor, method, stream, ('.*deployment%04d.*OPTAA.*\\\\.nc$' % deploy))\n cal_file = ('{}-{}-{}.deploy{:02d}.cal_coeffs.json'.format(site, node, sensor, deploy))\n\n # check to see if we downloaded any data\n if not optaa:\n exit_text = ('Data unavailable for 
%s-%s-%s, %s, %s, deployment %d.' % (site, node, sensor, method,\n stream, deploy))\n raise SystemExit(exit_text)\n\n else:\n # otherwise, request the data for download from OOINet via the M2M API using the specified dates\n r = m2m_request(site, node, sensor, method, stream, start, stop)\n if not r:\n exit_text = ('Request failed for %s-%s-%s, %s, %s, from %s to %s.' % (site, node, sensor, method,\n stream, start, stop))\n raise SystemExit(exit_text)\n\n # OPTAA data is different from other instruments. it needs to be processed on a per-deployment basis in order\n # to get the correct number of wavelengths before it can be merged into a single dataset. create a list of\n # all the files that were returned by the M2M request, and determine the deployments that are included in the\n # request\n files = list_files(r['allURLs'][0], '.+OPTAA.+\\\\.nc$')\n if not files:\n exit_text = ('Data unavailable for %s-%s-%s, %s, %s, from %s to %s.' % (site, node, sensor, method,\n stream, start, stop))\n raise SystemExit(exit_text)\n\n deployments = np.unique([int(sub.split('/')[3][10:14]) for sub in files])\n\n # loop through the deployments and download the data for each one\n optaa = []\n cal_file = []\n for deploy in deployments:\n # Valid M2M request, download the data on a per-deployment basis\n data = m2m_collect(r, ('.*deployment%04d.*OPTAA.*\\\\.nc$' % deploy))\n if data:\n optaa.append(data)\n cal_file.append('{}-{}-{}.deploy{:02d}.cal_coeffs.json'.format(site, node, sensor, deploy))\n\n # check to see if we downloaded any data (remove empty/none entries from the list)\n if not optaa:\n exit_text = ('Data unavailable for %s-%s-%s, %s, %s, from %s to %s.' % (site, node, sensor, method,\n stream, start, stop))\n raise SystemExit(exit_text)\n\n # set up the calibration file path and name(s)\n out_file = os.path.abspath(args.outfile)\n cal_path = os.path.dirname(out_file)\n if not os.path.exists(cal_path):\n os.makedirs(cal_path)\n\n # clean-up and reorganize the data\n multi = isinstance(optaa, list)\n if node in ['SF01A', 'SF01B', 'SF03A']:\n # this OPTAA is on a shallow profiler\n if multi:\n for i, ds in enumerate(optaa):\n cfile = os.path.join(cal_path, cal_file[i])\n optaa[i] = optaa_profiler(ds, cfile)\n optaa = xr.concat(optaa, dim='time')\n else:\n cal_file = os.path.join(cal_path, cal_file)\n optaa = optaa_profiler(optaa, cal_file)\n else:\n # this OPTAA is on one of the two benthic platforms\n if multi:\n for i, ds in enumerate(optaa):\n cfile = os.path.join(cal_path, cal_file[i])\n optaa[i] = optaa_benthic(ds, cfile)\n optaa = xr.concat(optaa, dim='time')\n else:\n optaa = optaa_benthic(optaa, cal_file)\n\n # get the vocabulary information for the site, node, and sensor and update the dataset attributes\n vocab = get_vocabulary(site, node, sensor)[0]\n optaa = optaa.sortby(['deployment', 'time'])\n optaa = update_dataset(optaa, vocab['maxdepth'])\n\n # save the data to disk\n if not os.path.exists(os.path.dirname(out_file)):\n os.makedirs(os.path.dirname(out_file))\n optaa.to_netcdf(out_file, mode='w', format='NETCDF4', engine='h5netcdf', encoding=ENCODINGS)", "def _reset_parameters(self) -> None:\n self._setup_input = {\n \"P\": csc_matrix(2.0 * self.opt.P(self.p).toarray()),\n \"q\": self.opt.q(self.p).toarray().flatten(),\n }\n if self.opt_type in CONSTRAINED_OPT:\n A = self.opt.A(self.p)\n b = self.opt.b(self.p)\n self._setup_input[\"A\"] = csc_matrix(\n cs.vertcat(self.opt.M(self.p), A, -A).toarray()\n )\n self._setup_input[\"l\"] = (\n cs.vertcat(-self.opt.c(self.p), -b, 
b).toarray().flatten()\n )", "def main():\n\n # Script arguments... \n \"\"\" If running as standalone, hardcode theWorkspace and inFile \"\"\"\n theWorkspace = arcpy.GetParameterAsText(0)\n if not theWorkspace:\n theWorkspace = r\"d:\\_dataTest\"\n arcpy.env.workspace = theWorkspace\n arcpy.env.overwriteOutput = True\t\n\n inFile = arcpy.GetParameterAsText(1)\n if not inFile:\n inFile = \"updateMultipleSourcePaths.csv\"\n inFile = r\"\\\\dfg.alaska.local\\gis\\Anchorage\\GISStaff\\___gisStaffConnections\\RepairBrokenSrcAug242015.csv\"\n\n outWorkspace = arcpy.GetParameterAsText(2)\n if not outWorkspace:\n outWorkspace = os.path.join(theWorkspace, \"_repaired\")\n '''if not os.path.isdir(outWorkspace): \n os.makedirs(outWorkspace)\n myMsgs(\"created new directory {0} \\n\".format(outWorkspace))'''\n\n # Create .txt Report of what it thinks was fixed, tagged with YYYYMMDD_HHMM\n outFile = \"FixedReport\"\n fileDateTime = curFileDateTime()\n currentDate = curDate()\n outfileTXT = os.path.join(theWorkspace, outFile) + fileDateTime + \".txt\" \n myMsgs (outFile)\n reportFile = open(outfileTXT, 'w')\n myMsgs( \"File {0} is open? {1}\".format(outfileTXT, str(not reportFile.closed)))\n outText = \"Report for what it THINKS it repaired in {0}, on {1} \\n \".format(theWorkspace, currentDate)\n outText += \" Includes coverages (pts, poly, arc, anno), shapes, and FGDB data.\" + '\\n'\n outText += \"-----------------------------------------------------\" + '\\n' \n reportFile.write(outText)\t\n\n mxd = None\n outMXDName = \"none\"\n updatePath = []\n cvrList = [r\"\\arc\", r\"\\polygon\", r\"\\region\", r\"\\point\", r\"\\tic\" ]\n lstExtDatatype = [[\".shp\", \"SHAPEFILE_WORKSPACE\" ], [\".sde\",\"SDE_WORKSPACE\"], \n [\".mdb\", \"ACCESS_WORKSPACE\" ], [\".gdb\", \"FILEGDB_WORKSPACE\"], \n [\"cover\", \"ARCINFO_WORKSPACE\"]]\t\n cntMXD = 0\n cntFixed = 0\n cntTotalFixed = 0\n\n # makes sure the .csv file exists\n if arcpy.Exists(inFile):\n myMsgs (\"->Using {0} to repair paths.\\n==============================\".format(inFile))\n # walks thru the workspace to create list of files \n for root, dirs, files in os.walk(theWorkspace): \t\t\n for fileName in files:\n if root == outWorkspace: # don't process mxd's in the target directory\n pass\n else:\n fullPath = os.path.join(root, fileName)\n basename, extension = os.path.splitext(fileName)\n # Only process .mxd files\n if extension == \".mxd\":\n myMsgs(\"\\nReviewing MXD: {0}\".format(fullPath))\n reportFile.write(\"\\nReviewing MXD: {0}\".format(fullPath))\n mxd = arcpy.mapping.MapDocument(fullPath)\n dfs = arcpy.mapping.ListDataFrames(mxd)\n cntMXD += 1\n cntFixed = 0\n basename, extension = os.path.splitext(fileName)\n # New output mxd name....\n outMXDName = os.path.join(outWorkspace, (str(basename) + \".mxd\")) #\"_fix.mxd\"))\n # create list of the tables since they are handle differently\n theTables = arcpy.mapping.ListTableViews(mxd)\n # Loops thru dataframes so adding and deleting Services will work.\n for df in dfs:\n # Loops thru layers, checks for broken links and tries to repair\n lyrList = arcpy.mapping.ListLayers(mxd, \"\", df)\n for lyr in lyrList:\n if lyr.isBroken:\n if not lyr.supports(\"DATASOURCE\") and not lyr.isServiceLayer:\n myMsgs(\" ->Skipping {0} not a Service layer, and does not support DATASOURCE\".format(lyr.name))\n pass #continue\n elif not lyr.supports(\"DATASOURCE\") and lyr.isServiceLayer:\n myMsgs(\" -Broken Service: {0}\".format(lyr.name))\n else:\n myMsgs(\" -Broken: {0}\".format(lyr.dataSource))\n #myMsgs(\"layer 
is Group {0} or ServiceLayer {1}\".format(lyr.isGroupLayer, lyr.isServiceLayer))\n if (lyr.isGroupLayer or (\"Events\" in lyr.name)) and (not lyr.isServiceLayer): # Groups and Event FC skipped\n myMsgs(\" ...skipping group or event: {0}\".format(lyr.name))\n reportFile.write(\"\\n *skipping group or event: {0} \\n\".format(lyr.name))\n pass #break\n elif lyr.isServiceLayer: # services might have to be handle differently\n if lyr.supports(\"SERVICEPROPERTIES\"):\n for spType, spName in lyr.serviceProperties.iteritems():\n myMsgs(\" Service Properties: {0}: {1}\".format(spType, spName ))\n if spType == \"URL\": \n dataSource = str(spName)\n lyrType = (\"service_{}\".format(lyr.name))\n break\n myMsgs(\" ->this ia a service....using add and remove layer\")\n updatePath = findUpdatePath(inFile, dataSource, lyrType.strip().lower())\n newDSPath, newDSName = os.path.split(updatePath[0])\n if (\"service\" in updatePath[3]) and (\"service\" in updatePath[1]):\n insertLayer = arcpy.mapping.Layer(updatePath[0])\n print(\"dataframe: {0}\".format(df))\n arcpy.mapping.InsertLayer(df, lyr, insertLayer, \"AFTER\")\n arcpy.mapping.RemoveLayer(df, lyr)\n reportFile.write(\"\\n ->sees this as service....{0} \\n\".format(dataSource))\n # will still look at deleted version after insert, not the new version..\n # isBroken will give false info even if fixed, so \n # don't use myMsgs(\"Still broken? {0}\".format(lyr.isBroken)) \n else:\n myMsgs(\" --> a service layer but no SERVICE PROPERTIES\")\n elif lyr.supports(\"DATASOURCE\") and lyr.supports(\"DATASETNAME\"): \n # not a group, event or what it thinks is a service\n updatePath = findUpdatePath(inFile, lyr.dataSource, \"\")\n newDSPath, newDSName = os.path.split(updatePath[0])\n sameType = updatePath[2] \n for cvr in cvrList: #checks to see if the source layer is a coverage...must handle different\n if cvr in lyr.dataSource:\n sourceIsCoverage = True\n break\n else:\n sourceIsCoverage = False\n # updatePath[1] is False if there wasn't a match\n # so \"not update[1]\" means no match was found, and moves to next layer\t\t\t\t\t\t\t\t\n if not updatePath[1]: # if no match was found\n myMsgs(\" !! 
no match to: {0} \".format(lyr.dataSource))\n updateStatus = \"no match, not changed\" # used for message only\n pass\n elif updatePath[1].strip().lower() == \"drive\":\n myMsgs(\" skipping drive-letter matches for now: {0}\".format(lyr.dataSource))\n updateStatus = \"can only find drive match...look into it)\"\n pass\n elif updatePath[1].strip().lower() == \"_review\":\n myMsgs(\" no new source assigned yet for: {0}\".format(lyr.dataSource))\n updateStatus = (\"review and update {0}\".format(inFile))\n pass\n else: #if lyr.supports(\"DATASOURCE\") and lyr.supports(\"DATASETNAME\"):\n updateStatus = str(updatePath[0]) # used for message only\n if lyr in theTables:\n #myMsgs(\" thinks its a table....using findAndReplsWorkspacePath\")\n myMsgs(\" *Moving {0}: {1} to new: {2}\".format(updatePath[3], lyr.dataSource, updatePath[0]))\n reportFile.write(\"\\n Moving {0}: {1} to new: {2} \\n\".format(updatePath[3], lyr.dataSource, updatePath[0]))\n lyr.findAndReplaceWorkspacePath(lyr.dataSource, updatePath, False) \n elif lyr.isRasterLayer:\n #myMsgs(\" thinks its a raster....using findAndReplsWorkspacePath\")\n myMsgs(\" *Moving {0}: {1} to new: {2}\".format(updatePath[3], lyr.dataSource, updatePath[0]))\n reportFile.write(\"\\n Moving {0}: {1} to new: {2} \\n\".format(updatePath[3], lyr.dataSource, updatePath[0]))\n newType = \"RASTER_WORKSPACE\"\n for extType in lstExtDatatype:\n if extType[0] in updatePath[0]:\n newType = extType[1] \n if extType[0] == '.gdb':\n newDSPath = newDSPath.split('.gdb', 1)[0] + '.gdb'\n #newType = extType[1]\n elif extType[0] == '.sde':\n newDSPath = newDSPath.split('.sde', 1)[0] + '.sde'\n break \n lyr.replaceDataSource(newDSPath, newType, newDSName, False)\n if not sameType:\n testOldTOC = updatePath[4].strip('\\\\')\n if lyr.name == testOldTOC:\n lyr.name = lyr.datasetName\n else:\n newType = updatePath[1] \n if sourceIsCoverage and sameType:\n newDSPath = os.path.split(newDSPath)[0]\n newType = \"ARCINFO_WORKSPACE\"\n for extType in lstExtDatatype:\n if extType[0] in updatePath[0]:\n newType = extType[1]\n if extType[0] == '.gdb':\n newDSPath = newDSPath.split('.gdb', 1)[0] + '.gdb'\n #newType = extType[1]\n elif extType[0] == '.sde':\n newDSPath = newDSPath.split('.sde', 1)[0] + '.sde'\n\n break\n print(\"line ~281 newType is: {0}\".format(newType))\n myMsgs(\" *Moving {0}: {1} to new: {2}\".format(updatePath[3], lyr.dataSource, updatePath[0]))\n reportFile.write(\"\\n Moving {0}: {1} to new: {2}\".format(updatePath[3], lyr.dataSource, updatePath[0]))\n lyr.replaceDataSource(newDSPath, newType, newDSName, False)\n #myMsgs(\" new datasource: {0}\".format(lyr.dataSource))\n myMsgs(\" **the new data source: {0}\".format(updateStatus))\n cntFixed += 1\n myMsgs(\" Still broken? {0}\".format(lyr.isBroken))\n else:\n myMsgs(\"not sure what it is, but can't process {0}\".format(lyr.name))\n \n else:\n myMsgs(\" -Not Broken: {0}\".format(str(lyr)))\n\n myMsgs(\" Number of links fixed processed: {0}\".format(cntFixed))\n myMsgs(\" -{0} Review complete.\".format(fullPath))\n reportFile.write(\" -Number of links fixed processed: {0} \\n\".format(cntFixed))\t\t\t\t\t\t\n reportFile.write(\" -{0} Review complete. 
\\n\\n\".format(fullPath))\n\n if cntFixed > 0:\n mxd.save()\n myMsgs(\"saved to {0}\".format(fullPath))\n reportFile.write(\"saved to {0}\".format(fullPath))\n cntTotalFixed += cntFixed\n cntFixed = 0\n \"\"\"if cntFixed > 0:\n\t\t\t\t\t\t\tmxd.saveACopy(outMXDName, '10.1')\n\t\t\t\t\t\t\tmyMsgs(\"saved to {0}\".format(outMXDName))\n\t\t\t\t\t\t\tcntFixed = 0\"\"\"\n '''if arcpy.Exists(outMXDName):\n outMXDName.()\n myMsgs(\"saved 1\")\n else:\n mxd.saveACopy(outMXDName, '10.1')\n myMsgs(\"saved 2\")'''\n del mxd\n cntFixed = 0\n else:\n myMsgs (\"ERROR: Required repair source list: [0] does not exit. \\n\".format(inFile))\n outText = (\"\\n\\n ==========================================\")\n outText += (\"\\n Number of MXD's processed: {0} \\n\".format(cntMXD))\n outText += (\" Total Number of links it fixed, all mxds: {0} \\n\".format(cntTotalFixed) )\n\n myMsgs(\" {0}\".format(outText))\n\n reportFile.write(outText)\n # close the .txt file, \n reportFile.close()\n myMsgs( \"File {0} is closed? {1}\".format(outfileTXT, str(reportFile.closed)))\t\n\n myMsgs('!!! Success !!! ')" ]
[ "0.6758269", "0.5737575", "0.5548193", "0.55029297", "0.5438735", "0.5360984", "0.5309265", "0.5304964", "0.52679616", "0.52612615", "0.5233207", "0.5211206", "0.5194293", "0.51810217", "0.51801294", "0.5130102", "0.5125658", "0.51204294", "0.51127857", "0.5099757", "0.5076984", "0.5068695", "0.506328", "0.505041", "0.5037173", "0.503172", "0.50252634", "0.5019474", "0.5018827", "0.5015913" ]
0.6933069
0
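The three trailing values of the row above appear to be a list of per-negative similarity scores (serialized as strings), a single score for the positive document, and its resulting rank. A small self-contained sketch of how such a row might be post-processed to find the hardest negative follows; the dictionary keys are illustrative assumptions, not a documented schema, and the string scores are cast to float before comparison.

    def hardest_negative(row):
        """Pick the highest-scoring (most confusable) negative in one row.

        Assumes the layout visible above: a list of per-negative scores
        stored as strings plus one score for the positive document.
        The field names are illustrative guesses, not a documented schema.
        """
        neg_scores = [float(s) for s in row["negative_scores"]]
        doc_score = float(row["document_score"])
        idx = max(range(len(neg_scores)), key=neg_scores.__getitem__)
        # Positive margin means the positive document still outscores it.
        return idx, neg_scores[idx], doc_score - neg_scores[idx]

    # Toy row with the same shape as above (made-up numbers):
    toy = {"negative_scores": ["0.61", "0.55", "0.70"], "document_score": "0.69"}
    print(hardest_negative(toy))  # index 2 is the hardest negative here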
Create a new wavelength named name, belonging to XCrystal object crystal, with wavelength and optionally f_pr, f_prpr assigned.
def __init__( self, name, crystal, wavelength, f_pr=0.0, f_prpr=0.0, dmin=0.0, dmax=0.0 ): # set up this object self._name = name self._crystal = crystal self._wavelength = wavelength self._f_pr = f_pr self._f_prpr = f_prpr self._resolution_high = dmin self._resolution_low = dmax # then create space to store things which are contained # in here - the sweeps self._sweeps = []
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, crystal, wavelength=1.54184, max2theta=180):\n self.wavelength = wavelength\n self.max2theta = np.radians(max2theta)\n self.name = crystal.name\n self.all_dhkl(crystal)\n self.atom_scatter(crystal)\n self.structure_factor(crystal)\n self.rec_matrix = crystal.rec_matrix\n self.intensity()\n self.pxrd()", "def __init__(self, name, number, wavelength):\n\n self.name = name\n self.number = number\n self.wavelength = wavelength", "def __init__(self, crystal, wavelength=1.54184, \n thetas = [0, 180], \n preferred_orientation = False, march_parameter = None):\n self.profiling = None\n self.wavelength = wavelength\n self.min2theta = np.radians(thetas[0])\n self.max2theta = np.radians(thetas[1])\n self.name = crystal.get_chemical_formula()\n self.preferred_orientation = preferred_orientation\n self.march_parameter = march_parameter\n self.all_dhkl(crystal)\n self.intensity(crystal)\n self.pxrdf()", "def PM_setWavelength(self,channel,wavelength):\n if channel not in ApexAP1000.PM_CHANNELS:\n raise ValueError('Unknow channel during power measurement')\n sentStr = self.headStr('PM')+'SETWAVELENGTH[%d] %g'%(channel,wavelength)\n return self.write(sentStr)", "def setUpWavelength(self,wavelength):\n if wavelength == None:\n self.wavelength = getDefaultWavelength()\n else:\n self.wavelength = wavelength\n return self", "def storeWavelengths(self, nm):\n pre = \"w,0\"\n d = {\"wavelength_nm\": list(nm)}\n self._writeline(pre, str(d))", "def __init__(self, wave=None, flux=None, central_wave=None, fwhm=None, velocity=False, fluxunits=FLAMBDA_CGS, waveunits=u.angstrom, name=''):\n self.wave = wave\n self.flux = flux\n self.fwhm = None\n self.velocity = None\n \n self.fluxunits = fluxunits\n self.waveunits = waveunits\n self.name = ''\n \n if (central_wave is not None) & (fwhm is not None):\n self.fwhm = fwhm\n self.velocity = velocity\n \n self.wave, self.flux = self.make_gaussian(central_wave, fwhm,\n wave_grid=wave,\n velocity=velocity,\n max_sigma=10)\n \n self.fnu_units = FNU_CGS\n self.to_fnu()", "def set_wavelength(self, wavelength):\n print('Setting Santec wavelength to %.4f nm' % wavelength)\n\n # We need to select which of the 4 lasers to select depending on\n # the desired wavelength\n\n if 1530.0 < wavelength < 1630.000001:\n self.santec1.write(\"SW 4\")\n self.santec4.write(\"WA %.4f\" % wavelength)\n if self.active_module != 4:\n self.active_module = 4\n time.sleep(5.00)\n else:\n time.sleep(0.01)\n\n elif 1440.0 < wavelength < 1530.1:\n self.santec1.write(\"SW 3\")\n self.santec3.write(\"WA %.4f\" % wavelength)\n if self.active_module != 3:\n self.active_module = 3\n time.sleep(5.00)\n else:\n time.sleep(0.01)\n\n elif 1355 < wavelength < 1440.1:\n self.santec1.write(\"SW 2\")\n self.santec2.write(\"WA %.4f\" % wavelength)\n if self.active_module != 2:\n self.active_module = 2\n time.sleep(5.00)\n else:\n time.sleep(0.01)\n\n elif 1259.999999 < wavelength < 1355.1:\n self.santec1.write(\"SW 1\")\n self.santec1.write(\"WA %.4f\" % wavelength)\n if self.active_module != 1:\n self.active_module = 1\n time.sleep(5.00)\n else:\n time.sleep(0.01)\n\n else:\n print(\"Wavelength out of range. 
No change will be made\")", "def set_wavelength(self, wavelength: float) -> None:\n\n assert isinstance(wavelength, float), \"Incompatible type\"\n\n #:SENSe[n][:CHANnel[m]]:POWer:WAVelength /?\n self._inst.write(\"SENS:POW:WAV {}\".format(wavelength))", "def __init__(self, wavelength):\r\n self.dividerString_ = \"\\nPixel\"\r\n \"\"\"\r\n Their relative wavenumber ranges are not.\r\n \"\"\"\r\n if wavelength == 785:\r\n self.START = 182\r\n self.END = 1986\r\n if wavelength == 1064:\r\n self.START = 58\r\n self.END = 486 \r\n \"\"\"\r\n Indices of ramanshift, dark and raw data are invariant. The indices\r\n refer to the column position of the data vector\r\n \"\"\"\r\n self.RAMANSHIFT = 3\r\n self.DARK = 4\r\n self.RAWDATA = 6\r\n \"\"\"\r\n Locations and files start as empty strings \r\n \"\"\"\r\n self.location = \"\"\r\n self.files = \"\"", "def wavelength(self):\n return self.get(self._names[\"wavelength\"])", "def wavelength_ex(hdulist):\n wave = hdulist[1].data['loglam']\n wave = 10**wave\n\n return wave", "def to_wavelength(self):\n\n if self.unit == 'f_lam':\n raise ValueError('Dispersion is arealdy in wavelength')\n elif self.unit == 'f_nu':\n self.flux = self.flux * self.dispersion**2 / (c.value * 1e+10)\n self.dispersion = (c.value * 1e+10) / self.dispersion\n\n self.flux = np.flip(self.flux, axis=0)\n self.dispersion = np.flip(self.dispersion, axis=0)\n\n elif self.unit == 'f_loglam':\n self.dispersion = np.exp(self.dispersion)\n self.flux = self.flux / self.dispersion\n else:\n raise ValueError('Spectrum unit not recognized: ', self.unit)\n\n self.unit = 'f_lam'", "def __init__(self,\n name: str = \"Untitled Wing\",\n xyz_le: np.ndarray = np.array([0, 0, 0]),\n xsecs: List['WingXSec'] = [],\n symmetric: bool = False,\n ):\n self.name = name\n self.xyz_le = np.array(xyz_le)\n self.xsecs = xsecs\n self.symmetric = symmetric", "def __init__(self, wavename):\n super(DWT_2D, self).__init__()\n wavelet = pywt.Wavelet(wavename)\n self.band_low = wavelet.rec_lo\n self.band_high = wavelet.rec_hi\n assert len(self.band_low) == len(self.band_high)\n self.band_length = len(self.band_low)\n assert self.band_length % 2 == 0\n self.band_length_half = math.floor(self.band_length / 2)", "def __init__(self, wavelength):\n # store experimental data\n self.x = wavelength\n\n # Central wavelengths of the lines are known constants:\n self.c1 = 422.\n self.c2 = 428.", "def define(self,name,zcmb,ra,dec,mjd=None,\n cosmo=None,type_=None,sfd98_dir=None,\n forced_mwebmv=None,zcmb_err=None, verbose=False):\n self.name = name\n self.set_zcmb(zcmb,zcmb_err)\n self.radec = ra,dec\n\n self._side_properties[\"mjd\"] = mjd\n self.type = type_\n if cosmo is None:\n from astropy.cosmology import Planck15\n cosmo = Planck15\n if verbose:\n print(\"Planck 2015 cosmology used by default\")\n warnings.warn(\"Planck 2015 cosmology used by default\")\n \n self.set_cosmo(cosmo)\n self._update_()\n if forced_mwebmv is not None:\n self.set_mwebmv(forced_mwebmv,force_it=True)", "def add_wavelength(filename, model, std_tol, overwrite=False, plot_path=None):\n hdulist = fits.open(filename)\n\n # read both hdu's\n logger.debug(\"\\tObject: {}\".format(hdulist[0].header['OBJECT']))\n\n # extract just the middle part of the CCD (we only really care about Halpha)\n tbl = Table(hdulist[1].data)\n\n if 'wavelength' in tbl.colnames and not overwrite:\n logger.debug(\"\\tTable already contains wavelength values!\")\n return\n\n # compute wavelength array for the pixels\n wavelength, var = model.gp.predict(model.y, 
tbl['pix']-model.x_shift,\n return_var=True)\n bad_idx = np.sqrt(var) > std_tol.to(u.angstrom).value\n wavelength[bad_idx] = np.nan\n\n tbl['wavelength'] = wavelength\n tbl['wavelength_err'] = np.sqrt(var)\n\n new_hdu1 = fits.table_to_hdu(tbl)\n new_hdulist = fits.HDUList([hdulist[0], new_hdu1])\n\n logger.debug(\"\\tWriting out file with wavelength array.\")\n new_hdulist.writeto(filename, overwrite=True)\n\n if plot_path is not None:\n # plot the spectrum vs. wavelength\n fig,axes = plt.subplots(2, 1, figsize=(12,8), sharex=True)\n\n axes[0].plot(tbl['wavelength'], tbl['source_flux'],\n marker='', drawstyle='steps-mid', linewidth=1.)\n axes[0].errorbar(tbl['wavelength'], tbl['source_flux'], 1/np.sqrt(tbl['source_ivar']),\n linestyle='none', marker='', ecolor='#666666', alpha=1., zorder=-10)\n axes[0].set_ylim(tbl['source_flux'][200]/4, np.nanmax(tbl['source_flux']))\n axes[0].set_yscale('log')\n\n axes[1].plot(tbl['wavelength'], tbl['background_flux'],\n marker='', drawstyle='steps-mid', linewidth=1.)\n axes[1].errorbar(tbl['wavelength'], tbl['background_flux'], 1/np.sqrt(tbl['background_ivar']),\n linestyle='none', marker='', ecolor='#666666', alpha=1., zorder=-10)\n axes[1].set_ylim(1e-1, np.nanmax(tbl['background_flux']))\n axes[1].set_yscale('log')\n\n fig.tight_layout()\n _filename_base = path.splitext(path.basename(filename))[0]\n fig.savefig(path.join(plot_path, '{0}_1d_wvln.png'\n .format(_filename_base)))\n\n plt.close(fig)", "def wavelength(refractive_index, omega):\n return 2 * np.pi * cgs.c / (refractive_index * omega)", "def wavelength(energy):\r\n return 2 * np.pi * PLANCK_CONSTANT * SPEED_OF_LIGHT / energy", "def wavelength(energy):\n return 2 * PI * PLANCK_CONSTANT * SPEED_OF_LIGHT / energy", "def __init__(self, name, crystal):\n\n # set up this object\n self._name = name\n self._crystal = crystal\n\n # then create space to store things which are contained\n # in here - the sweeps\n self._sweeps = []\n\n self.multi_indexer = None\n self.multi_refiner = None", "def setwave(hdr):\n\n # Parse the header\n npix = hdr['NAXIS1']\n crpix1 = hdr['CRPIX1'] if 'CRPIX1' in hdr else 1.\n crval1 = hdr['CRVAL1']\n\n cdelt1, dc_flag = get_cdelt_dcflag(hdr)\n\n # Generate\n wave = crval1 + cdelt1 * (np.arange(npix) + 1. - crpix1)\n if dc_flag == 1:\n wave = 10.**wave # Log\n\n return wave", "def set_wavelength(self, wavelength):\n assert 0 <= wavelength <= 5000\n # Note: When connected via the IC bus of the camera, it is not\n # possible to change the wavelength (or the grating) while the CCD\n # is acquiring. So this will fail with an exception, and that's\n # probably the best we can do (unless we want to synchronize with the\n # CCD and ask to temporarily stop the acquisition).\n\n # Currently the SDK sometimes fail with 20201: SHAMROCK_COMMUNICATION_ERROR\n # when changing wavelength by a few additional nm. 
It _seems_ that it\n # works anyway (but not sure).\n # It seems that retrying a couple of times just works\n\n retry = 0\n while True:\n # set in nm\n err = self._dll.ShamrockSetWavelength(self._device, c_float(wavelength))\n if err != 20202 and retry <= 5: # as long as no success and lower than 5 retries\n # just try again\n retry += 1\n print(\"Failed to set wavelength, will try again\")\n time.sleep(0.1)\n else:\n self._grating_center = wavelength\n self.status(\"Wavelength change\", err)\n break", "def wavelength(self):\n return wavelength(energy)", "def powder_XRD(crystal,wavelength, get_mults=False):\n \n # The wavenumber of the input wavelength\n nu = 2*n.pi/wavelength\n\n # Make a list of the accessible rlvs\n rlvs = find_accessible_rlvs(crystal,wavelength)\n \n # Now we calculate the scattering intensity from each rlv\n intensities = {\n tuple(rlv): n.abs(crystal.structure_factor(rlv))**2\n for rlv in rlvs}\n \n # Now sum up all rlvs with the same magnitude. We also\n # get rid of all the scattering vectors with 0 intensity\n magnitudes = {}\n multiplicities = {}\n for rlv, intensity in intensities.items():\n repeat = False\n mag = n.linalg.norm(rlv)\n for oldmag in magnitudes:\n if n.isclose(mag,oldmag):\n magnitudes[oldmag] += intensity\n multiplicities[oldmag] += 1\n repeat = True\n break\n if not repeat and not n.isclose(mag,0):\n multiplicities[mag] = 1\n magnitudes[mag] = intensity\n \n # Now we reformat the multiplicity data in a nice way\n multiplicities = {2 * n.arcsin(mag / (2 * nu)) * 180 / n.pi:\n multiplicity\n for mag, multiplicity in multiplicities.items()\n if not n.allclose(magnitudes[mag],0)}\n\n # And now we calculate the scattering intensities\n # (a.u. per steradian) as a function of scattering angle\n intensities = {2 * n.arcsin(mag / (2 * nu)) * 180 / n.pi:\n intensity * \n # This factor corrects for the fact that the same total\n # power in the debye scherrer rings is more\n # concentrated when 2\\theta is near 0 or 2pi\n 1 / n.sin(2*n.arcsin(mag/(2*nu))) *\n # This factor corrects for the probability that any\n # given crystal domain will scatter into the rlv\n 1 / mag *\n # This factor corrects for polarization effects,\n # Assuming an unpolarized input beam and no polarization\n # analysis\n (1 + n.cos(2*n.arcsin(mag/(2*nu)))**2)/2\n for mag, intensity in magnitudes.items()\n if not n.allclose(intensity,0)}\n if get_mults:\n return intensities, multiplicities\n else:\n return intensities", "def set_wavelenth(self, wavelength):\n if wavelength < 0:\n raise ValueError(\"The wavelength cannot be negative\")\n\n self.wavelength = wavelength\n RT_model_1D.set_scattering_cross_sec(self)\n RT_model_1D.get_atmoshperic_profiles(self)\n self.sun_intensity = f.sun_init_intensity(self.wavelength, self.stokes_dim)", "def wavelength(self):\n return self.getparam(\"WAVELENGTH\")", "def wavelength(self):\n return self.getparam(\"WAVELENGTH\")", "def newWaveform(self, name, size, stringOnly=0):\n msg='WLIST:WAVeform:NEW \"' +name+ '\", ' + str(size)+ ', REAL'\n if stringOnly==0:\n self.sendMessage(msg)\n else:\n return msg" ]
[ "0.6480484", "0.61226916", "0.5979", "0.554436", "0.54751027", "0.5458804", "0.5410918", "0.53420633", "0.52742976", "0.5226003", "0.5211166", "0.52007395", "0.5154255", "0.5141563", "0.50950485", "0.5072513", "0.5064921", "0.506434", "0.503512", "0.50331575", "0.503013", "0.49934363", "0.49644995", "0.49487048", "0.49407023", "0.49208927", "0.49139297", "0.49085042", "0.49085042", "0.4900795" ]
0.6854536
0
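The record above stores a beam wavelength in Angstroms together with the anomalous scattering terms f' and f'' and optional resolution limits. A companion calculation that often accompanies such an object is converting the wavelength to photon energy; the helper below is a self-contained sketch using standard physical constants and is not taken from the code base the snippet belongs to.

    # Standalone helper: convert an X-ray wavelength (Angstroms) to photon
    # energy (eV) via E = h*c / lambda. Constants are rounded CODATA values.
    PLANCK_EV_S = 4.135667696e-15      # Planck constant, eV*s
    SPEED_OF_LIGHT_M_S = 2.99792458e8  # speed of light, m/s

    def wavelength_to_energy_ev(wavelength_angstrom: float) -> float:
        return PLANCK_EV_S * SPEED_OF_LIGHT_M_S / (wavelength_angstrom * 1e-10)

    print(round(wavelength_to_energy_ev(0.9795)))  # ~12658 eV, near the Se K edge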
Add a sweep to this wavelength.
def add_sweep( self, name, sample, directory=None, image=None, beam=None, reversephi=False, distance=None, gain=0.0, dmin=0.0, dmax=0.0, polarization=0.0, frames_to_process=None, user_lattice=None, user_cell=None, epoch=0, ice=False, excluded_regions=None, ): if excluded_regions is None: excluded_regions = [] xsweep = XSweep( name, self, sample=sample, directory=directory, image=image, beam=beam, reversephi=reversephi, distance=distance, gain=gain, dmin=dmin, dmax=dmax, polarization=polarization, frames_to_process=frames_to_process, user_lattice=user_lattice, user_cell=user_cell, epoch=epoch, ice=ice, excluded_regions=excluded_regions, ) self._sweeps.append(xsweep) return xsweep
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_sweep(self, environment_name, agent_name, agent_params, sweep_dict):\n assert environment_name in self._environment_dict\n assert self._is_sweep is True or self._is_sweep is None\n self._is_sweep = True\n\n for sweep_key, sweep_params in sweep_dict.items():\n sweep_name = agent_name + '_' + sweep_key\n if sweep_name in self._experiment_structure[environment_name]:\n raise AttributeError(\n f'An sweep for environment {environment_name}, builders {agent_name} '\n f'and sweep key {sweep_key} already exists.'\n )\n\n environment_builder_params = self._environment_dict[environment_name]['build_params']\n\n try:\n exp = self._create_experiment_sweep(environment_name, environment_builder_params,\n agent_name, agent_params, sweep_key, sweep_params)\n self._experiment_structure[environment_name][sweep_name] = exp\n except AttributeError as e:\n self.logger.error(\n f'Unable to create sweep for environment {environment_name}, agent {agent_name} '\n f'and sweep key {sweep_key}'\n )\n self.logger.exception(e)", "def remove_sweep(self, sweep):\n\n try:\n self._sweeps.remove(sweep)\n except ValueError:\n pass", "def remove_sweep(self, sweep):\n\n try:\n self._sweeps.remove(sweep)\n except ValueError:\n pass", "def create_frequency_sweep(\n self,\n setupname,\n unit,\n freqstart,\n freqstop,\n num_of_freq_points,\n sweepname=None,\n sweeptype=\"interpolating\",\n interpolation_tol_percent=0.5,\n interpolation_max_solutions=250,\n save_fields=True,\n save_rad_fields_only=False,\n use_q3d_for_dc=False,\n ):\n\n warnings.warn(\n \"`create_frequency_sweep` is deprecated. Use `create_linear_count_sweep` instead.\",\n DeprecationWarning,\n )\n if sweeptype == \"interpolating\":\n sweeptype = \"Interpolating\"\n elif sweeptype == 'discrete':\n sweeptype = \"Discrete\"\n elif sweeptype == 'fast':\n sweeptype = \"Fast\"\n\n return self.create_linear_count_sweep(\n setupname=setupname,\n unit=unit,\n freqstart=freqstart,\n freqstop=freqstop,\n num_of_freq_points=num_of_freq_points,\n sweepname=sweepname,\n save_fields=save_fields,\n save_rad_fields_only=save_rad_fields_only,\n sweep_type=sweeptype,\n interpolation_tol_percent=interpolation_tol_percent,\n interpolation_max_solutions=interpolation_max_solutions,\n use_q3d_for_dc=use_q3d_for_dc,\n )", "def create_linear_count_sweep(\n self,\n setupname,\n unit,\n freqstart,\n freqstop,\n num_of_freq_points,\n sweepname=None,\n save_fields=True,\n save_rad_fields_only=False,\n sweep_type=\"Interpolating\",\n interpolation_tol_percent=0.5,\n interpolation_max_solutions=250,\n use_q3d_for_dc=False,\n ):\n if sweep_type not in [\"Discrete\", \"Interpolating\", \"Fast\"]:\n raise AttributeError(\"Invalid in `sweep_type`. It has to be either 'Discrete', 'Interpolating', or 'Fast'\")\n if sweepname is None:\n sweepname = generate_unique_name(\"Sweep\")\n\n interpolation = False\n if sweep_type == \"Interpolating\":\n interpolation = True\n save_fields = False\n\n if not save_fields:\n save_rad_fields_only = False\n\n interpolation_tol = interpolation_tol_percent / 100.0\n\n for s in self.setups:\n if s.name == setupname:\n setupdata = s\n if sweepname in [sweep.name for sweep in setupdata.sweeps]:\n oldname = sweepname\n sweepname = generate_unique_name(oldname)\n self.logger.glb.warning(\n \"Sweep %s is already present. 
Sweep has been renamed in %s.\", oldname, sweepname)\n sweep = setupdata.add_sweep(sweepname=sweepname)\n sweep.change_range(\"LinearCount\", freqstart, freqstop, num_of_freq_points, unit)\n sweep.props[\"GenerateSurfaceCurrent\"] = save_fields\n sweep.props[\"SaveRadFieldsOnly\"] = save_rad_fields_only\n sweep.props[\"FastSweep\"] = interpolation\n sweep.props[\"SAbsError\"] = interpolation_tol\n sweep.props[\"EnforcePassivity\"] = interpolation\n sweep.props[\"UseQ3DForDC\"] = use_q3d_for_dc\n sweep.props[\"MaxSolutions\"] = interpolation_max_solutions\n sweep.update()\n self.logger.glb.info(\"Linear count sweep %s has been correctly created\", sweepname)\n return sweep\n return False", "def add_sweep():\n global gui\n start = request.args.get('start', '')\n stop = request.args.get('stop', '')\n step = request.args.get('step', '')\n parameter = request.args.get('parameter', '')\n\n parameters, interactive, bs = gui.controller.main('parameters')\n\n # print('Start: %s\\nStop: %s\\nStep: %s\\nParameter: %s' % (start, stop, step, parameter))\n if start != '' and stop != '' and step != '' and parameter != '':\n interactive_index = -1\n for index in interactive:\n # similar = True\n # print(index)\n\n if str(index) in parameter:\n interactive_index = interactive.index(parameter)\n if interactive_index != -1:\n gui.controller.main(\"sweep %s %s %s %s\" % (interactive_index, start, stop, step))\n else:\n raise ValueError('Interactive_index could not be found: parameter unknown!')\n sweeps = gui.controller.get_sweeps()\n # print(\"Sweeps = \" + str(sweeps))\n return render_template('parameters.html', parameters=interactive, sweeps=sweeps)", "def create_linear_step_sweep(\n self,\n setupname,\n unit,\n freqstart,\n freqstop,\n step_size,\n sweepname=None,\n save_fields=True,\n save_rad_fields_only=False,\n sweep_type=\"Interpolating\",\n interpolation_tol_percent=0.5,\n interpolation_max_solutions=250,\n use_q3d_for_dc=False,\n ):\n if sweep_type not in [\"Discrete\", \"Interpolating\", \"Fast\"]:\n raise AttributeError(\"Invalid in `sweep_type`. It has to be either 'Discrete', 'Interpolating', or 'Fast'\")\n if sweepname is None:\n sweepname = generate_unique_name(\"Sweep\")\n\n interpolation = False\n if sweep_type == \"Interpolating\":\n interpolation = True\n save_fields = False\n\n if not save_fields:\n save_rad_fields_only = False\n\n interpolation_tol = interpolation_tol_percent / 100.0\n\n for s in self.setups:\n if s.name == setupname:\n setupdata = s\n if sweepname in [sweep.name for sweep in setupdata.sweeps]:\n oldname = sweepname\n sweepname = generate_unique_name(oldname)\n self.logger.glb.warning(\n \"Sweep %s is already present. 
Sweep has been renamed in %s.\", oldname, sweepname)\n sweep = setupdata.add_sweep(sweepname=sweepname)\n sweep.change_range(\"LinearStep\", freqstart, freqstop, step_size, unit)\n sweep.props[\"GenerateSurfaceCurrent\"] = save_fields\n sweep.props[\"SaveRadFieldsOnly\"] = save_rad_fields_only\n sweep.props[\"FastSweep\"] = interpolation\n sweep.props[\"SAbsError\"] = interpolation_tol\n sweep.props[\"EnforcePassivity\"] = interpolation\n sweep.props[\"UseQ3DForDC\"] = use_q3d_for_dc\n sweep.props[\"MaxSolutions\"] = interpolation_max_solutions\n sweep.update()\n self.logger.glb.info(\"Linear step sweep %s has been correctly created\", sweepname)\n return sweep\n return False", "def create_single_point_sweep(\n self,\n setupname,\n unit,\n freq,\n sweepname=None,\n save_fields=False,\n save_rad_fields_only=False,\n ):\n if sweepname is None:\n sweepname = generate_unique_name(\"SinglePoint\")\n\n add_subranges = False\n if isinstance(freq, list):\n if not freq:\n raise AttributeError(\"Frequency list is empty! Specify at least one frequency point.\")\n freq0 = freq.pop(0)\n if freq:\n add_subranges = True\n else:\n freq0 = freq\n\n if setupname not in self.setup_names:\n return False\n for s in self.setups:\n if s.name == setupname:\n setupdata = s\n if sweepname in [sweep.name for sweep in setupdata.sweeps]:\n oldname = sweepname\n sweepname = generate_unique_name(oldname)\n self.logger.glb.warning(\n \"Sweep %s is already present. Sweep has been renamed in %s.\", oldname, sweepname)\n sweepdata = setupdata.add_sweep(sweepname, \"Discrete\")\n sweepdata.change_range(\"SinglePoint\", freq0, unit=unit)\n sweepdata.props[\"GenerateSurfaceCurrent\"] = save_fields\n sweepdata.props[\"SaveRadFieldsOnly\"] = save_rad_fields_only\n sweepdata.update()\n if add_subranges:\n for f in freq:\n sweepdata.add_subrange(rangetype=\"SinglePoint\", start=f, unit=unit)\n self.logger.glb.info(\"Single point sweep %s has been correctly created\", sweepname)\n return sweepdata\n return False", "def configure_power_sweep(\n self, freq, start_power, stop_power, *, points=None, ifbw=None\n ):\n self.sweep.type = Sweep.POWER\n self.freq_cw = freq\n self.SOURce.POWer[1].STARt.w(\n start_power\n ) # The port number suffix on POWer is ignored by the instrument\n self.SOURce.POWer[1].STOP.w(stop_power)\n if points:\n self.sweep.points = points\n if ifbw:\n self.ifbw = ifbw", "def combine(self,sweep2):\n\n sweep1=self\n\n if not isinstance(sweep2,SweepParam):\n\n raise ValueError(\"the parameter needs to be a SweepParam\")\n\n init_names=sweep1.names\n\n new_names=sweep2.names\n\n init_values=[]\n\n for x in sweep1.values:\n\n if isinstance(x,np.ndarray):\n\n init_values.append(x.tolist())\n\n else:\n\n init_values.append(x)\n\n new_values=[]\n\n for x in sweep2.values:\n\n if isinstance(x,np.ndarray):\n\n new_values.append(x.tolist())\n\n else:\n\n new_values.append(x)\n\n if any([name in new_names for name in init_names]):\n\n raise ValueError(\"Unexpected behaviour:at least one sweep parameter is repeated\")\n\n if len(init_values)>1:\n\n init_values=[_ for _ in zip(*init_values)]\n\n else:\n\n init_values=init_values[0]\n\n if len(new_values)>1:\n\n new_values=[_ for _ in zip(*new_values)]\n\n else:\n\n new_values=new_values[0]\n\n import itertools\n\n tot_values=[_ for _ in itertools.product(init_values,new_values)]\n\n new_length=len(tot_values)\n\n def flatten(L):\n for item in L:\n try:\n yield from flatten(item)\n except TypeError:\n yield item\n\n tot_values=[_ for _ in flatten(tot_values)]\n\n dict_new={x : 
[] for x in init_names+new_names}\n\n for index in range(new_length):\n\n for name in dict_new.keys():\n\n dict_new[name].append(tot_values.pop(0))\n\n return SweepParam(dict_new)", "def configure_freq_sweep(\n self,\n start_freq,\n stop_freq,\n *,\n points=None,\n ifbw=None,\n power=None,\n log_sweep=False\n ):\n if not log_sweep:\n self.sweep.type = Sweep.LIN\n else:\n self.sweep.type = Sweep.LOG\n\n self.freq_start = start_freq\n self.freq_stop = stop_freq\n if points is not None:\n self.sweep.points = points\n if ifbw is not None:\n self.ifbw = ifbw\n if power is not None:\n self.power_level = power", "def add_W(self, W):\n self.Ws.append(W)", "def add(self, value):\n if self.squared:\n if isinstance(value, list):\n value = [np.square(x) for x in value]\n else:\n value = np.square(value)\n\n if isinstance(value, list):\n for i in range(0, len(value)):\n self.value[i] = np.multiply(self.decay, self.value[i]) + np.multiply((1. - self.decay), value[i])\n else:\n self.value = np.multiply(self.decay, self.value) + np.multiply((1. - self.decay), value)", "def plot_sweep(self,x_axis_list,y_axis_list,sweep_number):\n plot.figure(sweep_number)\n plot.scatter(x_axis_list, y_axis_list, c='r')\n plot.scatter([self.flight_points[sweep_number][0]],[self.flight_points[sweep_number][1]], c='g')\n plot.show()", "def __add__(self, spectrum):\n new_wave = np.unique(np.append(self.wave, spectrum.wave))\n new_wave.sort()\n \n new_flux = np.interp(new_wave, self.wave, self.flux)\n new_flux += np.interp(new_wave, spectrum.wave, spectrum.flux)\n out = SpectrumTemplate(wave=new_wave, flux=new_flux)\n out.fwhm = spectrum.fwhm\n return out", "def AddWave(self, wave_data):\n wave = OpBasedWave(wave_data, self)\n self._waves[wave.GetId()] = wave\n return wave", "def add_synth(self, wave, synth, iteration=0):\n self.fig.add_scatter(x=wave, y=synth, name=f\"Iteration {iteration}\")", "def AddInstrumentVelocity(self, ds):\n self.IsInstrumentVelocity = True\n self.InstrumentVelocity = ds", "def __add__(self, other):\n\n self._add_sub_error_checking(other)\n if (self._counts is None) ^ (other._counts is None):\n raise SpectrumError(\n 'Addition of counts-based and CPS-based spectra is ' +\n 'ambiguous, use Spectrum(counts=specA.counts+specB.counts) ' +\n 'or Spectrum(cps=specA.cps+specB.cps) instead.')\n\n if self._counts is not None and other._counts is not None:\n kwargs = {'counts': self.counts + other.counts}\n if self.livetime and other.livetime:\n kwargs['livetime'] = self.livetime + other.livetime\n else:\n warnings.warn('Addition of counts with missing livetimes, ' +\n 'livetime was set to None.', SpectrumWarning)\n else:\n kwargs = {'cps': self.cps + other.cps}\n spect_obj = Spectrum(\n bin_edges_kev=self.bin_edges_kev, **kwargs)\n return spect_obj", "def setSweepResolution(self,sweepResolution: int) -> None:\n\n if not self.debug:\n self.myFieldFox.write(\"SWE:POIN \" + str(sweepResolution))\n\n return", "def display_sweep(self):\n for sweep in range(len(self.sweep_points)):\n lidar_x_coordinate=self.flight_points[sweep][0]\n lidar_y_coordinate=self.flight_points[sweep][1]\n\n xx=[]\n yy=[]\n for point in range(len(self.sweep_points[sweep])):\n angle_degree=self.sweep_points[sweep][point][0]\n distance=self.sweep_points[sweep][point][1]\n angle_redian = (math.pi * angle_degree) / 180.0\n sweep_point_x=lidar_x_coordinate+ (distance * math.cos(angle_redian))/1000.0\n sweep_point_y=lidar_y_coordinate+ (distance * math.sin(angle_redian))/1000.0\n xx.append(sweep_point_x)\n yy.append(sweep_point_y)\n\n 
self.plot_sweep(xx,yy,sweep)", "def add(self, x, y, label=\"\"):\n self.fig.add_scatter(x=x, y=y, name=label, legendgroup=10)\n self.visible += [-1]\n\n # Update Sliders\n steps = []\n for i in range(self.nsegments):\n step_visible = [(v == i) or (v == -1) for v in self.visible]\n step = {\n \"label\": f\"Segment {i}\",\n \"method\": \"update\",\n \"args\": [\n {\"visible\": step_visible},\n {\n \"title\": f\"Segment {i}\",\n \"annotations\": self.annotations[i],\n \"xaxis\": {\"range\": list(self.wran[i])},\n \"yaxis\": {\"autorange\": True},\n },\n ],\n }\n steps += [step]\n\n self.fig.layout[\"sliders\"][0][\"steps\"] = steps", "def add_wad(self, wad):\n\n self.wads.append(wad)", "def add(self, p):\n\n self.poses.append(CalibrationPoint(p))\n self.selected_point = len(self.poses) - 1\n self.calibration_changed()", "def Add(*args):\n return _gdi_.GraphicsGradientStops_Add(*args)", "def sweep(Finf = default['freqMin'],\r\n Fsup = default['freqMax'],\r\n Fs = default['samplingRate'],\r\n fftDeg = default['fftDegree'],\r\n startmargin = default['startMargin'],\r\n stopmargin = default['stopMargin'],\r\n method = 'logarithmic',\r\n windowing = 'hann'):\r\n Flim = np.array([Finf/(2**(1/6)), min(Fsup*(2**(1/6)),Fs/2)]); # frequency limits [Hz]\r\n Ts = 1/Fs # [s] sampling period\r\n Nstop = stopmargin*Fs # [samples] initial silence number of samples\r\n Nstart = startmargin*Fs # [samples] ending silence number of samples\r\n Nmargin = Nstart + Nstop # [samples] total silence number of samples\r\n N = 2**fftDeg # [samples] full signal number of samples\r\n Nsweep = N - Nmargin +1 # [samples] actual sweep number of samples\r\n Tsweep = Nsweep/Fs # [s] sweep's time length\r\n tsweep = np.arange(0,Tsweep,Ts) # [s] sweep time vector\r\n if tsweep.size > Nsweep: tsweep = tsweep[0:int(Nsweep)] # adjust length\r\n sweept = 0.8*signal.chirp(tsweep, Flim[0], Tsweep, Flim[1],\\\r\n 'logarithmic', phi=-90) # sweep, time domain\r\n sweept = __do_sweep_windowing(sweept,tsweep,Flim,Finf,Fsup,windowing) # fade in and fade out\r\n xt = np.concatenate( (np.zeros( int(Nstart) ),\\\r\n sweept,\\\r\n np.zeros( int(Nstop) ) ) ) # add initial and ending sileces\r\n if xt.size != N: xt = xt[0:int(N)] # adjust length\r\n x = pytta.signalObj(xt,'time',Fs) # transforms into a pytta signalObj\r\n x.Flim = Flim # pass on the frequency limits considering the fade in and fade out\r\n return x", "def addShift(self,shift):\n self.shifts.append(shift)", "def addShift(self,shift):\n self.shifts.append(shift)", "def AddEarthVelocity(self, ds):\n self.IsEarthVelocity = True\n self.EarthVelocity = ds", "def add_station(self, station):\n self.__stations.append(station)" ]
[ "0.5718773", "0.56797856", "0.56797856", "0.5633566", "0.54784757", "0.5438503", "0.52998817", "0.5227742", "0.51928663", "0.5173931", "0.5164654", "0.5148041", "0.51270616", "0.5124847", "0.5116086", "0.5052501", "0.4995556", "0.4933879", "0.48856956", "0.48348308", "0.48040885", "0.47525638", "0.47181457", "0.47143945", "0.46991646", "0.4680055", "0.4645306", "0.4645306", "0.46316764", "0.46157414" ]
0.6326733
0
Remove a sweep object from this wavelength.
def remove_sweep(self, sweep): try: self._sweeps.remove(sweep) except ValueError: pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove(self):\n\n if self.selected_point is None:\n RosProxy().notify(\"No calibration point selected\", STATE.ERROR)\n return\n\n if len(self.poses) == 0:\n RosProxy().notify(\"No calibration point added\", STATE.ERROR)\n return\n\n self.poses.remove(self.poses[self.selected_point])\n\n if len(self.poses) == 0:\n self.selected_point = None\n else:\n self.selected_point = min(len(self.poses) - 1, self.selected_point)\n\n self.calibration_changed()", "def RemoveWave(self, wave_id):\n if wave_id in self._waves:\n del self._waves[wave_id]", "def remove_object(self, name):\n if name in self._objects:\n del self._objects[name]\n else:\n raise ValueError('Object {} not in scene!'.format(name))\n self.close_renderer()", "def WaveletRemoveSelf(self, wave_id, wavelet_id):\n raise NotImplementedError()", "def remove_object(self, obj):\n pass", "def remove_obj(self, obj_name):\n self.scene.remove_world_object(obj_name)", "def remove(cls, ob):\n return cls._remove_avos(cls.__name__, ob)", "def remove(self, pane: Viewable) -> None:\n new_objects = list(self)\n if pane in new_objects:\n index = new_objects.index(pane)\n new_objects.remove(pane)\n self._names.pop(index)\n self.objects = new_objects", "def remove(self, name):\n\n w = self._wdict[name]\n del(self._wdict[name])\n \n \n return w", "def remove(self, game_obj):\r\n self.game_objects_for_removal.append(game_obj)", "def remove(self):\n raise NotImplementedError", "def remove(self):\n raise NotImplementedError", "def remove(self, x):\n del self.d[x]", "def remove():", "def removePoint(self, point):\n self.points.remove(point)", "def remove(self):\n pass", "def remove(self):\n pass", "def remove(self):\n pass", "def remove_stop(self, stop):\n for i, item in enumerate(self):\n if item.properties == stop.properties:\n del self[i]\n break", "def removeObject(self,object):\n self.removeList.append(object)", "def __isub__(self, point):\n self.points.remove(point)\n return self", "def remove(self, x):\n del self[self.index(x)]", "def remove_object(self, name: str):\r\n try:\r\n os.remove(self._path_for_pickle(name))\r\n except FileNotFoundError:\r\n pass", "def RemoveWavelet(self, wavelet_id):\n if wavelet_id in self._wavelets:\n del self._wavelets[wavelet_id]", "def delX(self):\n del self.components[0]", "def delX(self):\n del self.components[0]", "def remove(self):\r\n game_ref.remove(self)", "def remove(self):\n traci.vehicle.remove(self.id)", "def remove(self):\n traci.vehicle.remove(self.id)", "def remove(self):" ]
[ "0.6072216", "0.59406203", "0.57992524", "0.5782639", "0.577996", "0.5719137", "0.5677598", "0.5662963", "0.55765265", "0.5548133", "0.54995716", "0.54995716", "0.5497622", "0.5481501", "0.5480912", "0.54672265", "0.54672265", "0.54672265", "0.54426044", "0.54155236", "0.5411871", "0.5410653", "0.540148", "0.538145", "0.5372156", "0.5372156", "0.5365669", "0.53594154", "0.53594154", "0.5357998" ]
0.8053079
0
Test HSMCatalog with fourth_order=True
def test_fourth_order(): if __name__ == '__main__': logger = piff.config.setup_logger(verbose=2) else: logger = piff.config.setup_logger(log_file='output/test_hsmcatalog.log') image_file = os.path.join('output','test_stats_image.fits') cat_file = os.path.join('output','test_stats_cat.fits') psf_file = os.path.join('output','test_starstats.fits') hsm_file = os.path.join('output', 'test_hsmcatalog.fits') config = { 'input' : { 'image_file_name' : image_file, 'cat_file_name' : cat_file, 'stamp_size' : 48, }, 'psf' : { 'model' : { 'type' : 'Gaussian', 'fastfit': True, 'include_pixel': False }, 'interp' : { 'type' : 'Mean' }, }, 'output' : { 'file_name' : psf_file, 'stats' : [ { 'type': 'HSMCatalog', 'file_name': hsm_file, 'fourth_order': True } ] } } piff.piffify(config, logger) assert os.path.isfile(hsm_file) data = fitsio.read(hsm_file) print('data = ',data) # Check that the model and data measurements are close np.testing.assert_allclose(data['T_model'], data['T_data'], rtol=1.e-4) np.testing.assert_allclose(data['g1_model'], data['g1_data'], rtol=1.e-4) np.testing.assert_allclose(data['g2_model'], data['g2_data'], rtol=1.e-4) np.testing.assert_allclose(data['T4_model'], data['T4_data'], rtol=1.e-4) np.testing.assert_allclose(data['g41_model'], data['g41_data'], atol=1.e-4) np.testing.assert_allclose(data['g42_model'], data['g42_data'], atol=1.e-4) np.testing.assert_allclose(data['h41_model'], data['h41_data'], rtol=1.e-4) np.testing.assert_allclose(data['h42_model'], data['h42_data'], rtol=1.e-4) # Check that the moment values are what we intend them to be psf = piff.read(psf_file) stars = piff.Star.load_images(psf.stars, image_file) for i, star in enumerate(stars): moments = piff.util.calculate_moments(star, fourth_order=True) T = moments['M11']*2 shape = galsim.Shear(e1=moments['M20']/moments['M11'], e2=moments['M02']/moments['M11']) print('moments = ',moments) print('hsm = ',star.hsm) print('data = ',data[i]) print(data['T_data'][i], T, 2*star.hsm[3]**2 / (1-shape.e**2)**0.5) print(data['g1_data'][i], shape.g1, star.hsm[4]) print(data['g2_data'][i], shape.g2, star.hsm[5]) np.testing.assert_allclose(data['T_data'][i], T, rtol=1.e-5) np.testing.assert_allclose(data['g1_data'][i], shape.g1, rtol=1.e-5) np.testing.assert_allclose(data['g2_data'][i], shape.g2, rtol=1.e-5) T4 = moments['M22'] / T np.testing.assert_allclose(data['T4_data'][i], moments['M22']/moments['M11'], rtol=1.e-5) np.testing.assert_allclose(data['g41_data'][i], moments['M31']/moments['M11']**2 - 3*shape.e1, atol=1.e-5) np.testing.assert_allclose(data['g42_data'][i], moments['M13']/moments['M11']**2 - 3*shape.e2, atol=1.e-5) np.testing.assert_allclose(data['h41_data'][i], moments['M40']/moments['M11']**2, rtol=1.e-5) np.testing.assert_allclose(data['h42_data'][i], moments['M04']/moments['M11']**2, rtol=1.e-5) # Our simulated data here are elliptical Gaussians, so check that the fourth order terms # match what we expect for them. # # First, for a round Gaussian, M22 = T^2. # When there is some ellipticity, there is a correction of (1-e^2). # It doesn't come out exact, but it's reasonably close. Not sure why it's not closer... 
print('T4: ', data['T4_data'][i], T/(1-shape.e**2)**0.5) np.testing.assert_allclose(data['T4_data'][i], T/(1-shape.e**2)**0.5, rtol=0.05) # Next, the naive 4th order shape of an elliptical Gaussian is approximately 3e: # M31/M11^2 ~= 3 M20/M11 # M13/M11^2 ~= 3 M02/M11 print('e4: ', moments['M31']/moments['M11']**2, 3*moments['M20']/moments['M11']) print('e4: ', moments['M13']/moments['M11']**2, 3*moments['M02']/moments['M11']) np.testing.assert_allclose(moments['M31']/moments['M11'], 3*moments['M20'], rtol=1.e-3) np.testing.assert_allclose(moments['M13']/moments['M11'], 3*moments['M02'], rtol=1.e-3) # Our g4 measurements subtract off this leading order effect, so for these Gaussian # profiles, the 4th order terms are close to 0. print('g4: ', data['g41_data'][i], data['g42_data'][i]) np.testing.assert_allclose(data['g41_data'][i], 0, atol=1.e-3) np.testing.assert_allclose(data['g42_data'][i], 0, atol=1.e-3) # I didn't try to figure out what the spin-4 values should be for a Gaussian. # If someone wants to work that out, it would be nice to add a test that they are right. # Empirically, it seems to come out pretty close to 10 x (g1+i g2)^2. # The g^2 bit makes sense, but I can't figure out where the factor of ~10 comes from. print('h4: ', data['h41_data'][i], data['h42_data'][i]) # Repeat, adding in raw_moments config['output']['stats'][0]['raw_moments'] = True piff.piffify(config, logger) data = fitsio.read(hsm_file) # Check that the model and data measurements are close for name in ('T', 'g1', 'g2', 'T4', 'h41', 'h42', 'M00', 'M10', 'M01', 'M11', 'M20', 'M02', 'M22', 'M31', 'M13', 'M40', 'M04', 'M22n', 'M33n', 'M44n'): np.testing.assert_allclose(data[name+'_model'], data[name+'_data'], rtol=1.e-4) for name in ('g41', 'g42', 'M21', 'M12', 'M30', 'M03'): # These are close to 0, so use atol, not rtol. np.testing.assert_allclose(data[name+'_model'], data[name+'_data'], atol=1.e-4) # Check that the moment values are what we intend them to be for i, star in enumerate(stars): moments = piff.util.calculate_moments(star, third_order=True, fourth_order=True, radial=True) # Repeat the tests from above to ensure that raw_moments=True doesn't mess up the # fourth_order=True measurements. T = moments['M11']*2 shape = galsim.Shear(e1=moments['M20']/moments['M11'], e2=moments['M02']/moments['M11']) np.testing.assert_allclose(data['T_data'][i], T, rtol=1.e-5) np.testing.assert_allclose(data['g1_data'][i], shape.g1, rtol=1.e-5) np.testing.assert_allclose(data['g2_data'][i], shape.g2, rtol=1.e-5) T4 = moments['M22'] / T np.testing.assert_allclose(data['T4_data'][i], moments['M22']/moments['M11'], rtol=1.e-5) np.testing.assert_allclose(data['g41_data'][i], moments['M31']/moments['M11']**2 - 3*shape.e1, atol=1.e-5) np.testing.assert_allclose(data['g42_data'][i], moments['M13']/moments['M11']**2 - 3*shape.e2, atol=1.e-5) np.testing.assert_allclose(data['h41_data'][i], moments['M40']/moments['M11']**2, rtol=1.e-5) np.testing.assert_allclose(data['h42_data'][i], moments['M04']/moments['M11']**2, rtol=1.e-5) np.testing.assert_allclose(data['T4_data'][i], T/(1-shape.e**2)**0.5, rtol=0.05) np.testing.assert_allclose(moments['M31']/moments['M11'], 3*moments['M20'], rtol=1.e-3) np.testing.assert_allclose(moments['M13']/moments['M11'], 3*moments['M02'], rtol=1.e-3) np.testing.assert_allclose(data['g41_data'][i], 0, atol=1.e-3) np.testing.assert_allclose(data['g42_data'][i], 0, atol=1.e-3) for name in moments.keys(): # These should be exactly the same. 
np.testing.assert_allclose(data[name+'_data'][i], moments[name]) # And the model ones should be close. np.testing.assert_allclose(data[name+'_model'][i], moments[name], atol=1.e-4) # Finally make sure raw_moments works without fourth_order=True del config['output']['stats'][0]['fourth_order'] piff.piffify(config, logger) data = fitsio.read(hsm_file) for i, star in enumerate(stars): moments = piff.util.calculate_moments(star, third_order=True, fourth_order=True, radial=True) for name in moments.keys(): # These should be exactly the same. np.testing.assert_allclose(data[name+'_data'][i], moments[name]) # And the model ones should be close. np.testing.assert_allclose(data[name+'_model'][i], moments[name], atol=1.e-4)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_catalogd_ha_with_two_catalogd(self):\n # Verify two catalogd instances are created with one as active.\n catalogds = self.cluster.catalogds()\n assert(len(catalogds) == 2)\n catalogd_service_1 = catalogds[0].service\n catalogd_service_2 = catalogds[1].service\n assert(catalogd_service_1.get_metric_value(\"catalog-server.active-status\"))\n assert(not catalogd_service_2.get_metric_value(\"catalog-server.active-status\"))\n\n # Verify ports of the active catalogd of statestore and impalad are matching with\n # the catalog service port of the current active catalogd.\n self.__verify_statestore_active_catalogd_port(catalogd_service_1)\n self.__verify_impalad_active_catalogd_port(0, catalogd_service_1)\n self.__verify_impalad_active_catalogd_port(1, catalogd_service_1)\n self.__verify_impalad_active_catalogd_port(2, catalogd_service_1)\n # Verify simple queries are ran successfully.\n self.__run_simple_queries()\n # Verify simple queries with sync_ddl as 1.\n self.__run_simple_queries(sync_ddl=True)\n\n # Restart one coordinator. Verify it get active catalogd address from statestore.\n self.cluster.impalads[0].restart()\n self.cluster.impalads[0].service.wait_for_metric_value('impala-server.ready',\n expected_value=1, timeout=30)\n self.__verify_impalad_active_catalogd_port(0, catalogd_service_1)", "def test_create_hyperflex_app_catalog(self):\n pass", "def test_get_hyperflex_app_catalog_by_moid(self):\n pass", "def test_order_summary_display(self, driver):\n header = Header(driver)\n header.click_cart_menu()\n order_summary_page = OrderSummaryPage(driver)\n assert order_summary_page.order_head == order_summary_page.get_order_head(), \"order summary table header is wrong, please check\"", "def test_hsmcatalog():\n if __name__ == '__main__':\n logger = piff.config.setup_logger(verbose=2)\n else:\n logger = piff.config.setup_logger(log_file='output/test_hsmcatalog.log')\n\n image_file = os.path.join('output','test_stats_image.fits')\n cat_file = os.path.join('output','test_stats_cat.fits')\n psf_file = os.path.join('output','test_starstats.fits')\n hsm_file = os.path.join('output', 'test_hsmcatalog.fits')\n config = {\n 'input' : {\n 'image_file_name' : image_file,\n 'cat_file_name' : cat_file,\n 'stamp_size' : 48,\n },\n 'select' : {\n 'reserve_frac' : 0.2,\n 'seed' : 123\n },\n 'psf' : {\n 'model' : { 'type' : 'Gaussian',\n 'fastfit': True,\n 'include_pixel': False },\n 'interp' : { 'type' : 'Mean' },\n },\n 'output' : {\n 'file_name' : psf_file,\n 'stats' : [\n {\n 'type': 'HSMCatalog',\n 'file_name': hsm_file,\n }\n ]\n }\n }\n piff.piffify(config, logger)\n assert os.path.isfile(hsm_file)\n\n data, header = fitsio.read(hsm_file, header=True)\n for col in ['ra', 'dec', 'x', 'y', 'u', 'v',\n 'T_data', 'g1_data', 'g2_data',\n 'T_model', 'g1_model', 'g2_model',\n 'flux', 'reserve', 'flag_data', 'flag_model']:\n assert len(data[col]) == 10\n true_data = fitsio.read(cat_file)\n\n assert header['PIFF_VERSION'] == piff.__version__\n\n np.testing.assert_allclose(data['x'], true_data['x'])\n np.testing.assert_allclose(data['y'], true_data['y'])\n np.testing.assert_allclose(data['flux'], 123.45, atol=0.001)\n print('reserve = ',data['reserve'])\n print('nreserve = ',np.sum(data['reserve']))\n print('ntot = ',len(data['reserve']))\n assert np.sum(data['reserve']) == int(0.2 * len(data['reserve']))\n np.testing.assert_allclose(data['T_model'], data['T_data'], rtol=1.e-4)\n np.testing.assert_allclose(data['g1_model'], data['g1_data'], rtol=1.e-4)\n np.testing.assert_allclose(data['g2_model'], 
data['g2_data'], rtol=1.e-4)\n\n # On this file, no hsm errors\n np.testing.assert_array_equal(data['flag_data'], 0)\n np.testing.assert_array_equal(data['flag_model'], 0)\n\n image = galsim.fits.read(image_file)\n world = [image.wcs.toWorld(galsim.PositionD(x,y)) for x,y in zip(data['x'],data['y'])]\n np.testing.assert_allclose(data['ra'], [w.ra.deg for w in world], rtol=1.e-4)\n np.testing.assert_allclose(data['dec'], [w.dec.deg for w in world], rtol=1.e-4)\n\n # Repeat with non-Celestial WCS\n wcs = galsim.AffineTransform(0.26, 0.05, -0.08, -0.24, galsim.PositionD(1024,1024))\n config['input']['wcs'] = wcs\n piff.piffify(config, logger)\n data = fitsio.read(hsm_file)\n np.testing.assert_array_equal(data['ra'], 0.)\n np.testing.assert_array_equal(data['dec'], 0.)\n world = [wcs.toWorld(galsim.PositionD(x,y)) for x,y in zip(data['x'],data['y'])]\n np.testing.assert_allclose(data['u'], [w.x for w in world], rtol=1.e-4)\n np.testing.assert_allclose(data['v'], [w.y for w in world], rtol=1.e-4)\n\n # Use class directly, rather than through config.\n psf = piff.PSF.read(psf_file)\n stars, _, _ = piff.Input.process(config['input'])\n stars = piff.Select.process(config['select'], stars)\n hsmcat = piff.stats.HSMCatalogStats()\n with np.testing.assert_raises(RuntimeError):\n hsmcat.write('dummy') # Cannot write before compute\n hsmcat.compute(psf, stars)\n hsm_file2 = os.path.join('output', 'test_hsmcatalog2.fits')\n with np.testing.assert_raises(ValueError):\n hsmcat.write() # Must supply file_name if not given in constructor\n hsmcat.write(hsm_file2)\n data2 = fitsio.read(hsm_file2)\n for key in data.dtype.names:\n np.testing.assert_allclose(data2[key], data[key], rtol=1.e-5)", "def test_catalogd_ha_with_one_catalogd(self):\n # Verify the catalogd instances is created as active.\n catalogds = self.cluster.catalogds()\n assert(len(catalogds) == 1)\n catalogd_service_1 = catalogds[0].service\n assert(catalogd_service_1.get_metric_value(\"catalog-server.active-status\"))\n\n # Verify ports of the active catalogd of statestore and impalad are matching with\n # the catalog service port of the current active catalogd.\n self.__verify_statestore_active_catalogd_port(catalogd_service_1)\n self.__verify_impalad_active_catalogd_port(0, catalogd_service_1)\n self.__verify_impalad_active_catalogd_port(1, catalogd_service_1)\n self.__verify_impalad_active_catalogd_port(2, catalogd_service_1)\n # Verify simple queries are ran successfully.\n self.__run_simple_queries()", "def test_manufacturing_scrap(self):\n\n # Update demo products\n (self.product_4 | self.product_2).write({\n 'tracking': 'lot',\n })\n\n # Update Bill Of Material to remove product with phantom bom.\n self.bom_3.bom_line_ids.filtered(lambda x: x.product_id == self.product_5).unlink()\n\n # Create Inventory Adjustment For Stick and Stone Tools with lot.\n lot_product_4 = self.env['stock.production.lot'].create({\n 'name': '0000000000001',\n 'product_id': self.product_4.id,\n 'company_id': self.env.company.id,\n })\n lot_product_2 = self.env['stock.production.lot'].create({\n 'name': '0000000000002',\n 'product_id': self.product_2.id,\n 'company_id': self.env.company.id,\n })\n\n stock_inv_product_4 = self.env['stock.inventory'].create({\n 'name': 'Stock Inventory for Stick',\n 'product_ids': [(4, self.product_4.id)],\n 'line_ids': [\n (0, 0, {'product_id': self.product_4.id, 'product_uom_id': self.product_4.uom_id.id, 'product_qty': 8, 'prod_lot_id': lot_product_4.id, 'location_id': self.stock_location_14.id}),\n ]})\n\n stock_inv_product_2 
= self.env['stock.inventory'].create({\n 'name': 'Stock Inventory for Stone Tools',\n 'product_ids': [(4, self.product_2.id)],\n 'line_ids': [\n (0, 0, {'product_id': self.product_2.id, 'product_uom_id': self.product_2.uom_id.id, 'product_qty': 12, 'prod_lot_id': lot_product_2.id, 'location_id': self.stock_location_14.id})\n ]})\n (stock_inv_product_4 | stock_inv_product_2)._action_start()\n stock_inv_product_2.action_validate()\n stock_inv_product_4.action_validate()\n\n #Create Manufacturing order.\n production_form = Form(self.env['mrp.production'])\n production_form.product_id = self.product_6\n production_form.bom_id = self.bom_3\n production_form.product_qty = 12\n production_form.product_uom_id = self.product_6.uom_id\n production_3 = production_form.save()\n production_3.action_confirm()\n production_3.action_assign()\n\n # Check Manufacturing order's availability.\n self.assertEqual(production_3.reservation_state, 'assigned', \"Production order's availability should be Available.\")\n\n location_id = production_3.move_raw_ids.filtered(lambda x: x.state not in ('done', 'cancel')) and production_3.location_src_id.id or production_3.location_dest_id.id,\n\n # Scrap Product Wood without lot to check assert raise ?.\n scrap_id = self.env['stock.scrap'].with_context(active_model='mrp.production', active_id=production_3.id).create({'product_id': self.product_2.id, 'scrap_qty': 1.0, 'product_uom_id': self.product_2.uom_id.id, 'location_id': location_id, 'production_id': production_3.id})\n with self.assertRaises(UserError):\n scrap_id.do_scrap()\n\n # Scrap Product Wood with lot.\n self.env['stock.scrap'].with_context(active_model='mrp.production', active_id=production_3.id).create({'product_id': self.product_2.id, 'scrap_qty': 1.0, 'product_uom_id': self.product_2.uom_id.id, 'location_id': location_id, 'lot_id': lot_product_2.id, 'production_id': production_3.id})\n\n #Check scrap move is created for this production order.\n #TODO: should check with scrap objects link in between", "def test_get_hyperflex_app_catalog_list(self):\n pass", "def test_load_order_precedence(self):\r\n \r\n prod = exepath('mocks/65nm_product.txt')\r\n spec = exepath('mocks/myspec.txt')\r\n user = exepath('mocks/user.txt')\r\n\r\n set = ParameterSet(prod, spec, user)\r\n\r\n #params = set.keys()\r\n #params.sort()\r\n #for param in params:\r\n # print '%s = %s' % (param, set[param])\r\n\r\n # User file should be dominant\r\n self.assertEqual(set['test1.var1'], 'user_1')\r\n self.assertEqual(set['test1.var2'], 'user_2')\r\n self.assertEqual(set['test1.var3'], 'user_3')\r\n \r\n # Spec file should be dominant\r\n self.assertEqual(set['test2.var1'], 'spec_21')\r\n self.assertEqual(set['test2.var2'], 'spec_22')\r\n self.assertEqual(set['test2.var3'], 'spec_23')\r\n \r\n # Product file should be dominant\r\n self.assertEqual(set['test3.var1'], 'prod_31')\r\n self.assertEqual(set['test3.var2'], 'prod_32')\r\n self.assertEqual(set['test3.var3'], 'prod_33')", "def test_ordering(self):\n # \"Album 1\" and \"Album 3\" are regular albums\n # \"Album 2\" is V/A\n # The remaining track will create a non-album track.\n self.add_mp3(artist='Artist 1', title='Title 1',\n album='Album 1', filename='song1.mp3', path='album_1')\n self.add_mp3(artist='Artist 1', title='Title 2',\n album='Album 1', filename='song2.mp3', path='album_1')\n self.add_mp3(artist='Artist 1', title='Title 3',\n album='Album 2', filename='song3.mp3', path='album_2')\n self.add_mp3(artist='Artist 2', title='Title 4',\n album='Album 2', filename='song4.mp3', 
path='album_2')\n self.add_mp3(artist='Artist 1', title='Title 5',\n album='Album 3', filename='song5.mp3', path='album_3')\n self.add_mp3(artist='Artist 1', title='Title 6',\n album='Album 3', filename='song6.mp3', path='album_3')\n self.add_mp3(artist='Artist 1', title='Title 7',\n filename='song7.mp3')\n self.run_add()\n\n artist = Artist.objects.get(name='Artist 1')\n\n self.assertEqual(Album.objects.count(), 4)\n reg_album_1 = Album.objects.get(name='Album 1')\n reg_album_2 = Album.objects.get(name='Album 3')\n va_album = Album.objects.get(name='Album 2')\n misc_album = Album.objects.get(miscellaneous=True)\n\n response = self.client.get(reverse('exordium:artist', args=(artist.normname,)))\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, '4 albums')\n self.assertContains(response, '6 songs')\n self.assertQuerysetEqual(response.context['albums'].data,\n [repr(al) for al in [reg_album_1, reg_album_2, misc_album, va_album]])\n self.assertQuerysetEqual(response.context['songs'].data,\n [repr(s) for s in Song.objects.filter(artist=artist).order_by('title')])\n\n # There are certainly some duplicate tests happening down here.\n for album in [reg_album_1, reg_album_2, misc_album, va_album]:\n self.assertContains(response, str(album))\n self.assertContains(response, str(album.artist))\n self.assertContains(response, reverse('exordium:album', args=(album.pk,)))\n self.assertContains(response, reverse('exordium:artist', args=(album.artist.normname,)))\n for song in Song.objects.filter(artist=artist):\n self.assertContains(response, str(song.title))\n self.assertContains(response, song.get_download_url_html5())\n self.assertContains(response, song.get_download_url_m3u())\n for song in Song.objects.exclude(artist=artist):\n self.assertNotContains(response, str(song.title))\n self.assertNotContains(response, song.get_download_url_html5())\n self.assertNotContains(response, song.get_download_url_m3u())", "def test_shelflistitem_row_order(api_settings, shelflist_solr_env,\n get_shelflist_urls, api_client, redis_obj,\n get_found_ids):\n recs = shelflist_solr_env.records['shelflistitem']\n loc = recs[0]['location_code']\n loc_recs = [r for r in recs if r['location_code'] == loc]\n index = ShelflistItemIndex()\n manifest = index.get_location_manifest(loc)\n redis_key = '{}:{}'.format(REDIS_SHELFLIST_PREFIX, loc)\n redis_obj(redis_key).set(manifest)\n\n url = get_shelflist_urls(shelflist_solr_env.records['shelflistitem'])[loc]\n response = api_client.get(url)\n total = response.data['totalCount']\n found_ids = get_found_ids('id', response)\n row_numbers = get_found_ids('row_number', response)\n assert found_ids == manifest\n assert row_numbers == [num for num in range(0, total)]", "def testCosmologyCatalog(self):\n dbObj = myTestGals(database=self.dbName)\n cat = cosmologicalGalaxyCatalog(dbObj)\n cat.write_catalog(self.catName)", "def test_listCatalogEntriesWithSortFilters(self):\n expected_orders = {\n 'launch_date': ['25544', '37820'],\n '-launch_date': ['37820', '25544'],\n 'norad_catalog_number': ['25544', '37820'],\n '-norad_catalog_number': ['37820', '25544'],\n }\n\n for param, order in expected_orders.items():\n response = self.client.get(\n '/api/v1/catalogentry/?ordering={}'.format(param)\n )\n content = response.content.decode('utf8')\n json_data = json.loads(content)\n\n for i in range(len(order)):\n self.assertEqual(\n json_data['results'][i]['norad_catalog_number'],\n order[i]\n )", "def test_get_hyperflex_hxdp_version_list(self):\n pass", "def 
test_entities__PersistentEntities__getMainEntities__1(address_book):\n assert DEFAULT_ORDER == getMainEntities_titles(True)\n assert DEFAULT_ORDER == getMainEntities_titles(False)", "def test_getOrderedFeatures(self):\n print 'Running %s ...' % getName()\n \n s1 = self.sequenceListingFixture.create_sequence_instance(self.sequenceListing) \n \n# test that source feature is at index 0 when feature table has only 1 feature \n source_feature = next((f for f in s1.feature_set.all() if f.featureKey == 'source'), None)\n ordered_features = s1.getOrderedFeatures()\n self.assertTrue(source_feature)\n self.assertEqual(0, ordered_features.index(source_feature))\n \n# add feature\n f1_1 = Feature.objects.create(sequence=s1, \n featureKey='misc_feature', \n location='4')\n \n ordered_features_after_f1_1 = s1.getOrderedFeatures()\n \n self.assertEqual(0, ordered_features_after_f1_1.index(source_feature))\n self.assertEqual(1, ordered_features_after_f1_1.index(f1_1))\n \n # add feature\n f1_2 = Feature.objects.create(sequence=s1, \n featureKey='misc_feature', \n location='2')\n \n ordered_features_after_f1_2 = s1.getOrderedFeatures()\n \n self.assertEqual(0, ordered_features_after_f1_2.index(source_feature))\n self.assertEqual(1, ordered_features_after_f1_2.index(f1_2))\n self.assertEqual(2, ordered_features_after_f1_2.index(f1_1))\n \n # add feature\n f1_3 = Feature.objects.create(sequence=s1, \n featureKey='variation', \n location='9')\n \n ordered_features_after_f1_3 = s1.getOrderedFeatures()\n \n self.assertEqual(0, ordered_features_after_f1_3.index(source_feature))\n self.assertEqual(1, ordered_features_after_f1_3.index(f1_2))\n self.assertEqual(2, ordered_features_after_f1_3.index(f1_1))\n self.assertEqual(3, ordered_features_after_f1_3.index(f1_3))\n \n # add feature\n f1_4 = Feature.objects.create(sequence=s1, \n featureKey='allele', \n location='9')\n \n ordered_features_after_f1_4 = s1.getOrderedFeatures()\n \n self.assertEqual(0, ordered_features_after_f1_4.index(source_feature))\n self.assertEqual(1, ordered_features_after_f1_4.index(f1_2))\n self.assertEqual(2, ordered_features_after_f1_4.index(f1_1))\n self.assertEqual(3, ordered_features_after_f1_4.index(f1_4))\n self.assertEqual(4, ordered_features_after_f1_4.index(f1_3))\n \n # add feature\n f1_5 = Feature.objects.create(sequence=s1, \n featureKey='iDNA', \n location='9')\n \n ordered_features_after_f1_5 = s1.getOrderedFeatures()\n \n self.assertEqual(0, ordered_features_after_f1_5.index(source_feature))\n self.assertEqual(1, ordered_features_after_f1_5.index(f1_2))\n self.assertEqual(2, ordered_features_after_f1_5.index(f1_1))\n self.assertEqual(3, ordered_features_after_f1_5.index(f1_4))\n self.assertEqual(4, ordered_features_after_f1_5.index(f1_5))\n self.assertEqual(5, ordered_features_after_f1_5.index(f1_3))\n \n # add feature this will be ordered before 'allele', because \n# capital letters are lower than lower case in ASCII\n f1_6 = Feature.objects.create(sequence=s1, \n featureKey='CDS', \n location='9..17')\n \n ordered_features_after_f1_6 = s1.getOrderedFeatures()\n \n self.assertEqual(0, ordered_features_after_f1_6.index(source_feature))\n self.assertEqual(1, ordered_features_after_f1_6.index(f1_2))\n self.assertEqual(2, ordered_features_after_f1_6.index(f1_1))\n self.assertEqual(3, ordered_features_after_f1_6.index(f1_6))\n self.assertEqual(4, ordered_features_after_f1_6.index(f1_4))\n self.assertEqual(5, ordered_features_after_f1_6.index(f1_5))\n self.assertEqual(6, ordered_features_after_f1_6.index(f1_3))", "def 
test_shelflistitem_view_orderby(order_by, api_settings, shelflist_solr_env,\n get_shelflist_urls, api_client):\n sl_urls = get_shelflist_urls(shelflist_solr_env.records['shelflistitem'])\n test_url = '{}?orderBy={}'.format(sl_urls.values()[0], order_by)\n response = api_client.get(test_url)\n assert response.status_code == 400\n assert 'not a valid field for ordering' in response.data['detail']", "def test_product_initialization(product_design_space):\n assert product_design_space.name == 'my design space'\n assert product_design_space.description == 'does some things'\n assert len(product_design_space.dimensions) == 3\n assert product_design_space.dimensions[0].descriptor.key == 'alpha'\n assert product_design_space.dimensions[1].descriptor.key == 'beta'\n assert product_design_space.dimensions[2].descriptor.key == 'gamma'", "def test_get_distribution_center_orders(self):\n pass", "def test_update_hyperflex_app_catalog(self):\n pass", "def test_load_groups(query_factory):\n text = (\n \"Order [{one|quantity} {large|size} {Tesora|product} with \"\n \"[{medium|size} {cream|option}|option]|product] from \"\n \"[{Philz|store} in {Downtown Sunnyvale|location}|store]\"\n )\n\n processed_query = markup.load_query(text, query_factory)\n entities = processed_query.entities\n\n assert len(entities) == 7\n\n assert entities[0].text == \"one\"\n assert entities[0].entity.type == \"quantity\"\n assert entities[0].span == Span(6, 8)\n assert entities[0].parent == entities[2]\n\n assert entities[1].text == \"large\"\n assert entities[1].entity.type == \"size\"\n assert entities[1].span == Span(10, 14)\n assert entities[1].parent == entities[2]\n\n assert entities[2].text == \"Tesora\"\n assert entities[2].entity.type == \"product\"\n assert entities[2].span == Span(16, 21)\n assert entities[2].children == (entities[0], entities[1], entities[4])\n\n assert entities[3].text == \"medium\"\n assert entities[3].entity.type == \"size\"\n assert entities[3].span == Span(28, 33)\n assert entities[3].parent == entities[4]\n\n assert entities[4].text == \"cream\"\n assert entities[4].entity.type == \"option\"\n assert entities[4].span == Span(35, 39)\n assert entities[4].parent == entities[2]\n assert entities[4].children == (entities[3],)\n\n assert entities[5].text == \"Philz\"\n assert entities[5].entity.type == \"store\"\n assert entities[5].span == Span(46, 50)\n assert entities[5].children == (entities[6],)\n\n assert entities[6].text == \"Downtown Sunnyvale\"\n assert entities[6].entity.type == \"location\"\n assert entities[6].span == Span(55, 72)\n assert entities[6].parent == entities[5]", "def test1 (self, testStore, dbh, dbn, dbu, dbp):", "def test_st_facets00401m3_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/SType/ST_facets/ST_facets00401m/ST_facets00401m3.xsd\",\n instance=\"sunData/SType/ST_facets/ST_facets00401m/ST_facets00401m3_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def test_st_facets00201m3_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/SType/ST_facets/ST_facets00201m/ST_facets00201m3.xsd\",\n instance=\"sunData/SType/ST_facets/ST_facets00201m/ST_facets00201m3_p.xml\",\n class_name=\"Test\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def test_patch_hyperflex_app_catalog(self):\n pass", "def test_get_order(self):\n 
pass", "def test_catalog_opds(self):\n client = Client()\n response = client.get('/catalog.atom/')\n print 'status code for catalog in opds', response.status_code\n self.failUnlessEqual(response.status_code, 200)", "def test_order(self):\n space1 = Space()\n space1.register(Integer(\"yolo1\", \"uniform\", -3, 6, shape=(2,)))\n space1.register(Integer(\"yolo2\", \"uniform\", -3, 6, shape=(2,)))\n space1.register(Real(\"yolo3\", \"norm\", 0.9))\n space1.register(Categorical(\"yolo4\", (\"asdfa\", 2)))\n\n space2 = Space()\n space2.register(Integer(\"yolo1\", \"uniform\", -3, 6, shape=(2,)))\n space2.register(Real(\"yolo3\", \"norm\", 0.9))\n space2.register(Categorical(\"yolo4\", (\"asdfa\", 2)))\n space2.register(Integer(\"yolo2\", \"uniform\", -3, 6, shape=(2,)))\n\n assert list(space1) == list(space1.keys())\n assert list(space2) == list(space2.keys())\n assert list(space1.values()) == list(space2.values())\n assert list(space1.items()) == list(space2.items())\n assert list(space1.keys()) == list(space2.keys())\n assert list(space1.values()) == list(space2.values())\n assert list(space1.items()) == list(space2.items())", "def test_st_facets00505m3_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/SType/ST_facets/ST_facets00505m/ST_facets00505m3.xsd\",\n instance=\"sunData/SType/ST_facets/ST_facets00505m/ST_facets00505m3_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def test_create_hyperflex_hxdp_version(self):\n pass" ]
[ "0.5679821", "0.5636545", "0.563482", "0.55582744", "0.5495829", "0.54825354", "0.542668", "0.5405813", "0.53886825", "0.5386127", "0.5382041", "0.5373458", "0.53299606", "0.5318472", "0.5315026", "0.5302098", "0.5281488", "0.52782816", "0.5272447", "0.5270211", "0.52653223", "0.5260833", "0.5247738", "0.5246135", "0.52352226", "0.52234185", "0.520118", "0.5200524", "0.51931256", "0.51775056" ]
0.65491086
0
Test that extra property_cols get output correctly in the hsm output file.
def test_property_cols(): image_file = 'input/D00572501_z_c01_r3624p01_immasked.fits.fz' cat_file = 'input/D00572501_z_c01_r5473p01_piff.fits' psf_file = os.path.join('output','test_property_cols.piff') hsm_file = os.path.join('output','test_property_cols_hsm.fits') nstars = 25 scale = 0.26 size = 15 order = 1 stamp_size = 25 config = { 'input' : { 'nstars': nstars, 'image_file_name' : image_file, 'image_hdu' : 1, 'weight_hdu' : 3, 'badpix_hdu' : 2, 'cat_file_name' : cat_file, 'x_col' : 'XWIN_IMAGE', 'y_col' : 'YWIN_IMAGE', 'sky_col' : 'BACKGROUND', 'stamp_size' : stamp_size, 'ra' : 'TELRA', 'dec' : 'TELDEC', 'gain' : 'GAINA', 'satur' : 'SATURATA', 'chipnum': 1, # Select ones with a variety of dtypes. 'property_cols' : ['SOURCE_ID', 'GI_COLOR', 'FLAGS', 'FLAG_COLOR', 'SPREAD_MODEL'], }, 'select' : { 'type': 'Properties', 'where': 'np.abs(SPREAD_MODEL) < 3.e-4', 'reserve_frac' : 0.2, 'seed' : 1234, }, 'psf' : { 'model' : { 'type' : 'PixelGrid', 'scale' : scale, 'size' : size, 'interp' : 'Lanczos(5)', }, 'interp' : { 'type' : 'BasisPolynomial', 'order' : [1, 1, 1], 'keys': ['u', 'v', 'GI_COLOR'], }, }, 'output' : { 'file_name' : psf_file, 'stats': [ { 'type': 'HSMCatalog', 'file_name': hsm_file, }, ], }, } piff.piffify(config) hsm = fitsio.read(hsm_file) cat = fitsio.read(cat_file) print('hsm dtype = ',hsm.dtype) print('cat dtype = ',cat.dtype) for key in hsm.dtype.names: print(key) if key in cat.dtype.names: assert hsm[key].dtype.type == cat[key].dtype.type elif key == 'reserve': assert hsm[key].dtype.type == np.dtype(bool).type elif key.startswith('flag'): assert hsm[key].dtype.type == np.dtype(int).type elif key == 'sky': # This one is read from the input catalog, but renamed assert hsm[key].dtype.type == np.float32 else: assert hsm[key].dtype.type == np.dtype(float).type # Check that drawing the image works without specifying chipnum. # This is ok so long as the input is really only a single chip. # cf. Issue #140 psf = piff.read(psf_file) im1 = psf.draw(35, 40, center=True, GI_COLOR=1) # If the input field didn't include chipnum, then it makes no difference for a single chip. del config['input']['chipnum'] piff.piffify(config) psf = piff.read(psf_file) im2 = psf.draw(35, 40, center=True, GI_COLOR=1) assert im1 == im2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_overall_report_columns():\n assert (len(overall_data['columns']) == 31)", "def test_columns_property():\n atom = ATOMClassifier(X_bin, y_bin, random_state=1)\n atom.run(\"LR\")\n assert [i == j for i, j in zip(atom.lr.columns, atom.columns)]", "def test_test_inline_additional_properties(self):\n pass", "def test_cols(self):\n gfile = grades.writers.GradesFile(self.fname)\n gfile.table.compute_cumul()\n gfile.table.compute_mean()\n gfile.table_format = 'org'\n self.check_output(self.output_str4, gfile, div_on=('Group',),\n columns=('Nom', 'Test 1', 'Test 2', '*Cumul*'))", "def test_num_columns(self):\n pass", "def reportProperties():", "def test_bedcolumns(self):\n for i in self.__bed_files:\n sys.argv = ['', 'bedcolumns', i]\n bioformats.cli.bioformats()", "def test_optional_cols():\n extract_config_dir = os.path.join(\n settings.BASE_DIR, \"extract_configs\", \"templates\"\n )\n for ft, obj in FILE_TYPES.items():\n ec_file = obj[\"template\"]\n required_cols = obj[\"required_columns\"]\n if not ec_file:\n continue\n\n ec_path = os.path.join(extract_config_dir, ec_file)\n print(f\"Testing extract config: {ec_path}\")\n assert os.path.exists(ec_path)\n\n # Drop columns that are not required\n df = make_template_df(ft)[required_cols]\n\n Extractor().extract(df, ec_path)", "def exportECLPropertyFiles(self, fname):\r\n\r\n # Convert point data to cell data for output\r\n # verifying if this is necessary or if ECLIPSE can use point attributes\r\n pointConvert = True\r\n if pointConvert:\r\n p2c = vtk.vtkPointDataToCellData()\r\n p2c.SetInputDataObject(self.Grid)\r\n p2c.PassPointDataOn()\r\n p2c.Update()\r\n self.Grid = p2c.GetOutput()\r\n\r\n filename, ext = os.path.splitext(fname)\r\n for ia in range(self.Grid.GetCellData().GetNumberOfArrays()):\r\n prop = self.Grid.GetCellData().GetArray(ia).GetName()\r\n print(\"exporting prop\", prop)\r\n if self.GridType == \"vtkStructuredGrid\":\r\n with io.open(filename + \"prop-\" + prop.lower() + \".GRDECL\", 'w', newline='\\r\\n') as f:\r\n f.write('-- Generated [\\n')\r\n f.write('-- Format : ECLIPSE keywords (grid properties) (ASCII)\\n')\r\n f.write('-- Exported by : ReGrid v.' 
+ version + \"\\n\")\r\n f.write('-- User name : ' + getpass.getuser() + \"\\n\")\r\n f.write('-- Date : ' + datetime.now().strftime(\"%A, %B %d %Y %H:%M:%S\") + \"\\n\")\r\n f.write('-- Project : ' + \"ReGrid project\\n\")\r\n f.write('-- Grid : ' + \"Description\\n\")\r\n f.write('-- Unit system : ' + \"ECLIPSE-Field\\n\")\r\n f.write('-- Generated ]\\n\\n')\r\n\r\n f.write(prop.upper() + ' -- Generated : ReGrid\\n')\r\n f.write('-- Property name in Petrel : ' + prop + '\\n')\r\n\r\n c = -999.9999\r\n N = 0\r\n ii = 0\r\n fstr = \" \"\r\n for iz in range(self.nz):\r\n for iy in range(self.nn):\r\n for ix in range(self.ne):\r\n # iac = round(self.Grid.GetCellData().GetArray(ia).GetTuple1(ii), 4)\r\n iac = '{:0.4e}'.format(self.Grid.GetCellData().GetArray(ia).GetTuple1(ii))\r\n print(iac)\r\n ii += 1\r\n if iac == c:\r\n N += 1\r\n else:\r\n if c != -999.9999:\r\n fstr = self.printPROP(f, c, N, fstr)\r\n c = eval(iac)\r\n N = 1\r\n fstr = self.printPROP(f, c, N, fstr)\r\n f.write(fstr)\r\n f.write(\" /\")\r\n f.write(\"\\n\")", "def testAddingPropertyFields(self):\n map_sheet = self.properties[PROPERTY_SHEET]\n for key, value in PROPS.items():\n self.failUnless(map_sheet.hasProperty(key) and list(map_sheet.getProperty(key)) == value)", "def test_filter_maf_file_cols(self):\n maf_lines = [\n ['# comment 1'], # keep the comments\n ['# comment 2'],\n ['Hugo_Symbol', 'foo_value'], # foo_value column should be removed in output\n ['SUFU', '1'],\n ['GOT1', '2']\n ]\n # run the script in a temporary directory\n with TemporaryDirectory() as tmpdir:\n input_maf_file = write_table(tmpdir = tmpdir, filename = 'input.maf', lines = maf_lines)\n input_json = {\n \"input_file\": {\n \"class\": \"File\",\n \"path\": input_maf_file\n },\n \"output_filename\": \"output.maf\"\n }\n\n output_json, output_dir = run_cwl(\n testcase = self,\n tmpdir = tmpdir,\n input_json = input_json,\n cwl_file = cwl_file)\n\n expected_output = {\n 'output_file': {\n 'location': 'file://' + os.path.join(output_dir, 'output.maf'),\n 'basename': 'output.maf',\n 'class': 'File',\n 'checksum': 'sha1$e55f7bdaa146f37b48d6c920ed27184e394ef1e6',\n 'size': 46,\n 'path': os.path.join(output_dir, 'output.maf')\n }\n }\n self.assertDictEqual(output_json, expected_output)\n\n # validate number of lines output\n with open(output_json['output_file']['path']) as fin:\n output_maf_lines = len(fin.readlines())\n self.assertEqual(output_maf_lines, 5)\n\n # validate file contents\n comments, mutations = load_mutations(output_json['output_file']['path'])\n\n expected_comments = ['# comment 1', '# comment 2']\n self.assertEqual(comments, expected_comments)\n\n expected_mutations = [{'Hugo_Symbol': 'SUFU'}, {'Hugo_Symbol': 'GOT1'}]\n self.assertEqual(mutations, expected_mutations)", "def test_validate_properties(self):\n\n ingest_mgmr = IngestManager()\n ingest_mgmr.validate_config_file(self.example_config_data)\n ingest_mgmr.validate_properties()\n assert (ingest_mgmr.collection.name == 'my_col_1')\n assert (ingest_mgmr.experiment.name == 'my_exp_1')\n assert (ingest_mgmr.channel.name == 'my_ch_1')", "def test_gather_columns(self):\n expected_gathered_columns = [\n \"\"\"\n unusable_col: \"DUMMYVALUNUSABLECOL\"\n date.1: date_0 | extra_date_rule | \"(\" + date + \")\"\n datetime.2: datetime_0 | extra_datetime_rule | \"(\" + datetime + \")\"\n datetime_end.1: datetime_0 | datetime_end_conv | datetime_aggr | \"(\" + datetime_end + \")\"\n boolean.1: TRUE | FALSE | extra_bool_rule | \"(\" + boolean + \")\"\n string.1: str_0 | str_1 | ESCAPED_STRING | 
extra_string_rule | \"(\" + string + \")\"\n num.1: num_0 | NUMBER | extra_num_rule | \"(\" + num + \")\"\n \"\"\",\n \"\"\"\n unusable_col: \"DUMMYVALUNUSABLECOL\"\n date.1: date_0 | extra_date_rule | \"(\" + date + \")\"\n datetime.2: datetime_0 | extra_datetime_rule | \"(\" + datetime + \")\"\n datetime_end.1: datetime_0 | datetime_end_conv | datetime_aggr | \"(\" + datetime_end + \")\"\n boolean.1: bool_0 | TRUE | FALSE | extra_bool_rule | \"(\" + boolean + \")\"\n string.1: str_0 | str_1 | str_2 | ESCAPED_STRING | extra_string_rule | \"(\" + string + \")\"\n num.1: num_0 | NUMBER | extra_num_rule | \"(\" + num + \")\"\n \"\"\",\n \"\"\"\n unusable_col: \"DUMMYVALUNUSABLECOL\"\n date.1: extra_date_rule | \"(\" + date + \")\"\n datetime.2: extra_datetime_rule | \"(\" + datetime + \")\"\n datetime_end.1: datetime_end_conv | datetime_aggr | \"(\" + datetime_end + \")\"\n boolean.1: TRUE | FALSE | extra_bool_rule | \"(\" + boolean + \")\"\n string.1: str_0 | str_1 | ESCAPED_STRING | extra_string_rule | \"(\" + string + \")\"\n num.1: num_0 | NUMBER | extra_num_rule | \"(\" + num + \")\"\n \"\"\",\n \"\"\"\n unusable_col: \"DUMMYVALUNUSABLECOL\"\n date.1: extra_date_rule | \"(\" + date + \")\"\n datetime.2: extra_datetime_rule | \"(\" + datetime + \")\"\n datetime_end.1: datetime_end_conv | datetime_aggr | \"(\" + datetime_end + \")\"\n boolean.1: TRUE | FALSE | extra_bool_rule | \"(\" + boolean + \")\"\n string.1: str_0 | str_1 | ESCAPED_STRING | extra_string_rule | \"(\" + string + \")\"\n num.1: num_0 | NUMBER | extra_num_rule | \"(\" + num + \")\"\n \"\"\",\n \"\"\"\n unusable_col: \"DUMMYVALUNUSABLECOL\"\n date.1: date_0 | extra_date_rule | \"(\" + date + \")\"\n datetime.2: datetime_0 | extra_datetime_rule | \"(\" + datetime + \")\"\n datetime_end.1: datetime_0 | datetime_end_conv | datetime_aggr | \"(\" + datetime_end + \")\"\n boolean.1: TRUE | FALSE | extra_bool_rule | \"(\" + boolean + \")\"\n string.1: str_0 | str_1 | ESCAPED_STRING | extra_string_rule | \"(\" + string + \")\"\n num.1: num_0 | NUMBER | extra_num_rule | \"(\" + num + \")\"\n \"\"\",\n \"\"\"\n unusable_col: \"DUMMYVALUNUSABLECOL\"\n date.1: extra_date_rule | \"(\" + date + \")\"\n datetime.2: extra_datetime_rule | \"(\" + datetime + \")\"\n datetime_end.1: datetime_end_conv | datetime_aggr | \"(\" + datetime_end + \")\"\n boolean.1: TRUE | FALSE | extra_bool_rule | \"(\" + boolean + \")\"\n string.1: ESCAPED_STRING | extra_string_rule | \"(\" + string + \")\"\n num.1: num_0 | NUMBER | extra_num_rule | \"(\" + num + \")\"\n \"\"\",\n ]\n for selectable, expected_gathered in zip(\n self.selectables, expected_gathered_columns\n ):\n columns = make_column_collection_for_selectable(selectable)\n gathered_columns = f\"\"\"\n {gather_columns(\"unusable_col\", columns, \"unusable\")}\n {gather_columns(\"date.1\", columns, \"date\", additional_rules=[\"extra_date_rule\"])}\n {gather_columns(\"datetime.2\", columns, \"datetime\", additional_rules=[\"extra_datetime_rule\"])}\n {gather_columns(\"datetime_end.1\", columns, \"datetime\", additional_rules=[\"datetime_end_conv\", \"datetime_aggr\"])}\n {gather_columns(\"boolean.1\", columns, \"bool\", additional_rules=[\"TRUE\", \"FALSE\", \"extra_bool_rule\"])}\n {gather_columns(\"string.1\", columns, \"str\", additional_rules=[\"ESCAPED_STRING\", \"extra_string_rule\"])}\n {gather_columns(\"num.1\", columns, \"num\", additional_rules=[\"NUMBER\", \"extra_num_rule\"])}\n \"\"\"\n self.assertEqual(\n str_dedent(gathered_columns), str_dedent(expected_gathered)\n )", "def 
test_metadata_filter_hmp(self):\n table_factory = DataTableFactory(PACKET_DIR)\n hmp1 = table_factory.hmp()\n\n metadata = pd.DataFrame({'foo': {'haib18CEM5332_HMGTJCCXY_SL342402': 1}})\n table_factory.set_metadata(metadata)\n hmp2 = table_factory.hmp()\n\n self.assertEqual(hmp1.shape[0] // 2, hmp2.shape[0])", "def output_columns(self) -> List[str]:", "def test_attributes(cls):\n table_data = [\n ['Name', 'Color', 'Type'],\n ['Avocado', 'green', 'nut'],\n ['Tomato', 'red', 'fruit'],\n ['Lettuce', 'green', 'vegetable'],\n ]\n table = cls(table_data) # '| Lettuce | green | vegetable |'\n\n table.outer_border = False\n assert 58 == table.column_max_width(0)\n assert 56 == table.column_max_width(1)\n assert 60 == table.column_max_width(2)\n table.outer_border = True\n\n table.inner_column_border = False\n assert 58 == table.column_max_width(0)\n assert 56 == table.column_max_width(1)\n assert 60 == table.column_max_width(2)\n table.outer_border = False\n assert 60 == table.column_max_width(0)\n assert 58 == table.column_max_width(1)\n assert 62 == table.column_max_width(2)\n table.outer_border = True\n table.inner_column_border = True\n\n table.padding_left = 0\n assert 59 == table.column_max_width(0)\n assert 57 == table.column_max_width(1)\n assert 61 == table.column_max_width(2)\n table.padding_right = 5\n assert 47 == table.column_max_width(0)\n assert 45 == table.column_max_width(1)\n assert 49 == table.column_max_width(2)", "def test_get_data_columns():\n logger.info(\"No unit tests exist for pudl.analysis.spatial.get_data_columns()\")", "def test_header_file_with_cols(suffix: str) -> None:\n path = rsc / header_file\n columns = [\"One\", \"Two\", \"Three\", \"Four\", \"Five\"]\n df = read_ods(path.with_suffix(suffix), \"Sheet1\", columns=columns)\n\n assert list(df.columns) == columns\n assert len(df) == 10\n assert len(df.columns) == 5", "def test_dev_props(name, properties):\n assert properties['x']\n assert properties['y']", "def test_analyze_columns_with_model(self):\n\t\t\n\n\t\tdetails = self.watcher.analyze(model=self.model)\n\t\tself.assertEqual(isinstance(details, pd.DataFrame), True, \"details is a pandas DataFrame\")\n\n\t\tcolumns = \"layer_id,name,D,M,N,alpha,alpha_weighted,has_esd,lambda_max,layer_type,log_alpha_norm,log_norm,log_spectral_norm,norm,num_evals,rank_loss,rf,sigma,spectral_norm,stable_rank,sv_max,sv_min,xmax,xmin,num_pl_spikes,weak_rank_loss\".split(',')\n\t\tprint(details.columns)\n\t\tfor key in columns:\n\t\t\tself.assertTrue(key in details.columns, \"{} in details. 
Columns are {}\".format(key, details.columns))", "def test_nwb_extractor_property_retrieval(path_to_nwbfile, nwbfile_with_ecephys_content):\n\n electrical_series_name_list = [\"ElectricalSeries1\", \"ElectricalSeries2\"]\n for electrical_series_name in electrical_series_name_list:\n recording_extractor = NwbRecordingExtractor(path_to_nwbfile, electrical_series_name=electrical_series_name)\n\n nwbfile = nwbfile_with_ecephys_content\n electrical_series = nwbfile.acquisition[electrical_series_name]\n electrical_series_electrode_indices = electrical_series.electrodes.data[:]\n electrodes_table = nwbfile.electrodes.to_dataframe()\n sub_electrodes_table = electrodes_table.iloc[electrical_series_electrode_indices]\n\n expected_property = sub_electrodes_table[\"property\"].values\n extracted_property = recording_extractor.get_property(\"property\")\n assert np.array_equal(extracted_property, expected_property)", "def custom_props():\r\n section = document.add_section()\r\n new_width, new_height = section.page_height, section.page_width\r\n section.orientation = WD_ORIENT.LANDSCAPE\r\n section.page_width = 7772400\r\n section.page_height = 10058400\r\n document.add_heading('Custom Properties', level=1)\r\n\r\n customproperties = get_qlik_sense.get_customprop()\r\n num_of_customproperties = len(customproperties)\r\n table = document.add_table(rows=num_of_customproperties+1, cols=3)\r\n table.style = 'Grid Table 1 Light Accent 1'\r\n row = table.rows[0]\r\n row.cells[0].text = 'name'\r\n row.cells[1].text = 'choice values'\r\n row.cells[2].text = 'object types'\r\n\r\n for customproperty in range(num_of_customproperties):\r\n row = table.rows[customproperty+1]\r\n row.cells[0].text = str(customproperties[customproperty][0])\r\n row.cells[1].text = ', '.join(customproperties[customproperty][1])\r\n row.cells[2].text = ', '.join(customproperties[customproperty][2])\r\n document.add_page_break()", "def _add_necessary_columns(args, custom_columns):\n # we need to add the variant's chrom, start and gene if \n # not already there.\n if custom_columns.find(\"gene\") < 0:\n custom_columns += \", gene\"\n if custom_columns.find(\"start\") < 0:\n custom_columns += \", start\"\n \n return custom_columns", "def test_empty_cols_allowed(self):\n self.test_table.allow_empty_columns = True\n self.test_table.change_header(Path=1, SectionType=3, Value=4)\n self.assertEqual(self.test_table._header, [\"Path\", None, \"SectionType\",\n \"Value\"])", "def test_output_ensure_output_for_property(profile_from_dataset):\n output = CheckOutput(profile=profile_from_dataset)\n\n output.ensure_output_for_property(\"PRES\")\n flags = output.get_output_flags_for_property(\"PRES\")\n\n assert flags is not None\n assert isinstance(flags, ma.MaskedArray)\n assert np.all(flags == ArgoQcFlag.GOOD.value)", "def test_filter_maf_file_cols_full(self):\n input_maf = os.path.join(DATA_SETS['Proj_08390_G']['MAF_DIR'], \"Sample1.Sample2.muts.maf\")\n\n with TemporaryDirectory() as tmpdir:\n input_json = {\n \"input_file\": {\n \"class\": \"File\",\n \"path\": input_maf\n },\n \"output_filename\": \"output.maf\"\n }\n\n output_json, output_dir = run_cwl(\n testcase = self,\n tmpdir = tmpdir,\n input_json = input_json,\n cwl_file = cwl_file)\n\n expected_output = {\n 'output_file': {\n 'location': 'file://' + os.path.join(output_dir, 'output.maf'),\n 'basename': 'output.maf',\n 'class': 'File',\n 'checksum': 'sha1$a2f5b9f1533fd443b41561ca718ffca62ab45f36',\n 'size': 2710681,\n 'path': os.path.join(output_dir, 'output.maf')\n }\n }\n 
self.assertDictEqual(output_json, expected_output)\n\n # validate number of lines output\n with open(output_json['output_file']['path']) as fin:\n output_maf_lines = len(fin.readlines())\n self.assertEqual(output_maf_lines, 12518)\n\n # validate file contents\n comments, mutations = load_mutations(output_json['output_file']['path'])\n\n self.assertEqual(len(mutations), 12514)\n\n for key in mutations[0].keys():\n self.assertTrue(key in cols_to_keep)\n\n # make sure there are fewer than or equal to the number of columns in new output as there are entries to keep \n self.assertTrue( len(mutations[0].keys()) <= len(cols_to_keep) )", "def test_write_device_properties_to_db(self):\n initial_count = self._count_device_properties()\n tango_sim_generator.write_device_properties_to_db(\n self.sim_device.name(), self.expected_model, self.db_instance\n )\n num_expected_properties = len(self.expected_model.sim_properties.keys())\n final_count = self._count_device_properties()\n num_added_properties = final_count - initial_count\n self.assertEquals(num_expected_properties, num_added_properties)", "def testWriteSolutionProperties(self):\n file_writer = writers.VSSolutionFileWriter()\n\n file_writer._file = io.BytesIO()\n\n file_writer._WriteSolutionProperties()\n\n file_writer._file.seek(0, os.SEEK_SET)\n output_data = file_writer._file.read()\n\n expected_output_data = (\n b'\\tGlobalSection(SolutionProperties) = preSolution\\r\\n'\n b'\\t\\tHideSolutionNode = FALSE\\r\\n'\n b'\\tEndGlobalSection\\r\\n')\n self.assertEqual(output_data, expected_output_data)", "def test_basic_columns(self):\n\t\t\n\t\tdetails = self.watcher.describe()\n\t\tself.assertEqual(isinstance(details, pd.DataFrame), True, \"details is a pandas DataFrame\")\n\n\t\tfor key in ['layer_id', 'name', 'M', 'N', 'Q', 'longname']:\n\t\t\tself.assertTrue(key in details.columns, \"{} in details. Columns are {}\".format(key, details.columns))\n\n\t\tN = details.N.to_numpy()[0]\n\t\tM = details.M.to_numpy()[0]\n\t\tQ = details.Q.to_numpy()[0]\n\n\t\tself.assertAlmostEqual(Q, N/M, places=2)", "def test_basic_columns(self):\n\t\t\n\t\tdetails = self.watcher.describe()\n\t\tself.assertEqual(isinstance(details, pd.DataFrame), True, \"details is a pandas DataFrame\")\n\n\t\tfor key in ['layer_id', 'name', 'M', 'N', 'Q', 'longname']:\n\t\t\tself.assertTrue(key in details.columns, \"{} in details. Columns are {}\".format(key, details.columns))\n\n\t\tN = details.N.to_numpy()[0]\n\t\tM = details.M.to_numpy()[0]\n\t\tQ = details.Q.to_numpy()[0]\n\n\t\tself.assertAlmostEqual(Q, N/M, places=2)" ]
[ "0.6619292", "0.6011231", "0.59809303", "0.5905065", "0.585393", "0.57667685", "0.57197595", "0.5662904", "0.5658392", "0.5605069", "0.5597131", "0.55891824", "0.55626684", "0.5546742", "0.5512642", "0.54545933", "0.54532987", "0.5363769", "0.5359847", "0.53222424", "0.53192085", "0.531845", "0.5308694", "0.5303005", "0.5299923", "0.5290526", "0.52890354", "0.5288745", "0.52880746", "0.52880746" ]
0.69262546
0
Clean realtime ACE data using the status flag. Note Supports 'clean' and 'dirty'. Replaces all fill values with NaN. Clean status flag of zero (nominal data) Dirty status flag < 9 (accepts bad data record, removes no data record)
def clean(self): # Perform the standard ACE cleaning max_status = mm_ace.clean(self) # Replace bad values with NaN and remove times with no valid data self.data = self.data[self.data['status'] <= max_status] return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean(self):\n # Perform the standard ACE cleaning\n max_status = mm_ace.clean(self)\n\n # Replace bad values with NaN and remove times with no valid data\n ecols = ['eflux_38-53', 'eflux_175-315']\n\n # Evaluate the electron flux data\n self[self.data['status_e'] > max_status, ecols] = np.nan\n\n # Evaluate the proton flux data\n pcols = ['pflux_47-68', 'pflux_115-195', 'pflux_310-580',\n 'pflux_795-1193', 'pflux_1060-1900']\n self[self.data['status_p'] > max_status, pcols] = np.nan\n\n # Include both fluxes and the anisotropy index in the removal eval\n eval_cols = ecols + pcols\n eval_cols.append('anis_ind')\n\n # Remove lines without any good data\n good_cols = (np.isfinite(self.data.loc[:, eval_cols])).sum(axis=1)\n bad_index = good_cols[good_cols == 0].index\n self.data = self.data.drop(index=bad_index)\n\n return", "def clean(self):\n\n if (self.clean_level == 'dusty') | (self.clean_level == 'clean'):\n idx, = np.where(self['B_flag'] == 0)\n self.data = self[idx, :]\n\n return", "def full_clean():\n response_to_df_csv()\n dirty_data = pd.read_csv(\"./data/dirty_data.csv\")\n cleaned_data = dirty_data\n cleaned_data = drop_cols(cleaned_data)\n cleaned_data = lowercase_columns(cleaned_data)\n cleaned_data = make_numeric(cleaned_data)\n cleaned_data = drop_zero_pay(cleaned_data)\n cleaned_data = add_log_col(cleaned_data)\n cleaned_data.to_csv('./data/cleaned_for_testing.csv')\n\n return cleaned_data", "def clearMyStatus(self):\n self.maxBattery = 0\n self.currentPower = 0\n self.thrust = 0.0\n self.rotation = 0.0\n self.radar = 0\n self.jamming = 0\n self.repair = 0\n self.mass = 0.0\n self.accel = 0.0\n self.maxAssault = 0\n self.assaultStrength = 0", "def clean(self):\n self.df = _data.prune(self.df, [REGEX_PATTERN_GCI, REGEX_PATTERN_DB_ID])\n self.df, _ = _data.remove_totally_failed_tests(self.df)\n self.is_cleaned = True", "def clean(c):", "def truth_clean():\n frame = pd.read_csv(PATH + 'truth_clean.csv', decimal=',')\n frame.set_index('id', drop=False, inplace=True, verify_integrity=True)\n frame.beeID = frame.beeID.apply(parse_float_list)\n frame.descriptor = frame.descriptor.apply(parse_float_list)\n return frame", "def clean_data(df):\n\n global cato\n # YearRemodAdd: Remodel date (same as construction date if no remodeling or additions)\n df.YearRemodAdd = np.where(df.YearRemodAdd < df.YearBuilt, df.YearBuilt, df.YearRemodAdd)\n assert len(df.loc[df.YearRemodAdd < df.YearBuilt]) == 0, 'Check YearRemodAdd - should be greater or equal then YearBuilt'\n \n # Check range of years\n yr_max = 2022\n # Some values of GarageYrBlt are corrupt. 
Fix them by replacing them with the YearBuilt\n df.GarageYrBlt = np.where(df.GarageYrBlt > yr_max, df.YearBuilt, df.GarageYrBlt)\n assert df.YearBuilt.max() < yr_max and df.YearBuilt.min() > 1800, 'Check YearBuilt min() and max()'\n assert df.YearRemodAdd.max() < yr_max and df.YearRemodAdd.min() > 1900, 'Check YearRemodAdd min() and max()'\n assert df.YrSold.max() < yr_max and df.YrSold.min() > 2000, 'Check YrSold min() and max()'\n assert df.GarageYrBlt.max() < yr_max and df.GarageYrBlt.min() >= 0, 'Check GarageYrBlt min() and max()'\n \n # Check values of ordinal catagorical variables\n for k in cato.keys():\n assert set(df[k].unique()).difference(df[k].cat.categories) == set(), f'Check values of {k}'\n \n # Check typos in nominal categorical variables\n df['Exterior2nd'] = df['Exterior2nd'].replace({'Brk Cmn':'BrkComm', 'CmentBd':'CemntBd', 'Wd Shng':'WdShing'})\n # Renew a data type after replacement\n df['Exterior2nd'] = df['Exterior2nd'].astype(\"category\")\n if \"None\" not in df['Exterior2nd'].cat.categories:\n df['Exterior2nd'].cat.add_categories(\"None\", inplace=True)\n\n return df", "async def clean_status(self):\n async with self._mongo.create_session() as session:\n await self._mongo.status.find_one_and_update(\n {\"_id\": \"hmm\"},\n {\"$set\": {\"installed\": None, \"task\": None, \"updates\": []}},\n session=session,\n )", "def clean():\n filter_phase_data()\n combine_phase_data()\n remove_duplicates_phase_data()", "def cleaning (data):", "def clean_dataframe(self, df_in , what = ''): \n \n if what == 'era5fb': # cleaning the era5 feedback only \n df = df_in[np.isfinite(df_in['obsvalue@body'])]\n try: \n df = df.loc[ df['vertco_type@body'] != 2 ] \n except:\n pass\n df = df.reindex()\n df = df[np.isfinite(df['vertco_reference_1@body'])]\n #print('check lengths: ' , len(df_in) , len(df) )\n new_ind = np.array ( range(len(df))) \n df['index'] =new_ind\n df = df.set_index('index')\n \n else: \n ### check if can be optimized ???\n df = df_in.loc[ df_in['z_coordinate_type'] != 2 ] # case where the levels are given in terms of geopotential only (pressure not available)\n \n df = df.loc[ (df['observation_value'] != -99999.0) \n & (df['observation_value'] != -999.0) \n & (df['observation_value'] != -9999) \n & (df['observation_value'] != -9999.0) \n & (df['observation_value'] != -999.9) \n & (df['observation_value'] != -8888 )\n & (df['observation_value'] != -8888.0 )\n \n #& (df['z_coordinate_type'] != 2) \n & (df['z_coordinate'] != -99999.0) \n & (df['z_coordinate'] != -9999.0 )\n & (df['z_coordinate'] != 999 )\n & (df['z_coordinate'] != 999.0 )\n \n \n ] #cleaning the values \n #clean = clean.loc[ (clean['z_coordinate_type'] != 2)] #cleaning the values\n #clean = clean.loc[ (clean['z_coordinate'] != -99999.0 )] #cleaning the values\n \n df = df[np.isfinite(df['observation_value'])] # excluding nan values \n df = df[np.isfinite(df['z_coordinate'])]\n \n return df", "def minimal_clean_data_inplace(df):\n # There are some 'unknown' users in train dataset only\n unknown_data_lines = df['sexo'].isnull() & df['age'].isnull() & df['ind_empleado'].isnull() & \\\n df['fecha_alta'].isnull() & df['pais_residencia'].isnull()\n\n logging.info(\"- Number of lines with unknown data : %s\" % unknown_data_lines.sum())\n\n # Remove these users as clients\n _clients = df[unknown_data_lines]['ncodpers'].unique()\n bad_lines = df['ncodpers'].isin(_clients)\n df.drop(df[bad_lines].index, inplace=True)\n\n logging.info(\"- Number of columns with nan : %s\" % df.isnull().any().sum())\n\n # Remove 
accent\n df.loc[df['nomprov'] == \"CORU\\xc3\\x91A, A\", \"nomprov\"] = \"CORUNA\"\n\n unknown_cols = ['sexo',\n 'ind_empleado',\n 'pais_residencia',\n 'ult_fec_cli_1t',\n 'conyuemp',\n 'canal_entrada',\n 'nomprov',\n 'segmento',\n 'tiprel_1mes',\n 'indrel_1mes']\n # Start with cols -> replace nan with UNKNOWN\n for col in unknown_cols:\n df.loc[df[col].isnull(), col] = \"UNKNOWN\"\n\n # Set unknown renta to -99\n df.loc[df['renta'].isnull(), 'renta'] = -99\n\n # Next `fecha_alta` :\n assert df['fecha_alta'].isnull().sum() == 0, \\\n \"Need to replace nan in 'fecha_alta', count=%s\" % df['fecha_alta'].isnull().sum()\n\n # **Remove 'tipodom' and 'cod_prov' columns**\n df.drop([\"tipodom\", \"cod_prov\"], axis=1, inplace=True)\n \n # Convert 'ind_nuevo' to int\n df['ind_nuevo'] = df['ind_nuevo'].astype(int)\n \n # Remove floating point at string indrel_1mes\n df['indrel_1mes'] = df['indrel_1mes'].apply(lambda x: str(int(float(x))) if len(x) == 3 else x)\n\n if \"ind_nomina_ult1\" in df.columns and \"ind_nom_pens_ult1\" in df.columns:\n # Target labels : `ind_nomina_ult1`, `ind_nom_pens_ult1` : nan -> 0\n # I could try to fill in missing values for products by looking at previous months,\n # but since it's such a small number of values for now I'll take the cheap way out.\n df.loc[df.ind_nomina_ult1.isnull(), \"ind_nomina_ult1\"] = 0\n df.loc[df.ind_nom_pens_ult1.isnull(), \"ind_nom_pens_ult1\"] = 0\n\n # replace 'antiguedad' with the number of months between 'fecha_alta' and 'fecha_dato'\n func1 = lambda x: _to_ym_dec(to_yearmonth(x))\n func2 = lambda x: max(_to_nb_months(x), 0) \n\n v1 = df['fecha_dato'].apply(func1)\n v2 = df['fecha_alta'].apply(func1)\n v3 = (v1 - v2).apply(func2)\n df.loc[:, 'antiguedad'] = v3\n \n # Replace 'ult_fec_cli_1t' by current nb of months from fecha_dato, if negative, set to zero\n mask = df['ult_fec_cli_1t'] == 'UNKNOWN'\n df.loc[mask, 'ult_fec_cli_1t'] = df[mask]['fecha_dato']\n v1 = df['fecha_dato'].apply(func1)\n v2 = df['ult_fec_cli_1t'].apply(func1)\n v3 = (v1 - v2).apply(func2)\n df.loc[:, 'ult_fec_cli_1t'] = v3", "def clean(self, uid, states=None):\n\n # doesn't change status", "def _rm_air_temperature_status_flag(cube: Cube) -> Cube:\n coord_name = \"air_temperature status_flag\"\n try:\n coord = cube.coord(coord_name)\n except CoordinateNotFoundError:\n coord = None\n\n if coord:\n if coord.attributes != {\n \"flag_meanings\": \"above_surface_pressure below_surface_pressure\",\n \"flag_values\": np.array([0, 1], dtype=\"int8\"),\n }:\n raise ValueError(\n f\"'{coord_name}' coordinate is not of the expected form.\"\n )\n ncube = CubeList()\n for cc in cube.slices_over(\"realization\"):\n coord = cc.coord(coord_name)\n if np.ma.is_masked(coord.points):\n raise ValueError(\n f\"'{coord_name}' coordinate has unexpected mask values.\"\n )\n mask = np.asarray(coord.points)\n cc.data[mask.astype(bool)] = np.nan\n cc.remove_coord(coord_name)\n ncube.append(cc)\n cube = ncube.merge_cube()\n return cube", "def clean(df):", "def clean(args):\n with_dataset(args, Dataset._clean)", "def test_statusClean(self):\n reposDir = self.makeRepository(self.tmpDir)\n self.assertTrue(self.createCommand.isStatusClean(reposDir))", "def clean(self):\n if self.reloading:\n self.cleaned = pd.concat(\n [self.raw[0: self.brkIdx1+1],\n self.raw[self.brkIdx3+1: self.brkIdx4+1]])\n else:\n self.cleaned = self.raw[0: self.brkIdx1+1]\n self.cleaned.reset_index(drop=True, inplace=True) # update idx\n # -- Cubic spline that passes through the data\n sigmaLog = 
np.log10(self.cleaned['stress'][1:])\n cs = CubicSpline(x=sigmaLog, y=self.cleaned['e'][1:])\n self.eSigmaV = float(cs(np.log10(self.sigmaV))) # void ratio at sigmaV\n return", "def clean(self):\n self._raw_execute(\"clean\", {\"job_id\": self.job_id})", "def clean(self, quick_clean=False):\r\n date_time('Cleaning')\r\n self.cursor.execute('DELETE FROM citations WHERE citation IS NULL OR citation = \"\" ;')\r\n self.conn.execute(\"VACUUM\")\r\n self.conn.commit()\r\n gc.collect()", "def check_clear_flags(self):\n self._command(self.commands[\"CLEAR_ERROR_FLAGS\"])\n self._command(self.commands[\"CLEAR_REBOOTED_FLAG\"])", "def refreshStatus(self,installers):\n data_sizeCrc = self.data_sizeCrc\n data_sizeCrcDate = installers.data_sizeCrcDate\n abnorm_sizeCrc = installers.abnorm_sizeCrc\n missing = self.missingFiles\n mismatched = self.mismatchedFiles\n misEspmed = self.mismatchedEspms\n underrides = set()\n status = 0\n missing.clear()\n mismatched.clear()\n misEspmed.clear()\n if self.type == 0:\n status = -20\n elif data_sizeCrc:\n for file,sizeCrc in data_sizeCrc.iteritems():\n sizeCrcDate = data_sizeCrcDate.get(file)\n if not sizeCrcDate:\n missing.add(file)\n elif sizeCrc != sizeCrcDate[:2]:\n mismatched.add(file)\n if not file.shead and reModExt.search(file.s):\n misEspmed.add(file)\n if sizeCrc == abnorm_sizeCrc.get(file):\n underrides.add(file)\n if missing: status = -10\n elif misEspmed: status = 10\n elif mismatched: status = 20\n else: status = 30\n #--Clean Dirty\n dirty_sizeCrc = self.dirty_sizeCrc\n for file,sizeCrc in dirty_sizeCrc.items():\n sizeCrcDate = data_sizeCrcDate.get(file)\n if (not sizeCrcDate or sizeCrc != sizeCrcDate[:2] or\n sizeCrc == data_sizeCrc.get(file)\n ):\n del dirty_sizeCrc[file]\n #--Done\n (self.status,oldStatus) = (status,self.status)\n (self.underrides,oldUnderrides) = (underrides,self.underrides)\n return (self.status != oldStatus or self.underrides != oldUnderrides)", "async def async_set_clean(self, clean, state_mode):\n if clean not in ON_OFF_LIST:\n return\n self._clean = clean.lower()\n self._state_mode = state_mode\n await self.async_send_cmd()", "def validate_clean(self, clean):\n clean_status = cache.get(CLEAN_STATUS)\n\n if clean == CLEAN_COMMAND_START and clean_status in (CLEAN_STATUS_CLEANING, CLEAN_STATUS_STOP_REQUESTED):\n raise serializers.ValidationError(self.error_messages['cant_start_clean'])\n\n if clean == CLEAN_COMMAND_STOP and clean_status in (CLEAN_STATUS_NOT_CLEANING, CLEAN_STATUS_STOP_REQUESTED):\n raise serializers.ValidationError(self.error_messages['cant_stop_clean'])\n\n return clean", "def deep_processing_rerun_all(self):\r\n sql = \"\"\"SELECT * FROM emails \r\n WHERE email_status = 'processing' \r\n AND clean_type = 1\"\"\"\r\n df = self.db.read_sql(sql)\r\n\r\n for i in range(df.index.size):\r\n rec = df.loc[i, :]\r\n self.deep_clean_one(rec[EMAIL], dealno=rec['dealno'])\r\n self.db.con.commit()\r\n print('Reprocessed {} records that were stuck in the processing status'.format(df.index.size))", "def cleanup(adata, del_prediction=False, del_2nd_moments=False):\n\n if \"pca_fit\" in adata.uns_keys():\n adata.uns[\"pca_fit\"] = None\n if \"velocyto_SVR\" in adata.uns_keys():\n adata.uns[\"velocyto_SVR\"][\"SVR\"] = None\n if \"umap_fit\" in adata.uns_keys():\n adata.uns[\"umap_fit\"][\"fit\"] = None\n if \"velocity_pca_fit\" in adata.uns_keys():\n adata.uns[\"velocity_pca_fit\"] = None\n if \"kmc\" in adata.uns_keys():\n adata.uns[\"kmc\"] = None\n if \"kinetics_heatmap\" in adata.uns_keys():\n 
adata.uns.pop(\"kinetics_heatmap\")\n if \"hdbscan\" in adata.uns_keys():\n adata.uns.pop(\"hdbscan\")\n\n VF_keys = [i if i.startswith(\"VecFld\") else None for i in adata.uns_keys()]\n for i in VF_keys:\n if i is not None and \"VecFld2D\" in adata.uns[i].keys():\n del adata.uns[i][\"VecFld2D\"]\n\n fate_keys = [i if i.startswith(\"fate\") else None for i in adata.uns_keys()]\n for i in fate_keys:\n if i is not None:\n if adata.uns[i][\"init_cells\"] is not None:\n adata.uns[i][\"init_cells\"] = list(adata.uns[i][\"init_cells\"])\n if \"prediction\" in adata.uns[i].keys():\n if del_prediction:\n del adata.uns[i][\"prediction\"]\n if \"VecFld_true\" in adata.uns[i].keys():\n if adata.uns[i][\"VecFld_true\"] is not None:\n del adata.uns[i][\"VecFld_true\"]\n\n if del_2nd_moments:\n from .tools.utils import remove_2nd_moments\n\n remove_2nd_moments(adata)\n\n return adata", "def clean(self):\n if self.verbosity:\n self.header(\"Cleaning data files\")\n\n tsv_list = os.listdir(self.tsv_dir)\n\n if self.resume_mode:\n # get finished clean command logs of last update\n prev_cleaned = [\n x.file_name + '.TSV'\n for x in self.log_record.called.filter(\n command='cleancalaccessrawfile',\n finish_datetime__isnull=False\n )\n ]\n self.log(\"{} files already cleaned.\".format(len(prev_cleaned)))\n # remove these from tsv_list\n tsv_list = [x for x in tsv_list if x not in prev_cleaned]\n\n # Loop through all the files in the source directory\n if self.verbosity:\n tsv_list = progress.bar(tsv_list)\n for name in tsv_list:\n call_command(\n \"cleancalaccessrawfile\",\n name,\n verbosity=self.verbosity,\n keep_files=self.keep_files,\n )", "async def clean(self, ctx):\n pass", "def reset_status(self):\n logging.debug(f\"\"\"reset_status\"\"\")\n conn = self.connect(cxRepo)\n sql = f\"\"\"update {self.schemaRepo}.tablediff set server1_status = null,\n server2_status = null where server1_status = 'running'\"\"\"\n with conn:\n with conn.cursor() as curs:\n try:\n curs.execute(sql)\n except conn.DatabaseError as exc:\n error, = exc.args\n logging.error(f\"\"\"error executing {sql} : {error}\"\"\")" ]
[ "0.7062222", "0.63833", "0.5580094", "0.54640067", "0.54481626", "0.54345834", "0.53934324", "0.53778213", "0.5357182", "0.5213894", "0.5210961", "0.5201395", "0.51904184", "0.51801556", "0.5167267", "0.51572317", "0.5042241", "0.5039813", "0.5034535", "0.4995818", "0.49740258", "0.496519", "0.4959539", "0.4940271", "0.4928441", "0.48955953", "0.4857228", "0.48563048", "0.48479512", "0.48208517" ]
0.7725932
0
Sets the last_modified_by of this JsonJdbcIngestionProperties.
def last_modified_by(self, last_modified_by): self._last_modified_by = last_modified_by
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def last_modified_by(self, last_modified_by):\n if last_modified_by is not None and len(last_modified_by) > 100:\n raise ValueError(\"Invalid value for `last_modified_by`, length must be less than or equal to `100`\")\n\n self._last_modified_by = last_modified_by", "def last_modified_by(self):\n return self._last_modified_by", "def last_modified_by(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified_by\")", "def last_modified_by(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified_by\")", "def last_modified_by(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified_by\")", "def last_modified_by(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified_by\")", "def last_modified_by(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified_by\")", "def last_modified_by(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified_by\")", "def last_modified_by(self) -> str:\n return pulumi.get(self, \"last_modified_by\")", "def last_modified(self, last_modified):\n\n self._last_modified = last_modified", "def updated_by(self, updated_by: \"str\"):\n self._attrs[\"updatedBy\"] = updated_by", "def updated_by(self, updated_by: \"str\"):\n self._attrs[\"updatedBy\"] = updated_by", "def updated_by(self, updated_by: \"str\"):\n self._attrs[\"updatedBy\"] = updated_by", "def updated_by(self, updated_by):\n\n self._updated_by = updated_by", "def last_modified_by(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"last_modified_by\")", "def last_reviewed_by(self, last_reviewed_by):\n\n self._last_reviewed_by = last_reviewed_by", "def last_modified_on(self, last_modified_on):\n\n self._last_modified_on = last_modified_on", "def setLastModified(when):", "def last_updated_user(self, last_updated_user):\n self._last_updated_user = last_updated_user", "def last_modification(self, last_modification):\n\n self._last_modification = last_modification", "def last_updated(self, last_updated: str):\n\n self._last_updated = last_updated", "def last_modified_dts(self, last_modified_dts):\n\n self._last_modified_dts = last_modified_dts", "def updated_by(self) -> Optional[pulumi.Input['UserInfoArgs']]:\n return pulumi.get(self, \"updated_by\")", "def last_modified_by_type(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified_by_type\")", "def last_modified_by_type(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified_by_type\")", "def last_modified_by_type(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified_by_type\")", "def last_modified_by_type(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified_by_type\")", "def last_modified_by_type(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified_by_type\")", "def last_modified_by_type(self) -> Optional[str]:\n return pulumi.get(self, \"last_modified_by_type\")", "def last_modified_by_type(self) -> str:\n return pulumi.get(self, \"last_modified_by_type\")" ]
[ "0.70193124", "0.61000824", "0.6093348", "0.6093348", "0.6093348", "0.6093348", "0.6093348", "0.6093348", "0.6086628", "0.60032386", "0.59836704", "0.59836704", "0.59836704", "0.5972933", "0.5936987", "0.58601046", "0.5848984", "0.54824483", "0.5478924", "0.5438711", "0.5376531", "0.5350021", "0.527146", "0.52665335", "0.52665335", "0.52665335", "0.52665335", "0.52665335", "0.52665335", "0.52418387" ]
0.73591125
1