Column           Type            Range
query            stringlengths   9 to 9.05k
document         stringlengths   10 to 222k
metadata         dict
negatives        listlengths     30 to 30
negative_scores  listlengths     30 to 30
document_score   stringlengths   4 to 10
document_rank    stringclasses   2 values
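Each row pairs a natural-language query with a positive code document, thirty mined hard negatives, and their similarity scores; the metadata field declares a triplet objective over (query, document, negatives), while document_score and document_rank record how the positive itself scored. A minimal sketch of turning rows with this schema into training triplets is shown below; the repository path and the choice of keeping the five highest-scoring negatives are illustrative assumptions, not part of the dataset.

    # Sketch: build (query, positive, negative) triplets from rows with this schema.
    # The dataset path is a placeholder, not the real repository name.
    from datasets import load_dataset

    ds = load_dataset("user/code-retrieval-triplets", split="train")  # hypothetical path

    triplets = []
    for row in ds:
        query = row["query"]                      # natural-language description
        positive = row["document"]                # matching code snippet
        negatives = row["negatives"]              # 30 mined hard negatives
        scores = [float(s) for s in row["negative_scores"]]
        # keep the hardest negatives, here the five with the highest scores
        ranked = sorted(zip(scores, negatives), key=lambda p: p[0], reverse=True)
        triplets.extend((query, positive, neg) for _, neg in ranked[:5])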
Create an asynchronous event object.
def create_event() -> abc.Event: return get_asynclib().Event()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def createEvent(self, event: Event) -> None:", "async def async_event(self, event: str, *args, **kwargs):\n for cb in self.event_handlers[event]:\n asyncio.ensure_future(cb(*args, **kwargs), loop=self.loop)", "def create_new_event(self):\n pass", "async def start_events_async(self) -> None:\n raise NotImplementedError(\"start_events_async must be implemented for {}\".format(self.__class__.__name__))", "def asyncinit(cls):\r\n __new__ = cls.__new__\r\n\r\n async def init(obj, *arg, **kwarg):\r\n await obj.__init__(*arg, **kwarg)\r\n return obj\r\n\r\n def new(cls, *arg, **kwarg):\r\n obj = __new__(cls, *arg, **kwarg)\r\n coro = init(obj, *arg, **kwarg)\r\n return coro\r\n\r\n cls.__new__ = new\r\n return cls", "def event(self, event_name):\r\n return Event(self, event_name)", "def Event(name):\n c = new_class(name, bases=(_Event,))(name)\n return c", "def _make_cpp_event(type, target):\n return EventCpp(type, target)", "def get_next_event(self):\n yield asleep(0.000001)\n event = AsyncEvent(seq=self.event_seq, details='foo')\n self.event_seq += 1\n returnValue(event)", "def event_object(self):\n return gevent.event.Event()", "async def eventworker_factory(cot_url: str, event_queue: asyncio.Queue) -> pytak.Worker:\n reader, writer = await protocol_factory(cot_url)\n return pytak.EventWorker(event_queue, writer)", "def create_event(self, event_type):\n setattr(self, event_type, lambda *args, **kwargs: None)\n self.register_event_type(event_type)", "def create_event(self, **kwargs):\n events = self.variables['events']\n events.append(kwargs)\n self.variables['events'] = events", "def event(self, coro):\n if not asyncio.iscoroutinefunction(coro):\n raise TypeError('Event must be a coroutine.')\n\n setattr(self, coro.__name__, coro)\n self._listeners[coro.__name__] = coro\n return coro", "def create_event():\n json_data = request.get_json()\n data, error = EventSchema().load(json_data)\n if error:\n return make_response(jsonify({\"error\": error}), 400)\n oEvent = Event.create(data)\n return make_response(jsonify(oEvent.as_dict()))", "def create_event(self, event_name: str, **kwargs: Any) -> CustomEvent:\n return CustomEvent(event_name, self.handler_properties, **kwargs)", "def new_event(self, subject=None):\n return self.event_constructor(parent=self, subject=subject)", "def create_event_object(self,\n event_type,\n code,\n value,\n timeval=None):\n if not timeval:\n timeval = self.__get_timeval()\n try:\n event_code = self.manager.codes['type_codes'][event_type]\n except KeyError:\n raise UnknownEventType(\n \"We don't know what kind of event a %s is.\" % event_type)\n event = struct.pack(EVENT_FORMAT,\n timeval[0],\n timeval[1],\n event_code,\n code,\n value)\n return event", "def get_ready_event(self):\n ev = Event()\n ev.set()\n return ev", "def create_event_object(self,\n event_type,\n code,\n value,\n timeval=None):\n if not timeval:\n self.update_timeval()\n timeval = self.timeval\n try:\n event_code = self.type_codes[event_type]\n except KeyError:\n raise UnknownEventType(\n \"We don't know what kind of event a %s is.\" % event_type)\n\n event = struct.pack(EVENT_FORMAT,\n timeval[0],\n timeval[1],\n event_code,\n code,\n value)\n return event", "def __call__(self, **kwargs):\n kwargs.setdefault('timeout', self.timeout)\n kwargs.setdefault('send_line', self.send_line)\n kwargs['process_results'] = self.process_results\n return async_events(self.context, self.events, **kwargs)", "def async_on_event(self, target: Callable[..., Awaitable]) -> None:\n\n async def 
_async_on_event(event_data: dict):\n \"\"\"Act on the Message object.\"\"\"\n await self._watchdog.trigger()\n message = websocket_event_from_raw_data(event_data)\n await target(message)\n\n self._sio.on(\"event\", _async_on_event, namespace=self._namespace)", "def build_future(self, compat = True, asyncio = True):\r\n\r\n # creates a normal future object, setting the current loop (global) as\r\n # the loop, then returns the future to the caller method\r\n loop = self.get_loop(compat = compat, asyncio = asyncio)\r\n future = asynchronous.Future(loop = loop)\r\n return future", "def _make_event(self, tv_sec, tv_usec, ev_type, code, value):\n event_type = self.manager.get_event_type(ev_type)\n eventinfo = {\n \"ev_type\": event_type,\n \"state\": value,\n \"timestamp\": tv_sec + (tv_usec / 1000000),\n \"code\": self.manager.get_event_string(event_type, code)\n }\n\n return InputEvent(self, eventinfo)", "def event(self, id):\r\n return Event(self, id)", "def test_future_event(self):\n pass", "def get():\n _processEvents()\n return _event_generator()", "def init_events_transmitter():\n class StatusListener(SubscribeCallback):\n def status(self, pubnub, status):\n event = \"unknown\"\n\n if status.operation == PNOperationType.PNSubscribeOperation \\\n and status.category == PNStatusCategory.PNConnectedCategory:\n event = \"Connect\"\n elif status.operation == PNOperationType.PNUnsubscribeOperation \\\n and status.category == PNStatusCategory.PNAcknowledgmentCategory:\n event = \"Unsubscribe\"\n\n asyncio.ensure_future(pubnub.publish().channel('status-' + APP_KEY).message({\n \"event\": event\n }).future(), loop=loop)\n\n def presence(self, pubnub, presence):\n pass\n\n def message(self, pubnub, message):\n pass\n\n listener = StatusListener()\n pubnub.add_listener(listener)", "def as_future(d: Deferred) -> asyncio.Future:\n return d.asFuture(asyncio.get_event_loop())", "def create_event(wrapped, instance, args, kwargs, start_time, response,\n exception):\n event = PyMongoEvent(\n wrapped,\n instance,\n args,\n kwargs,\n start_time,\n response,\n exception\n )\n trace_factory.add_event(event)" ]
[ "0.77821743", "0.63274366", "0.6155302", "0.59984094", "0.599693", "0.59448147", "0.5877639", "0.585124", "0.5843828", "0.58366704", "0.58196694", "0.5744085", "0.57186764", "0.56930447", "0.5687933", "0.5659814", "0.5600452", "0.55845153", "0.55748785", "0.55728734", "0.55705607", "0.55516875", "0.5539137", "0.5513016", "0.5500335", "0.5453885", "0.5442664", "0.5429406", "0.54056394", "0.54042834" ]
0.8588365
0
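The positive document in this row appears to be anyio's deprecated create_event() helper; its modern replacement is instantiating Event() directly. Assuming those semantics, the sketch below shows one task waiting on the event while another sets it; the waiter coroutine and task-group wiring are illustrative, not taken from the row.

    # Minimal usage sketch, assuming anyio-style Event semantics.
    import anyio

    async def waiter(event):
        await event.wait()              # suspends until the event is set
        print("event fired")

    async def main():
        event = anyio.Event()           # modern spelling of create_event()
        async with anyio.create_task_group() as tg:
            tg.start_soon(waiter, event)
            event.set()                 # wakes the waiting task

    anyio.run(main)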
Create an asynchronous semaphore.
def create_semaphore(value: int, *, max_value: Optional[int] = None) -> Semaphore: return Semaphore(value, max_value=max_value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n self.clock = 0\n self.sem = asyncio.Semaphore(1)", "def get_semaphore(name, pid, time_out=None, pause=0.1):\n\n semaphore_key = get_semaphore_key(name, pid)\n while True:\n try:\n return Semaphore(semaphore_key)\n\n except ExistentialError:\n sleep(pause)\n\n if time_out is not None:\n time_out -= pause\n if time_out <= 0.0:\n return None", "async def process_with_semaphore(item):\n async with readahead_sem:\n async with sem:\n return await process_one(item)", "def acquire(self, blocking=True, shared=False):", "def release(self) -> DeprecatedAwaitable:\n if self._max_value is not None and self._value == self._max_value:\n raise ValueError('semaphore released too many times')\n\n self._value += 1\n if self._waiters:\n self._waiters.popleft().set()\n\n return DeprecatedAwaitable(self.release)", "def __init__(self, *args, **kwargs):\n self.args = args\n self.kwargs = kwargs\n self.data = None\n self.lock = threading.Semaphore(0)", "def task(t, msg, sem):\n if not t:\n while True:\n threading.Event().wait(1)\n sem.acquire()\n print(msg)\n sem.release()\n \n threading.Event().wait(t)\n sem.acquire()\n print(msg)\n sem.release()", "def asyncinit(cls):\r\n __new__ = cls.__new__\r\n\r\n async def init(obj, *arg, **kwarg):\r\n await obj.__init__(*arg, **kwarg)\r\n return obj\r\n\r\n def new(cls, *arg, **kwarg):\r\n obj = __new__(cls, *arg, **kwarg)\r\n coro = init(obj, *arg, **kwarg)\r\n return coro\r\n\r\n cls.__new__ = new\r\n return cls", "def active(value):\r\n self.context.active = threading.BoundedSemaphore(value=value)", "def test_non_blocking_acquire():\r\n with throttle_client(b\"[semaphores]\\nA=1\") as client:\r\n first = client.new_peer(expires_in=timedelta(minutes=1))\r\n second = client.new_peer(expires_in=timedelta(minutes=1))\r\n # Acquire first lease\r\n client.acquire(first, \"A\")\r\n # Second lease is pending, because we still hold first\r\n assert not client.acquire(second, \"A\")\r\n # A request to the server is also telling us so\r\n assert not client.is_acquired(second)\r\n client.release(first)\r\n # After releasing the first lease the second lease should no longer be\r\n # pending\r\n assert client.is_acquired(second)", "def wait(self):\n\n self.sem = threading.Semaphore(0)\n self.sem.acquire()", "async def limit_client(host, port, loop, sem):\n async with sem:\n return await client(host, port, loop)", "def generate_ripe_request_tokens(sema: mp.Semaphore, limit: int, finish_event: threading.Event):\n logger.debug('generate thread started')\n while not finish_event.is_set():\n time.sleep(2 / limit)\n try:\n sema.release()\n sema.release()\n except ValueError:\n continue\n logger.debug('generate thread stoopped')", "def create_lock() -> Lock:\n return Lock()", "def create_event() -> abc.Event:\n return get_asynclib().Event()", "async def __aenter__(self):\n self.acquired = True\n return self", "def test_acquire():\r\n with throttle_client(b\"[semaphores]\\nA=1\") as client:\r\n one = client.new_peer(expires_in=timedelta(minutes=1))\r\n two = client.new_peer(expires_in=timedelta(minutes=1))\r\n # Acquire first lease\r\n client.acquire(one, \"A\")\r\n # Second is pending\r\n assert not client.acquire(two, \"A\", block_for=timedelta(milliseconds=10))\r\n # Release one, so second is acquired\r\n client.release(one)\r\n assert client.acquire(two, \"A\")", "def run(self):\n # Don't call this from the thread which it represents.\n assert eventlet.corolocal.get_ident() != self.id\n self.caller_sem = Semaphore(0)\n self.my_sem.release()\n 
self.caller_sem.acquire() # Wait for it to finish.", "async def __aenter__(self):\n assert self._task is None\n self._task = self._loop.create_task(self._run())\n return self", "def run_blocking(promise: Coroutine[Any, Any, _T]) -> _T:\n loop = asyncio.get_event_loop()\n return loop.run_until_complete(promise)", "def create_async_task(self, loop: AbstractEventLoop) -> TaskAwaitable:\n self._agent.runtime.set_loop(loop)\n if not isinstance(self._agent.runtime, AsyncRuntime): # pragma: nocover\n raise ValueError(\n \"Agent runtime is not async compatible. Please use runtime_mode=async\"\n )\n return loop.create_task(self._agent.runtime.start_and_wait_completed()) # type: ignore", "def test_lock_blocks():\r\n with throttle_client(b\"[semaphores]\\nA=1\") as client:\r\n first = client.new_peer(expires_in=timedelta(minutes=1))\r\n second = client.new_peer(expires_in=timedelta(minutes=1))\r\n # Acquire first lock\r\n client.acquire(first, \"A\")\r\n\r\n acquired = False\r\n\r\n def wait_for_second_lock():\r\n nonlocal acquired\r\n acquired = client.acquire(\r\n second, semaphore=\"A\", block_for=timedelta(seconds=2)\r\n )\r\n\r\n t = Thread(target=wait_for_second_lock)\r\n t.start()\r\n client.release(first)\r\n t.join()\r\n # Second lock is no longer pending, because we released first and t is finished\r\n assert acquired", "async def acquire(self) -> None:\n try:\n self.acquire_nowait()\n except WouldBlock:\n event = create_event()\n self._waiters.append(event)\n try:\n await event.wait()\n except BaseException:\n if not event.is_set():\n self._waiters.remove(event)\n\n raise\n\n self.acquire_nowait()", "def acquire(self, blocking=True, timeout=None):\n # pylint:disable=too-many-return-statements,too-many-branches\n # Sadly, the body of this method is rather complicated.\n if self._multithreaded is _UNSET:\n self._multithreaded = self._get_thread_ident()\n elif self._multithreaded != self._get_thread_ident():\n self._multithreaded = _MULTI\n\n # We conceptually now belong to the hub of the thread that\n # called this, whether or not we have to block. Note that we\n # cannot force it to be created yet, because Semaphore is used\n # by importlib.ModuleLock which is used when importing the hub\n # itself! This also checks for cross-thread issues.\n invalid_thread_use = None\n try:\n self._capture_hub(False)\n except InvalidThreadUseError as e:\n # My hub belongs to some other thread. We didn't release the GIL/object lock\n # by raising the exception, so we know this is still true.\n invalid_thread_use = e.args\n e = None\n if not self.counter and blocking:\n # We would need to block. So coordinate with the main hub.\n return self.__acquire_from_other_thread(invalid_thread_use, blocking, timeout)\n\n if self.counter > 0:\n self.counter -= 1\n return True\n\n if not blocking:\n return False\n\n if self._multithreaded is not _MULTI and self.hub is None: # pylint:disable=access-member-before-definition\n self.hub = get_hub() # pylint:disable=attribute-defined-outside-init\n\n if self.hub is None and not invalid_thread_use:\n # Someone else is holding us. There's not a hub here,\n # nor is there a hub in that thread. 
We'll need to use regular locks.\n # This will be unfair to yet a third thread that tries to use us with greenlets.\n return self.__acquire_from_other_thread(\n (None, None, self._getcurrent(), \"NoHubs\"),\n blocking,\n timeout\n )\n\n # self._wait may drop both the GIL and the _lock_lock.\n # By the time we regain control, both have been reacquired.\n try:\n success = self._wait(timeout)\n except LoopExit as ex:\n args = ex.args\n ex = None\n if self.counter:\n success = True\n else:\n # Avoid using ex.hub property to keep holding the GIL\n if len(args) == 3 and args[1].main_hub:\n # The main hub, meaning the main thread. We probably can do nothing with this.\n raise\n return self.__acquire_from_other_thread(\n (self.hub, get_hub_if_exists(), self._getcurrent(), \"LoopExit\"),\n blocking,\n timeout)\n\n if not success:\n assert timeout is not None\n # Our timer expired.\n return False\n\n # Neither our timer or another one expired, so we blocked until\n # awoke. Therefore, the counter is ours\n assert self.counter > 0, (self.counter, blocking, timeout, success,)\n self.counter -= 1\n return True", "def run(self): \n\n #acquire the semaphore \n global frame, wheel \n while True:\n self.frame.acquire() \n frame -= 1\n print \"Consumer(%s):consume frame, now frame:%s, wheels:%s\\n\" %(self.name, frame,wheel) \n self.empty.release()\n self.s1.release()\n\n self.wheel.acquire()\n self.wheel.acquire()\n wheel -= 2\n print \"Consumer(%s):consume wheels, now frame:%s, wheels:%s\\n\" %(self.name, frame,wheel) \n self.empty.release()\n self.empty.release()\n self.s2.release()\n self.s2.release()\n time.sleep(2) \n print \"!!!!creat a car\\n\"", "def __await__(self):\n\n async def _init():\n if not self._active:\n await self._setup()\n self._active = True\n self._awaited = True\n return self\n\n return _init().__await__()", "async def bound_fetch(sem, url, session):\n async with sem:\n await fetch(url, session)", "def resourcemanager(acquire, release):\n @contextmanager\n def manager(*args):\n resource = acquire(*args)\n try:\n yield resource\n finally:\n release(resource)\n return manager", "def async_manager(self):\n while True:\n (request, args, kwargs) = self.pool.get()\n if request is None:\n break\n request(*args, **kwargs)", "def sinaliza_passageiros(self, sem):\n for i in range(self.counter.max):\n sem.release()\n sleep(random.randrange(0,2))" ]
[ "0.672445", "0.5945115", "0.57099426", "0.5578381", "0.55409366", "0.54068184", "0.53410405", "0.52689165", "0.52660245", "0.5145877", "0.51016116", "0.5043989", "0.4977009", "0.4974455", "0.4938569", "0.4906048", "0.48825568", "0.48692894", "0.48491052", "0.48244274", "0.48015106", "0.47894627", "0.47814217", "0.47582015", "0.47529233", "0.47507071", "0.4730082", "0.47129795", "0.47044635", "0.4686018" ]
0.7126209
0
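This row's document looks like anyio's create_semaphore() helper, since superseded by constructing Semaphore directly. Assuming those semantics, the sketch below bounds concurrency by using the semaphore as an async context manager; the work coroutine and the limit of 4 are illustrative.

    # Minimal concurrency-limiting sketch, assuming anyio's Semaphore.
    import anyio

    async def work(i, sem):
        async with sem:                 # at most 4 tasks run this block at once
            await anyio.sleep(0.1)      # stand-in for real work
            print(f"task {i} done")

    async def main():
        sem = anyio.Semaphore(4)
        async with anyio.create_task_group() as tg:
            for i in range(10):
                tg.start_soon(work, i, sem)

    anyio.run(main)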
Create a capacity limiter.
def create_capacity_limiter(total_tokens: float) -> abc.CapacityLimiter: return get_asynclib().CapacityLimiter(total_tokens)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_capacity(self, m, comp, prod_name):\n name = comp.name\n cap_res = comp.get_capacity_var() # name of resource that defines capacity\n r = m.resource_index_map[comp][cap_res] # production index of the governing resource\n # production is always lower than capacity\n ## NOTE get_capacity returns (data, meta) and data is dict\n ## TODO does this work with, e.g., ARMA-based capacities?\n ### -> \"time\" is stored on \"m\" and could be used to correctly evaluate the capacity\n cap = comp.get_capacity(None, None, None, None)[0][cap_res] # value of capacity limit (units of governing resource)\n rule = partial(self._capacity_rule, prod_name, r, cap)\n constr = pyo.Constraint(m.T, rule=rule)\n setattr(m, '{c}_{r}_capacity_constr'.format(c=name, r=cap_res), constr)\n # minimum production\n print('DEBUGG dispatchable?', comp.name, comp.is_dispatchable())\n if comp.is_dispatchable() == 'fixed':\n minimum = cap\n var = getattr(m, prod_name)\n values = var.get_values()\n for k in values:\n values[k] = cap\n var.set_values(values)\n else:\n minimum = 0 # -> for now just use 0, but fix this! XXX\n print('DEBUGG ... min:', minimum)\n rule = partial(self._min_prod_rule, prod_name, r, cap, minimum)\n constr = pyo.Constraint(m.T, rule=rule)\n setattr(m, '{c}_{r}_minprod_constr'.format(c=name, r=cap_res), constr)", "def new_capacity_rule(mod, g, p):\n return 0", "def __init__(self, capacity, units=0):\n self.capacity = capacity\n self.units = units", "def new_capacity_rule(mod, prj, prd):\n return 0", "def capacity(self) -> Capacity:\n raw = self._call('GET', 'capacity')\n return Capacity.parse_raw(raw)", "def __init__(self, capacity):\n self.experiences = RingBuf(capacity)", "def capacity(self):\n raise NotImplementedError()", "def __init__(self, capacity, initial):\n\t\tself.capacity = capacity\n\t\tself.amount = initial", "def capacitygroup_create(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_capacitygroup_create(cmd_ctx, cpc, options))", "def set_capacity(self, cap):\n return self.get_interaction().set_capacity(cap)", "def __init__(__self__, *,\n capacity: Optional[int] = None,\n name: Optional[str] = None):\n if capacity is not None:\n pulumi.set(__self__, \"capacity\", capacity)\n if name is not None:\n pulumi.set(__self__, \"name\", name)", "def set_capacity(self, capacity):\r\n params = {\r\n 'AutoScalingGroupName' : self.name,\r\n 'DesiredCapacity' : capacity,\r\n }\r\n req = self.connection.get_object('SetDesiredCapacity', params,\r\n Request)\r\n self.connection.last_request = req\r\n return req", "def createLimit(name, maxValue):\n return Limit(Cuebot.getStub('limit').Create(\n limit_pb2.LimitCreateRequest(name=name, max_value=maxValue), timeout=Cuebot.Timeout))", "def capacitygroup_group():", "def capacity(self, value: typing.Union[str, int, None]):\n self._properties[\"capacity\"] = _types.integer_or_string(value)", "def __init__(__self__, *,\n capacity: Optional[pulumi.Input[int]] = None,\n name: Optional[pulumi.Input[str]] = None,\n tier: Optional[pulumi.Input[str]] = None):\n if capacity is not None:\n pulumi.set(__self__, \"capacity\", capacity)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if tier is not None:\n pulumi.set(__self__, \"tier\", tier)", "def Capacity(self) -> int:", "def knapsack(items, capacity):\r\n pass", "def capacity_used(self):\n raise NotImplementedError()", "def capacity_rule(mod, g, p):\n return mod.stor_spec_power_capacity_mw[g, p]", "def cfCreate(self, key, capacity, expansion=None, bucket_size=None, 
max_iterations=None):\n params = [key, capacity]\n self.appendExpansion(params, expansion)\n self.appendBucketSize(params, bucket_size)\n self.appendMaxIterations(params, max_iterations)\n\n return self.execute_command(self.CF_RESERVE, *params)", "def set_capacity(self, cap):\n self._capacity.type = 'value'\n self._capacity._value = float(cap) # TODO getter/setter", "def _check_capacity_limit(self, res, amt, balance, meta, raven_vars, dispatch, t, level):\n # note \"amt\" has units of AMOUNT not RATE (resource, not resource per second)\n sign = np.sign(amt)\n # are we storing or providing?\n #print('DEBUGG supposed current level:', level)\n if sign < 0:\n # we are being asked to consume some\n cap, meta = self.get_capacity(meta, raven_vars, dispatch, t)\n available_amount = cap[res] - level\n #print('Supposed Capacity, Only calculated ins sign<0 (being asked to consumer)',cap)\n else:\n # we are being asked to produce some\n available_amount = level\n # the amount we can consume is the minimum of the requested or what's available\n delta = sign * min(available_amount, abs(amt))\n return {res: delta}, meta", "def capacity(self):\n return self._cap", "def __init__(self, hard_limit=2000):\n self.hard_limit = hard_limit", "def __init__(self, capacity, fillValue=None):\r\n self._items = list()\r\n for count in range(capacity):\r\n self._items.append(fillValue)", "def bandwidth_limit_rule_create(request, policy_id, **kwargs):\n body = {'bandwidth_limit_rule': kwargs}\n if 'tenant_id' not in kwargs:\n kwargs['tenant_id'] = request.user.project_id\n body = {'bandwidth_limit_rule': kwargs}\n rule = 'bandwidth_limit_rule'\n bandwidth_limit_rule = neutronclient(request)\\\n .create_bandwidth_limit_rule(policy_id, body).get(rule)\n return BandwidthLimitRule(bandwidth_limit_rule)", "def test_choosingPerformerWithLocalCapacity(self):\n # Give it some local capacity.\n\n # In this case we want pcp to have a workerPool, so create a new pcp\n # for this test\n self.pcp = ControllerQueue(None, None)\n wlf = self.pcp.workerListenerFactory()\n proto = wlf.buildProtocol(None)\n proto.makeConnection(StringTransport())\n # Sanity check.\n self.assertEqual(len(self.pcp.workerPool.workers), 1)\n self.assertEqual(self.pcp.workerPool.hasAvailableCapacity(), True)\n # Now it has some capacity.\n self.checkPerformer(WorkerConnectionPool)", "def __init__(self, capacity, fillValue=None):\r\n self._items = list()\r\n self._logicalSize = 0\r\n # Track the capacity and fill value for adjustments later\r\n self._capacity = capacity\r\n self._fillValue = fillValue\r\n for count in range(capacity):\r\n self._items.append(fillValue)", "def capacity(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"capacity\")" ]
[ "0.69381076", "0.67714393", "0.6637287", "0.66100174", "0.6550799", "0.65026176", "0.6429049", "0.6293804", "0.62928146", "0.6212927", "0.6205634", "0.61527216", "0.6143372", "0.6116694", "0.6098608", "0.6046397", "0.60152835", "0.6004185", "0.59930545", "0.59619427", "0.5949021", "0.5913436", "0.5900569", "0.5870388", "0.5865934", "0.58644944", "0.5859578", "0.5842417", "0.5828409", "0.58107626" ]
0.7675186
0
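The document here matches anyio's create_capacity_limiter(). Assuming anyio's CapacityLimiter semantics, the sketch below caps the number of workers holding a token at the limiter's total_tokens; the worker body and the value 3 are illustrative.

    # Minimal sketch of borrowing tokens from a capacity limiter, assuming anyio.
    import anyio

    async def worker(i, limiter):
        async with limiter:             # borrows one of the 3 tokens
            await anyio.sleep(0.1)
            print(f"worker {i} done")

    async def main():
        limiter = anyio.CapacityLimiter(3)
        async with anyio.create_task_group() as tg:
            for i in range(8):
                tg.start_soon(worker, i, limiter)

    anyio.run(main)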
Calculate the wall panels area from CC
def walls_cc(lenght, width, wall_height=3, roof_height=4):
    a = min(0.1*lenght, 0.1*width, 0.4*(wall_height+0.5*roof_height))
    trian = 0.5*width*roof_height
    trian_5 = 0.5*a*a*(wall_height/roof_height)
    # trian_4 = trian - 2*trian_5
    area = (lenght + width)*2*wall_height + 2*trian
    area_5 = 8*a*wall_height + 4*trian_5
    area_4 = area - area_5
    return area, area_4, area_5
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update(self):\n self.center_x += self.change_x\n self.center_y += self.change_y\n\n # boundary for the sides of the screen\n if self.left < 0:\n self.left = 0\n if self.right > settings.WIDTH:\n self.right = settings.WIDTH\n\n # boundary for the top and bottom of the screen\n if self.bottom < 0:\n self.bottom = 3\n if self.top > settings.HEIGHT:\n self.top = settings.HEIGHT\n\n # boundary of wall left up\n if (self.right > 185 and self.left < 180 and self.top > 338 and\n self.bottom < 464):\n self.right = 184\n if (self.left < 212 and self.left > 190 and self.bottom < 435 and\n self.top > 338):\n self.left = 212\n if (self.right > 185 and self.left < 312 and self.bottom < 464 and\n self.top > 460):\n self.bottom = 464\n if (self.top > 435 and self.top < 450 and self.left >= 212 and\n self.left < 311):\n self.top = 435\n if (self.top > 336 and self.bottom < 350 and self.left < 211 and\n self.right > 185):\n self.top = 336\n if (self.left < 312 and self.right > 290 and self.bottom < 464 and\n self.top > 436):\n self.left = 312\n\n # boundary of wall right up\n if (self.right > 590 and self.left < 586 and self.top > 338 and\n self.bottom < 464):\n self.left = 586\n if (self.right > 560 and self.right < 582 and self.bottom < 435 and\n self.top > 338):\n self.right = 560\n if (self.right > 462 and self.left < 586 and self.bottom < 464 and\n self.top > 460):\n self.bottom = 464\n if (self.top > 435 and self.top < 450 and self.right <= 560 and\n self.right > 462):\n self.top = 435\n if (self.top > 336 and self.bottom < 350 and self.left < 586 and\n self.right > 561):\n self.top = 336\n if (self.left < 440 and self.right > 462 and self.bottom < 464 and\n self.top > 436):\n self.right = 462\n\n # boundary of wall left down\n if (self.right > 185 and self.left < 180 and self.top > 88 and\n self.bottom < 213):\n self.right = 184\n if (self.left < 212 and self.right > 200 and self.top > 111 and\n self.bottom < 213):\n self.left = 212\n if (self.right > 185 and self.left < 312 and self.top > 88 and self.top < 100):\n self.top = 88\n if (self.bottom < 111 and self.bottom > 100 and self.left >= 212 and\n self.left < 311):\n self.bottom = 112\n if (self.top > 336 and self.bottom < 213 and self.left < 211 and\n self.right > 185):\n self.bottom = 213\n if (self.left < 312 and self.right > 290 and self.top > 88 and\n self.bottom < 111):\n self.left = 312\n\n # boundary of wall lef right down\n if (self.right > 590 and self.left < 586 and self.top > 88 and\n self.bottom < 213):\n self.left = 586\n if (self.right > 560 and self.left < 582 and self.top > 111 and\n self.bottom < 213):\n self.right = 560\n if (self.right > 462 and self.left < 586 and self.top > 88 and self.top < 100):\n self.top = 88\n if (self.bottom < 111 and self.bottom > 100 and self.right <= 560 and\n self.right > 462):\n self.bottom = 112\n if (self.top > 336 and self.bottom < 213 and self.left < 586 and\n self.right > 561):\n self.bottom = 213\n if (self.left < 440 and self.right > 462 and self.top > 88 and\n self.bottom < 111):\n self.right = 462", "def Intarea( xc, yc, r, x0, x1, y0, y1):\n\n#\n# Shift the objects so that the circle is at the origin.\n#\n x0 = x0 - xc\n y0 = y0 - yc\n x1 = x1 - xc\n y1 = y1 - yc\n\n return Oneside( x1, y0, y1, r ) + Oneside( y1, -x1, -x0, r ) +\\\n Oneside( -x0, -y1, -y0, r ) + Oneside( -y0, x0, x1, r )", "def area(self):\n return _cantera.wall_area(self.__wall_id)", "def compute_area(self):\r\n\r\n \"\"\"Косое произведение векторов\r\n A = (x2-x1; y2-y1; z2-z1)\r\n B = (x3-x1; y3-y1; z3-z1)\r\n S 
= 0.5*sqrt((Ay*Bz - Az*By)^2 + (Az*Bx - Ax*Bz)^2 + (Ax*By - Ay*Bx)^2 )\r\n \"\"\"\r\n a_x = self.x2 - self.x1\r\n a_y = self.y2 - self.y1\r\n a_z = self.z2 - self.z1\r\n\r\n b_x = self.x3 - self.x1\r\n b_y = self.y3 - self.y1\r\n b_z = self.z3 - self.z1\r\n\r\n self.area = 0.5 * math.sqrt((a_y * b_z - a_z * b_y) ** 2 + (a_z * b_x - a_x * b_z) ** 2 + (a_x * b_y - a_y * b_x) ** 2)\r\n\r\n \"\"\"По теореме Герона\"\"\"\r\n # a = math.sqrt((self.x1-self.x2)**2 + (self.y1-self.y2)**2 + (self.z1-self.z2)**2)\r\n # b = math.sqrt((self.x1-self.x3)**2 + (self.y1-self.y3)**2 + (self.z1-self.z3)**2)\r\n # c = math.sqrt((self.x2-self.x3)**2 + (self.y2-self.y3)**2 + (self.z2-self.z3)**2)\r\n # p = 0.5 * (a + b + c)\r\n # self.area = math.sqrt(p * (p - a) * (p - b) * (p - c))\r", "def roof_cc(lenght, width, overhang=1, wall_height=3, roof_height=4):\n a = min(0.1 * lenght, 0.1 * width, 0.4 * (wall_height + 0.5 * roof_height))\n\n area = (lenght + overhang)*(width + overhang)\n area_3 = 8*a**2\n area_1 = (lenght - 2)*(width - 4*a)\n area_2 = area - area_3 - area_1\n return area, area_1, area_2, area_3", "def detectWallCollision(self): \n if self.posn_x > cw - self.ball_width: # Collision with right-hand container wall. \n self.velocity_x = -self.velocity_x * self.coef_restitution # reverse direction. \n self.posn_x = cw - self.ball_width * 1.1 # anti-stick to the wall \n if self.posn_x < 1: # Collision with left-hand wall. \n self.velocity_x = -self.velocity_x * self.coef_restitution \n self.posn_x = 2 # anti-stick to the wall \n if self.posn_y < self.ball_height: # Collision with ceiling. \n self.velocity_y = -self.velocity_y * self.coef_restitution \n self.posn_y = self.ball_height * 1.1 # ceiling collision anti-stick \n if self.posn_y > ch - self.ball_height * 1.1 : # Floor collision. \n self.velocity_y = - self.velocity_y * self.coef_restitution \n self.posn_y = ch - self.ball_height * 1.1 # anti-stick. 
Prevents out-of-bounds ball loss (stickiness) ", "def area(self):\n return (self.baselength1 + self.baselength2)*self.height/2", "def set_channel_walls(self,walls=['left','right','top','bottom']):\n solid_list_a = np.empty(0).flatten()\n solid_list_b = np.empty(0).flatten()\n solid_list_c = np.empty(0).flatten()\n solid_list_d = np.empty(0).flatten()\n\n for w in walls:\n if w=='right':\n solid_list_a = np.array(np.where((self.x==0.))).flatten()\n elif w=='left':\n solid_list_b = np.array(np.where((self.x == self.Lx_p))).flatten()\n elif w=='top':\n solid_list_d = np.array(np.where((self.y == self.Ly_p))).flatten()\n elif w=='bottom':\n solid_list_c = np.array(np.where((self.y == 0.))).flatten()\n\n solid_list = np.array(np.union1d(solid_list_a,solid_list_b)); \n solid_list = np.array(np.union1d(solid_list,solid_list_c))\n self.solid_list = np.array(np.union1d(solid_list,solid_list_d))", "def set_channel_walls(self,walls=['left','right','top','bottom']):\n solid_list_a = np.empty(0).flatten()\n solid_list_b = np.empty(0).flatten()\n solid_list_c = np.empty(0).flatten()\n solid_list_d = np.empty(0).flatten()\n\n for w in walls:\n if w=='right':\n solid_list_a = np.array(np.where((self.x==0.))).flatten()\n elif w=='left':\n solid_list_b = np.array(np.where((self.x == self.Lx_p))).flatten()\n elif w=='top':\n solid_list_d = np.array(np.where((self.y == self.Ly_p))).flatten()\n elif w=='bottom':\n solid_list_c = np.array(np.where((self.y == 0.))).flatten()\n\n solid_list = np.array(np.union1d(solid_list_a,solid_list_b)); \n solid_list = np.array(np.union1d(solid_list,solid_list_c))\n self.solid_list = np.array(np.union1d(solid_list,solid_list_d))", "def area(self):", "def area(boxes):\n return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])", "def area(self):\n\t\treturn self.height * self.height", "def __bcc_top_diagonals(self) -> cq.cq.Workplane:\n # In a cuboid ABCDA1B1C1D1 this is the angle C1AD\n #angle_C1AD = 90 - degrees(acos(2**-.5))\n angle_C1AD = 90 - degrees(acos(0.5))\n angle_CAD = 90 - degrees(acos(sqrt(2/3)))\n pseudo_unit_cell_size = sqrt(2/3)*self.unit_cell_size\n corner_points = np.array(\n [(0, 0),\n (self.bcc_unit_cell_size, 0),\n (self.bcc_unit_cell_size, self.unit_cell_size),\n (0, self.unit_cell_size)]\n )\n result = (\n cq.Workplane(\"XY\")\n .pushPoints(corner_points)\n .eachpointAdaptive(\n create_bcc_diagonal_strut,\n callback_extra_args = [\n {\"unit_cell_size\": 0.5 * pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": - angle_CAD,\n \"angle_y\": angle_C1AD},\n {\"unit_cell_size\": pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": - angle_CAD,\n \"angle_y\": - angle_C1AD},\n {\"unit_cell_size\": pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": angle_CAD,\n \"angle_y\": - angle_C1AD},\n {\"unit_cell_size\": 0.5 * pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": angle_CAD,\n \"angle_y\": angle_C1AD}\n ],\n useLocalCoords = True\n )\n )\n return result", "def calculatearea(self):\r\n return self.width * self.height", "def surface_area_of_cube(side):\n return side", "def area(boxes):\n return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])", "def __bcc_left_diagonals(self) -> cq.cq.Workplane:\n # In a cuboid ABCDA1B1C1D1 this is the angle C1AD\n #angle_C1AD = 90 - degrees(acos(2**-.5))\n angle_C1AD = 90 - degrees(acos(0.5))\n angle_CAD = 90 - degrees(acos(sqrt(2/3)))\n pseudo_unit_cell_size = sqrt(2/3)*self.unit_cell_size\n corner_points = np.array(\n [(0, 0),\n 
(self.bcc_unit_cell_size, 0),\n (self.bcc_unit_cell_size, self.unit_cell_size),\n (0, self.unit_cell_size)]\n )\n result = (\n cq.Workplane(\"XY\")\n .pushPoints(corner_points)\n .eachpointAdaptive(\n create_bcc_diagonal_strut,\n callback_extra_args = [\n {\"unit_cell_size\": pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": - angle_CAD,\n \"angle_y\": angle_C1AD},\n {\"unit_cell_size\": 0.5 * pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": - angle_CAD,\n \"angle_y\": - angle_C1AD},\n {\"unit_cell_size\": 0.5 * pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": angle_CAD,\n \"angle_y\": - angle_C1AD},\n {\"unit_cell_size\": pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": angle_CAD,\n \"angle_y\": angle_C1AD}\n ],\n useLocalCoords = True\n )\n )\n return result", "def area(self):\n area = 0\n\n for room in self.rooms:\n area += room.polygon.area()\n\n for wall in self.walls:\n area += wall.polygon.area()\n\n return area", "def area(self):\n\t\treturn self.width * self.height", "def area(self):\n\t\treturn self.width() * self.height()", "def area(self):\n return self.length*self.length", "def _area(self):\n self.area = 0.0\n for sail in self.sails:\n self.area += sail.area", "def corridor(x,z, emap, width=10, length=10, height=10, details=None, walls=\"ns\", name=\"wall\", mergeshape=None):\r\n global wallnum\r\n n = z + width / 2\r\n s = z - width / 2\r\n e = x + length / 2\r\n w = x - length / 2\r\n\r\n solid_objects = []\r\n\r\n if \"n\" in walls:\r\n # TODO: abstract out the mostly-duplicate code in these cases...\r\n nwall = SolidObject(name+str(wallnum),\r\n Size(length, height, 1),\r\n Position(x, emap.calcHeight(x, z) + height / 2, n-0.5), 0)\r\n solid_objects.append(nwall)\r\n nwallmodel = createMyCuboid(nwall.w() * 2, nwall.h() * 2, nwall.d() * 2,\r\n name=name+str(wallnum),\r\n x=nwall.x(),y=nwall.y(),z=nwall.z(),\r\n rx=0.0, ry=0.0, rz=0.0, cx=0.0, cy=0.0, cz=0.0)\r\n if mergeshape:\r\n mergeshape.add(nwallmodel)\r\n else:\r\n nwall.setmodel(nwallmodel, details)\r\n\r\n\r\n wallnum += 1\r\n\r\n if \"s\" in walls:\r\n swall = SolidObject(name+str(wallnum), Size(length, height, 1), Position(x, emap.calcHeight(x, z)+height / 2, s+0.5), 0)\r\n solid_objects.append(swall)\r\n swallmodel = createMyCuboid(swall.w()*2, swall.h()*2, swall.d()*2,\r\n name=name+str(wallnum),\r\n x=swall.x(), y=swall.y(), z=swall.z(),\r\n rx=0.0, ry=0.0, rz=0.0, cx=0.0,cy=0.0, cz=0.0)\r\n if mergeshape:\r\n mergeshape.add(swallmodel)\r\n else:\r\n swall.setmodel(swallmodel, details)\r\n\r\n wallnum += 1\r\n\r\n if \"e\" in walls:\r\n ewall = SolidObject(name+str(wallnum), Size(1, height, width), Position(e-0.5, emap.calcHeight(x, z)+height / 2, z), 0)\r\n solid_objects.append(ewall)\r\n ewallmodel = createMyCuboid(ewall.w()*2, ewall.h()*2, ewall.d()*2,\r\n name=name+str(wallnum),\r\n x=ewall.x(), y=ewall.y(), z=ewall.z(),\r\n rx=0.0, ry=0.0, rz=0.0, cx=0.0, cy=0.0, cz=0.0)\r\n if mergeshape:\r\n mergeshape.add(ewallmodel)\r\n else:\r\n ewall.setmodel(ewallmodel, details)\r\n\r\n wallnum += 1\r\n\r\n if \"w\" in walls:\r\n wwall = SolidObject(name+str(wallnum), Size(1, height, width), Position(w+0.5, emap.calcHeight(x, z)+height / 2, z), 0)\r\n solid_objects.append(wwall)\r\n wwallmodel = createMyCuboid(wwall.w()*2, wwall.h()*2, wwall.d()*2,\r\n name=name+str(wallnum),\r\n x=wwall.x(), y=wwall.y(), z=wwall.z(),\r\n rx=0.0, ry=0.0, rz=0.0, cx=0.0, cy=0.0, cz=0.0)\r\n if mergeshape:\r\n mergeshape.add(wwallmodel)\r\n 
else:\r\n wwall.setmodel(wwallmodel, details)\r\n wallnum += 1\r\n\r\n if \"o\" not in walls:\r\n ceiling = SolidObject(name+str(wallnum), Size(length, 1, width), Position(x, emap.calcHeight(x, z)+height+0.5, z), 0)\r\n solid_objects.append(ceiling)\r\n ceilingmodel = createMyCuboid(ceiling.w()*2, ceiling.h()*2, ceiling.d()*2,\r\n name=name+str(wallnum),\r\n x=ceiling.x(), y=ceiling.y(), z=ceiling.z(),\r\n rx=0.0, ry=0.0, rz=0.0, cx=0.0, cy=0.0, cz=0.0)\r\n if mergeshape:\r\n mergeshape.add(ceilingmodel)\r\n else:\r\n ceiling.setmodel(ceilingmodel, details)\r\n\r\n wallnum += 1\r\n\r\n return solid_objects", "def area(self):\n return(self.__width * self.__height)", "def area(self):\r\n return self.width * self.height", "def cube_area(edge : number) -> number:\n area = 6*edge*edge\n\n return area", "def area_rect(w, h):\n return w * h", "def area(self):\n return self.width*self.height", "def __bcc_diagonals(self) -> cq.cq.Workplane:\n # In a cuboid ABCDA1B1C1D1 this is the angle C1AD\n #angle_C1AD = 90 - degrees(acos(2**-.5))\n angle_C1AD = 90 - degrees(acos(0.5))\n angle_CAD = 90 - degrees(acos(sqrt(2/3)))\n pseudo_unit_cell_size = sqrt(2/3)*self.unit_cell_size\n corner_points = np.array(\n [(0, 0),\n (self.bcc_unit_cell_size, 0),\n (self.bcc_unit_cell_size, self.unit_cell_size),\n (0, self.unit_cell_size)]\n )\n result = (\n cq.Workplane(\"XY\")\n .pushPoints(corner_points)\n .eachpointAdaptive(\n create_bcc_diagonal_strut,\n callback_extra_args = [\n {\"unit_cell_size\": pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": - angle_CAD,\n \"angle_y\": angle_C1AD},\n {\"unit_cell_size\": pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": - angle_CAD,\n \"angle_y\": - angle_C1AD},\n {\"unit_cell_size\": pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": angle_CAD,\n \"angle_y\": - angle_C1AD},\n {\"unit_cell_size\": pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": angle_CAD,\n \"angle_y\": angle_C1AD}\n ],\n useLocalCoords = True\n )\n )\n return result", "def area(self):\n return self.__size ** 2" ]
[ "0.6238483", "0.62136805", "0.6182634", "0.6129454", "0.6088007", "0.5912483", "0.5898643", "0.5848686", "0.5848686", "0.58375394", "0.58281547", "0.58228177", "0.5809672", "0.5803751", "0.58012855", "0.57715404", "0.5770111", "0.57564414", "0.574188", "0.5714904", "0.5705827", "0.56728995", "0.56614166", "0.5649508", "0.5648593", "0.56436783", "0.5633487", "0.5627201", "0.5617783", "0.5617496" ]
0.66047704
0
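To make the wall-area document above concrete, the worked example below calls it with assumed dimensions (a 10 by 8 plan, 3 m walls, 4 m roof rise); the function body is repeated from the row so the snippet runs on its own, and the area_4 / area_5 outputs appear to correspond to components-and-cladding wall zones 4 and 5.

    # Worked example; dimensions are illustrative, not taken from the dataset.
    def walls_cc(lenght, width, wall_height=3, roof_height=4):
        a = min(0.1*lenght, 0.1*width, 0.4*(wall_height+0.5*roof_height))
        trian = 0.5*width*roof_height
        trian_5 = 0.5*a*a*(wall_height/roof_height)
        area = (lenght + width)*2*wall_height + 2*trian
        area_5 = 8*a*wall_height + 4*trian_5
        area_4 = area - area_5
        return area, area_4, area_5

    total, area_4, area_5 = walls_cc(10, 8)     # a = min(1.0, 0.8, 2.0) = 0.8
    print(total, area_4, area_5)                # approximately 140.0 119.84 20.16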
Calculate the roof area from CC sections roof components and cladding
def roof_cc(lenght, width, overhang=1, wall_height=3, roof_height=4):
    a = min(0.1 * lenght, 0.1 * width, 0.4 * (wall_height + 0.5 * roof_height))
    area = (lenght + overhang)*(width + overhang)
    area_3 = 8*a**2
    area_1 = (lenght - 2)*(width - 4*a)
    area_2 = area - area_3 - area_1
    return area, area_1, area_2, area_3
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Intarea( xc, yc, r, x0, x1, y0, y1):\n\n#\n# Shift the objects so that the circle is at the origin.\n#\n x0 = x0 - xc\n y0 = y0 - yc\n x1 = x1 - xc\n y1 = y1 - yc\n\n return Oneside( x1, y0, y1, r ) + Oneside( y1, -x1, -x0, r ) +\\\n Oneside( -x0, -y1, -y0, r ) + Oneside( -y0, x0, x1, r )", "def compute_area(self):\r\n\r\n \"\"\"Косое произведение векторов\r\n A = (x2-x1; y2-y1; z2-z1)\r\n B = (x3-x1; y3-y1; z3-z1)\r\n S = 0.5*sqrt((Ay*Bz - Az*By)^2 + (Az*Bx - Ax*Bz)^2 + (Ax*By - Ay*Bx)^2 )\r\n \"\"\"\r\n a_x = self.x2 - self.x1\r\n a_y = self.y2 - self.y1\r\n a_z = self.z2 - self.z1\r\n\r\n b_x = self.x3 - self.x1\r\n b_y = self.y3 - self.y1\r\n b_z = self.z3 - self.z1\r\n\r\n self.area = 0.5 * math.sqrt((a_y * b_z - a_z * b_y) ** 2 + (a_z * b_x - a_x * b_z) ** 2 + (a_x * b_y - a_y * b_x) ** 2)\r\n\r\n \"\"\"По теореме Герона\"\"\"\r\n # a = math.sqrt((self.x1-self.x2)**2 + (self.y1-self.y2)**2 + (self.z1-self.z2)**2)\r\n # b = math.sqrt((self.x1-self.x3)**2 + (self.y1-self.y3)**2 + (self.z1-self.z3)**2)\r\n # c = math.sqrt((self.x2-self.x3)**2 + (self.y2-self.y3)**2 + (self.z2-self.z3)**2)\r\n # p = 0.5 * (a + b + c)\r\n # self.area = math.sqrt(p * (p - a) * (p - b) * (p - c))\r", "def walls_cc(lenght, width, wall_height=3, roof_height=4):\n a = min(0.1*lenght, 0.1*width, 0.4*(wall_height+0.5*roof_height))\n trian = 0.5*width*roof_height\n trian_5 = 0.5*a*a*(wall_height/roof_height)\n # trian_4 = trian - 2*trian_5\n\n area = (lenght + width)*2*wall_height + 2*trian\n area_5 = 8*a*wall_height + 4*trian_5\n area_4 = area - area_5\n\n return area, area_4, area_5", "def ecotope_area_sums(self, board):\n\n # clean up the input and merge into a single dataframe\n cols = ['geometry', 'z_reference', 'landuse', 'biosafe']\n board_clean = board.loc[board.biosafe, cols]\n board_eco = pd.merge(board_clean, self.vr_eco,\n on=['z_reference', 'landuse'])\n\n # optional: output gdf to shp\n # gdf = board_eco.copy()\n # gdf['biosafe'] = gdf.biosafe.values.astype('int')\n # gdf.to_file('board_eco.shp')\n\n # calculate the total area of all columns\n # note: landuse-z_reference combinations not in vr_ecotopes are\n # excluded\n area_eco1 = board_eco.groupby('ecotope1').sum()\n area_eco2 = board_eco.groupby('ecotope2').sum()\n area_fractions = pd.concat([area_eco1.fraction1, area_eco2.fraction2],\n axis=1, sort=True)\n area_total = area_fractions.fillna(0).sum(axis=1).reset_index()\n area_total.columns = ['ecotope', 'area_m2'] \n\n # assert that that total area of the ecotopes matches the biosafe\n # hexagons\n try:\n assert int(area_total.sum().area_m2) == int(board_clean.shape[0]),\\\n (\"ERROR: There appears to be one or more polygons that is not \" +\n \"detected correctly, resulting in a missmatch of the VR ecotopes\")\n except AssertionError as error:\n print(error)\n pass\n\n area_out = area_total.set_index('ecotope')\n area_out.index.name=None\n return area_out", "def area(self):", "def calc_section_force(aoa, mac, rot_center, casenum=1, networknum=1):\r\n\r\n chord = \"x\"\r\n span = \"y\"\r\n height = \"z\"\r\n coor2id = {\"x\": 1, \"y\": 2, \"z\": 3}\r\n chord_id = coor2id[chord]\r\n span_id = coor2id[span]\r\n height_id = coor2id[height]\r\n\r\n rot_center = np.asarray(rot_center)[[chord_id - 1, height_id - 1]]\r\n\r\n # read the pressure distribution from agps\r\n data = read_agps()\r\n network = data[int(networknum - 1)]\r\n result = []\r\n\r\n # calculate the local aerodynamic coefficients for each line in the network\r\n for line in network:\r\n if np.array_equal(line[0, 1:4], 
line[-1, 1:4]):\r\n line = line[:-1] # omit the last point in the line if it has the same xyz-coordinates as the first point\r\n y = float(line[0, span_id]) # coordinate of spanwise position\r\n line_clip = line[:, [chord_id, height_id]] # coordinate of cross section\r\n chord = np.max(line_clip[:, 0]) - np.min(line_clip[:, 0])\r\n\r\n # local coefficients are nan if chord is 0\r\n if chord == 0:\r\n nan = float(\"nan\")\r\n result.append([y, chord, nan, nan, nan])\r\n continue\r\n\r\n diff_coord = line_clip - np.roll(line_clip, 1, axis=0)\r\n length = np.linalg.norm(diff_coord, axis=1) # length between each vertex\r\n norm = np.fliplr(diff_coord / length[:, np.newaxis]) # norm vector between each vertex\r\n min_id = np.argmin(line_clip, axis=0) # vertex_id with the minimum coordinate\r\n flip = np.array([-1 if col[i] < 0 else 1 for (col, i) in zip(norm.T, min_id)])\r\n norm *= flip # flip the norm vectors to make them point inward\r\n\r\n coeff_all = [y, chord]\r\n # the definition of each variable is explained in the reference\r\n cp2 = line.T[int(3 + casenum)]\r\n cp1 = np.roll(cp2, 1, axis=0)\r\n cp3 = np.roll(cp2, -1, axis=0)\r\n n21 = norm\r\n n23 = np.roll(n21, -1, axis=0)\r\n l21 = length * 0.5\r\n l23 = np.roll(l21, -1, axis=0)\r\n cp21 = (0.75 * cp2 + 0.25 * cp1)[:, np.newaxis] * n21\r\n cp23 = (0.75 * cp2 + 0.25 * cp3)[:, np.newaxis] * n23\r\n cpdl = cp21 * l21[:, np.newaxis] + cp23 * l23[:, np.newaxis]\r\n cp_moment = cpdl * np.fliplr(line_clip - rot_center)\r\n cp_moment = cp_moment[:, 0] - cp_moment[:, 1]\r\n liftdragcoef = np.sum(cpdl, axis=0) / chord\r\n liftdragcoef = rot(liftdragcoef, aoa)\r\n cm = np.sum(cp_moment, axis=0) / (mac * chord)\r\n coeff = liftdragcoef.tolist()\r\n coeff.append(cm)\r\n coeff_all += coeff\r\n result.append(coeff_all)\r\n result = np.array(result)\r\n columns = [\"pos\", \"chord\", \"cd\", \"cl\", \"cm\"]\r\n result = pd.DataFrame(result, columns=columns)\r\n result.to_csv(\"section_force.csv\", index=False)", "def neff_rect(area: float, width: float, crange1: float, psill1: float, model1: str = 'Sph', crange2: float = None,\n psill2: float = None, model2: str = None) -> float:\n def hcov_sum(h, crange1=crange1, psill1=psill1, model1=model1, crange2=crange2, psill2=psill2, model2=model2):\n\n if crange2 is None or psill2 is None or model2 is None:\n return h*(cov(h, crange1, model=model1, psill=psill1))\n else:\n return h*(cov(h, crange1, model=model1, psill=psill1)+cov(h, crange2, model=model2, psill=psill2))\n\n width = min(width, area/width)\n\n full_int = integrate_fun(hcov_sum, 0, width/2)\n bin_int = np.linspace(width/2, area/width, 100)\n for i in range(len(bin_int)-1):\n low = bin_int[i]\n upp = bin_int[i+1]\n mid = bin_int[i] + (bin_int[i+1] - bin_int[i])/2\n piec_int = integrate_fun(hcov_sum, low, upp)\n full_int += piec_int * 2/np.pi*np.arctan(width/(2*mid))\n\n std_err = np.sqrt(2*np.pi*full_int / area)\n\n if crange2 is None or psill2 is None or model2 is None:\n return psill1 / std_err ** 2\n else:\n return (psill1 + psill2) / std_err ** 2", "def _area(self):\n self.area = 0.0\n for sail in self.sails:\n self.area += sail.area", "def computePointSectionArea(self,wingIndex,segmentIndex,eta,xsi):\n # tigl.wingGetUpperPoint(wingIndex, segmentIndex, eta -> y, xsi->x)\n # WARNING there is a slight difference in the area computed with this\n # method ans CPACSCREATOR. 
At the moment it is undetermined who is more\n # accurate.\n N = 20\n xsi1 = np.linspace(0,1,N)\n upper = np.empty((N,3))\n lower = np.empty((N,3))\n\n\n # t = np.max(np.abs(upper[:][2] - lower[:][2]))\n \n for i in range(N):\n U = self.tigl.wingGetUpperPoint(wingIndex,segmentIndex,eta,xsi1[i])\n L = self.tigl.wingGetLowerPoint(wingIndex,segmentIndex,eta,xsi1[i])\n upper[i] = np.array(U)\n lower[i] = np.array(L)\n v1 = upper[0]-upper[-1]\n v2 = upper[7] - lower[7]\n c = np.abs(upper[0][0] - upper[-1][0])\n t = np.max(np.abs(upper[:][2] - lower[:][2]))\n print(c)\n area = c*0.1*t\n # sys.exit()\n # v1xv2 = np.cross(v1,v2)\n # upper = np.flip(upper,axis=0)\n # wingSectionPoints = np.concatenate((upper, lower))\n # ey_0 = np.array([0,1,0])\n # e_1 = v1xv2\n # # Computes the cross prodct\n # cross = np.cross(ey_0,e_1)\n # normCross = np.linalg.norm(cross)\n # cross = cross/normCross\n # if normCross < 1e-8:\n # # No need to rotate\n # wingSectionPoints = np.delete(wingSectionPoints,1,1)\n # hull = ConvexHull(wingSectionPoints)\n # area = hull.volume\n # else:\n # ab = inner1d(ey_0,e_1)\n # a = np.linalg.norm(ey_0)\n # b = np.linalg.norm(e_1)\n # angle = np.arccos(ab / (a*b))\n # logger.debug(\"angle: \"+str(angle))\n # quat = angle*cross\n # r = R.from_rotvec(quat)\n # # Deletes the y column since the Convex hull will struggle with\n # # a 3d plane otherwise\n # wingSectionPoints = r.apply(wingSectionPoints)\n # wingSectionPoints = np.delete(wingSectionPoints,1,1)\n # hull = ConvexHull(wingSectionPoints)\n # # WARNING since we have built a 2D surface, the function is set up\n # # in a way that this is correct!\n # area = hull.volume\n\n logger.debug(\"Computed section area: \"+str(area))\n\n return area", "def section_coordinates():\n \n gh_width = 30.0 # in feet\n gh_width_west = gh_width/2.0\n N_x = 100\n dx = gh_width_west/100.0\n gh_length = 48 # in feet\n \n xvalues = np.linspace(0,(N_x)*dx,N_x+1) # array for width\n yvalues = np.linspace(0,gh_length,num=gh_length+1) # array for height\n zvalues_west = np.zeros(N_x+1) # array for height\n \n for i in range(0,len(xvalues)):\n zvalues_west[i] = 7.29944696 + (1.27415518*xvalues[i]) + (-0.0680139854*xvalues[i]**2) + (0.00152035861*xvalues[i]**3)\n i += 1\n \n roof_slopes_west = np.zeros(N_x+1)\n roof_lengths = np.zeros(N_x+1)\n\n total_length_west = 0\n\n for i in range(1,len(xvalues)):\n dz = zvalues_west[i] - zvalues_west[i-1]\n roof_slopes_west[i] = dz/dx\n roof_lengths[i] = (dz**2 + dx**2)**0.5\n total_length_west += roof_lengths[i]\n \n zvalues_east = np.flip(zvalues_west, axis=0)\n zvalues_west = zvalues_west[:-1]\n zvalues = np.concatenate((zvalues_west, zvalues_east), axis=0)\n \n xx, yy = np.meshgrid(xvalues, yvalues) \n \n plt.plot(xx, yy, marker='.', color='k', linestyle='none')\n plt.axis('equal')\n plt.show() \n\n return roof_slopes_west", "def _causal_measure(self, x, y):\r\n\t\tC_xy = self._cross_cumulant_4th(x, y)\r\n\t\tC_yx = self._cross_cumulant_4th(y, x)\r\n\t\tR = C_xy**2 - C_yx**2\r\n\t\treturn R", "def area(self):\n return (self.baselength1 + self.baselength2)*self.height/2", "def cone_area(radius: number, height: number) -> number:\n return pi*radius*(radius + sqrt(radius**2 + height**2))", "def area(self):\n ...", "def area(self) -> float:\n return cross3(self.b.position - self.a.position,\n self.c.position - self.a.position).length() / 2.0", "def getArea(self):\n seg = self._group_index\n groups = np.unique(seg)\n ng = len(groups)\n area = 0\n for i in range(ng):\n group_segments = np.where(groups[i] == seg)[0]\n 
nseg = len(group_segments) - 1\n for j in range(nseg):\n ind = group_segments[j]\n p0 = latlon2ecef(self._toplats[ind],\n self._toplons[ind],\n self._topdeps[ind])\n p1 = latlon2ecef(self._toplats[ind + 1],\n self._toplons[ind + 1],\n self._topdeps[ind + 1])\n p2 = latlon2ecef(self._botlats[ind + 1],\n self._botlons[ind + 1],\n self._botdeps[ind + 1])\n p3 = latlon2ecef(self._botlats[ind],\n self._botlons[ind],\n self._botdeps[ind])\n a = np.sqrt((p1[0] - p0[0])**2 +\n (p1[1] - p0[1])**2 +\n (p1[2] - p0[2])**2)\n b = np.sqrt((p2[0] - p0[0])**2 +\n (p2[1] - p0[1])**2 +\n (p2[2] - p0[2])**2)\n c = np.sqrt((p2[0] - p1[0])**2 +\n (p2[1] - p1[1])**2 +\n (p2[2] - p1[2])**2)\n s = (a + b + c) / 2\n A1 = np.sqrt(s * (s - a) * (s - b) * (s - c))\n a = np.sqrt((p0[0] - p3[0])**2 +\n (p0[1] - p3[1])**2 +\n (p0[2] - p3[2])**2)\n b = np.sqrt((p2[0] - p3[0])**2 +\n (p2[1] - p3[1])**2 +\n (p2[2] - p3[2])**2)\n c = np.sqrt((p0[0] - p2[0])**2 +\n (p0[1] - p2[1])**2 +\n (p0[2] - p2[2])**2)\n s = (a + b + c) / 2\n A2 = np.sqrt(s * (s - a) * (s - b) * (s - c))\n area = area + (A1 + A2) / 1000 / 1000\n return area", "def _cal_meaningful_corners(self):\n corners = np.where(self._free_of_clash)\n corners = np.array(corners, dtype=int)\n corners = corners.transpose()\n return corners", "def rcs(azmap, rmap, elmap, areaeffmap, sigma0map, vismap, rpol, azpol,\n elpol, DEM_res, DEM_xmin, DEM_ymin, rad_x, rad_y, beamwidth,\n pulsewidth, range_weighting=True, az_conv=0,\n raster_oversampling=1, verbose=True):\n\n nrows, ncols = azmap.shape\n area_unweighted = areaeffmap * sigma0map\n\n pulselength = pulsewidth * 3.e8 / 2. # [m]\n if az_conv is not None:\n az_conv_offset = az_conv / 2.\n else:\n az_conv_offset = 0\n\n beamwidth_rad = beamwidth * np.pi / 180.\n\n if not range_weighting:\n range_weight = 1 # unity\n\n if raster_oversampling == 0:\n N = 1\n elif raster_oversampling == 1:\n N = int(np.ceil(2 * DEM_res / pulselength))\n else:\n N = raster_oversampling\n\n if N != 1:\n # New dimensions\n nc = N * ncols\n nr = N * nrows\n\n # repeat the values NxN, equivalent of rebin in IDL\n elvals = np.repeat(np.repeat(elmap, N, axis=0), N, axis=1)\n areavals = np.repeat(np.repeat(area_unweighted / N ** 2,\n N, axis=0), N, axis=1)\n visvals = np.repeat(np.repeat(vismap, N, axis=0), N, axis=1)\n\n # New x- and y-vectors\n xvec = np.arange(nr) * DEM_res / N + DEM_xmin\n yvec = np.arange(nc) * DEM_res / N + DEM_ymin\n\n xdiff = (xvec - rad_x)\n ydiff = (yvec - rad_y)\n\n # New distance from radar map\n X, Y = np.meshgrid(xdiff, ydiff)\n rvals = np.sqrt(X ** 2 + Y ** 2)\n\n # New azimuth map\n azmap_rad = (np.arctan2(X, Y) + 2 * np.pi) % (2 * np.pi)\n azvals = azmap_rad * 180. / np.pi\n else:\n rvals = rmap\n azvals = azmap\n azmap_rad = azvals * np.pi / 180.\n elvals = elmap\n areavals = area_unweighted\n visvals = vismap\n\n elmap_rad = elvals * np.pi / 180.\n elevations_rad = np.array(elpol) * np.pi / 180.\n\n # Define the area around a point P(range, azimuth) where the cells\n # have a contribution to the RCS. This area is defined with the\n # range limits from range-dr_offset to range+dr_offset and the\n # azimuth limits from azimuth-daz_offset to azimuth+daz_offset.\n #\n # For a Gaussian antenna, azimuth offset more than 2*HPBW does not a\n # have remarkable contribution.\n # With a rectangular pulse and a matched filter cells farer away\n # than pulse length does not a have remarkable contribution.\n\n daz_offset = (2. 
* beamwidth) + az_conv_offset # [deg]\n dr_offset = pulselength # [m]\n\n nazim = len(azpol)\n nrange = len(rpol)\n # pyart storage format: 2D arrays (naz * nel, nranges)\n rcspolarmap = np.zeros((nazim * len(elpol), nrange)) + np.nan\n\n for rind in range(nrange):\n if verbose:\n logging.info(f'Computing range bin {rpol[rind]:2.1f}')\n rr = rpol[rind]\n\n indr = np.logical_and(np.logical_and(rvals >= rr - dr_offset,\n rvals < rr + dr_offset),\n visvals > 0)\n\n if not np.any(indr):\n continue\n\n indr = np.where(indr)\n\n for azind in range(nazim):\n az = azpol[azind]\n # Inside the loops over range (rr) and azimuth (az), the\n # coordinates (rr, az) describe the point P(rr, az) for which\n # the RCS is calculated. If more than one DEM cell is within\n # the area from az-daz/2 to az+daz/2 and from rr-dr/2 to\n # rr+dr/2, the calculated RCS value is set to all of these\n # cells (next neighbor).\n\n # Get area around rr and az\n azmin = az - daz_offset\n azmax = az + daz_offset\n if azmin < 0:\n azmin = 360. + azmin\n indaz = np.logical_or(np.logical_and(azvals[indr] >= 0,\n azvals[indr] < azmax),\n np.logical_and(azvals[indr] >= azmin,\n azvals[indr] <= 360.))\n elif azmax > 360:\n azmax = azmax - 360.\n indaz = np.logical_or(np.logical_and(azvals[indr] >= azmin,\n azvals[indr] <= 360),\n np.logical_and(azvals[indr] >= 0,\n azvals[indr] < azmax))\n else:\n indaz = np.logical_and(azvals[indr] >= azmin,\n azvals[indr] < azmax)\n\n # Cells that contribute to the cells to set indset\n inda = tuple([indr[0][indaz], indr[1][indaz]])\n\n # Calculate offsets in azimuth and elevation to the\n # point P(rr,az) and the elevation angle of the antenna.\n\n daz_area = azmap_rad[inda] - (az * np.pi / 180.)\n\n indaz = daz_area > np.pi\n daz_area[indaz] = daz_area[indaz] - 2. * np.pi\n\n indaz = daz_area < -np.pi\n daz_area[indaz] = daz_area[indaz] + 2. 
* np.pi\n\n if range_weighting:\n # Get the weighting factor due to the range offset.\n range_weight = range_weights(rvals[inda], rr,\n pulselength)\n\n ind_rzero = rvals[inda] <= 0.0\n if np.any(ind_rzero):\n continue\n\n for iel, el in enumerate(elevations_rad):\n del_area = elmap_rad[inda] - el\n\n # Get the two-way weighting factor due to the azimuth offset\n # to the main antenna direction (assuming a Gaussian antenna\n # pattern).\n ant_weight = antenna_pattern_gauss(\n daz_area,\n del_area,\n beamwidth_rad,\n twoway=True,\n az_conv=az_conv *\n np.pi /\n 180.,\n units='rad')\n\n # RCS = SUM_j sigma_j\n # = SUM_j sigma0_j * A_eff_j * fa(dphi_j,dteta_j)^2 * fr(drange)\n # where\n # sigma_j : Backscattering cross section of each cell [m^2]\n # sigma0_j : Sigma naught of each cell [1]\n # A_eff_j : Effective area of each cell [m^2]\n # fa : One-way weighting function due to the azimuth\n # and elevation offsets.\n # fr : Range weighting function due to the range offset\n\n # RCS contribution of each cell inside the contribution\n # area.\n rcs_area = ant_weight * range_weight * areavals[inda]\n # Sum up all the contributions\n rcs = np.nansum(rcs_area)\n\n if rcs < RCS_MIN:\n rcs = np.nan\n\n # Set rcs to all values inside the set area.\n\n rcspolarmap[azind + iel * nazim, rind] = rcs\n return rcspolarmap", "def __bcc_left_diagonals(self) -> cq.cq.Workplane:\n # In a cuboid ABCDA1B1C1D1 this is the angle C1AD\n #angle_C1AD = 90 - degrees(acos(2**-.5))\n angle_C1AD = 90 - degrees(acos(0.5))\n angle_CAD = 90 - degrees(acos(sqrt(2/3)))\n pseudo_unit_cell_size = sqrt(2/3)*self.unit_cell_size\n corner_points = np.array(\n [(0, 0),\n (self.bcc_unit_cell_size, 0),\n (self.bcc_unit_cell_size, self.unit_cell_size),\n (0, self.unit_cell_size)]\n )\n result = (\n cq.Workplane(\"XY\")\n .pushPoints(corner_points)\n .eachpointAdaptive(\n create_bcc_diagonal_strut,\n callback_extra_args = [\n {\"unit_cell_size\": pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": - angle_CAD,\n \"angle_y\": angle_C1AD},\n {\"unit_cell_size\": 0.5 * pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": - angle_CAD,\n \"angle_y\": - angle_C1AD},\n {\"unit_cell_size\": 0.5 * pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": angle_CAD,\n \"angle_y\": - angle_C1AD},\n {\"unit_cell_size\": pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": angle_CAD,\n \"angle_y\": angle_C1AD}\n ],\n useLocalCoords = True\n )\n )\n return result", "def FillupArea(self):\r\n\r\n drillsize = self.FromUserUnit(float(self.m_txtViaDrillSize.GetValue()))\r\n viasize = self.FromUserUnit(float(self.m_txtViaSize.GetValue()))\r\n step_x = self.FromUserUnit(float(self.m_txtHSpacing.GetValue()))\r\n step_y = self.FromUserUnit(float(self.m_txtVSpacing.GetValue()))\r\n clearance = self.FromUserUnit(float(self.m_txtClearance.GetValue()))\r\n self.randomize = self.m_chkRandomize.GetValue()\r\n self.clearance = clearance\r\n bbox = self.area.GetBoundingBox()\r\n top = bbox.GetTop()\r\n bottom = bbox.GetBottom()\r\n right = bbox.GetRight()\r\n left = bbox.GetLeft()\r\n netname = self.m_cbNet.GetStringSelection()\r\n netcode = self.board.GetNetcodeFromNetname(netname)\r\n # commit = pcbnew.COMMIT()\r\n viacount = 0\r\n x = left\r\n\r\n # Cycle trough area bounding box checking and implanting vias\r\n layer = self.area.GetLayer()\r\n\r\n while x <= right:\r\n y = top\r\n while y <= bottom:\r\n if self.randomize:\r\n xp = x + random.uniform(-1, 1) * step_x / 5\r\n yp = y + 
random.uniform(-1, 1) * step_y / 5\r\n else:\r\n xp = x\r\n yp = y\r\n\r\n if hasattr(pcbnew, 'VECTOR2I'):\r\n p = pcbnew.VECTOR2I(xp, yp)\r\n else:\r\n if(hasattr(pcbnew, 'wxPoint')):\r\n p = pcbnew.wxPoint(xp, yp)\r\n\r\n if self.area.HitTestFilledArea(layer, p, 0):\r\n via = pcbnew.PCB_VIA(self.board)\r\n via.SetPosition(p)\r\n via.SetLayer(layer)\r\n via.SetNetCode(netcode)\r\n # Set up via with clearance added to its size-> bounding box check will be OK in worst case, may be too conservative, but additional checks are possible if needed\r\n # TODO: possibly take the clearance from the PCB settings instead of the dialog\r\n # Clearance is all around -> *2\r\n via.SetDrill(drillsize + 2 * clearance)\r\n via.SetWidth(viasize + 2 * clearance)\r\n # via.SetTimeStamp(__timecode__)\r\n if not self.CheckOverlap(via):\r\n # Check clearance only if clearance value differs from 0 (disabled)\r\n if (clearance == 0) or self.CheckClearance(via, self.area, clearance):\r\n via.SetWidth(viasize)\r\n via.SetDrill(drillsize)\r\n self.board.Add(via)\r\n # commit.Add(via)\r\n self.pcb_group.AddItem(via)\r\n viacount += 1\r\n y += step_y\r\n x += step_x\r\n\r\n if viacount > 0:\r\n wx.MessageBox(_(u\"Implanted: %d vias!\") % viacount)\r\n # commit.Push()\r\n pcbnew.Refresh()\r\n else:\r\n wx.MessageBox(_(u\"No vias implanted!\"))", "def cylinder_area(radius: number, height: number) -> number:\n area = 2*pi*radius*(radius+height)\n return area", "def area(self):\n\t\t#print (self.radius*self.radius*math.pi)\n\t\tcircle_area = (self.radius*self.radius*math.pi)\n\t\treturn circle_area", "def get_cape(temp,pres,dewpt,hght,startp,startt,startdp,totalcape=False): \n\n # Check units\n # Init temp is startt in C, Init dew point is stwrtdp,\n # pressure levels are in hPa \n temp = temp - 273.15 # convert temperature to celsius\n dewpt = dewpt - 273.15 # convert dewpoint to celsius\n pres = pres/100 # convert pressure to hPa\n \n \n inds = np.where( (pres < startp) ) \n tmp = pres[inds]\n del pres\n #pres = tmp[::-1]\n pres = tmp[:]\n del tmp \n startp = startp/100\n \n tmp = temp[inds]\n del temp\n #temp = tmp[::-1]\n temp = tmp[:]\n del tmp \n\n tmp = dewpt[inds]\n del dewpt\n #dewpt = tmp[::-1]\n dewpt = tmp[:]\n del tmp \n\n tmp = hght[inds]\n del hght\n #hght = tmp[::-1]\n hght = tmp[:]\n del tmp \n\n \n # Get Sub-LCL traces \n presdry,tempdry,tempiso=dry_ascent(startp,startt-degCtoK,startdp-degCtoK) \n \n\n # make lcl variables explicit\n P_lcl=presdry[-1]\n T_lcl=tempdry[-1]\n\n # Now lift a wet parcel from the intersection point\n # preswet=linspace(P_lcl,100,101)\n preswet,tempwet=moist_ascent(P_lcl,T_lcl)\n\n # tparcel is the concatenation of tempdry and \n # tempwet, and so on.\n \n tparcel=np.concatenate((tempdry,tempwet[1:]))\n pparcel=np.concatenate((presdry,preswet[1:]))\n\n # Interpolating the environmental profile onto the \n # parcel pressure coordinate\n # tempenv=interp(preswet,pres[::-1],temp[::-1])\n ## NEW, for total column:\n tempenv=interp(pparcel,pres[::-1],temp[::-1])\n\n\n # now solve for the equlibrium levels above LCL\n # (all of them, including unstable ones)\n # eqlev,stab=solve_eq(preswet[::-1],(tempwet-tempenv)[::-1])\n # NEW, for total column:\n # On second thought, we don't really want/need\n # any equilibrium levels below LCL\n # eqlev,stab=solve_eq(pparcel[::-1],(tparcel-tempenv)[::-1])\n # This is equivalent to the old statement :\n eqlev,stab=solve_eq(pparcel[pparcel<=P_lcl][::-1],\\\n (tparcel-tempenv)[pparcel<=P_lcl][::-1])\n\n aa = tparcel-tempenv\n\n # Sorting index by 
decreasing pressure\n I=np.argsort(eqlev)[::-1]\n eqlev=eqlev[I]; stab=stab[I]\n\n # temperatures at the equilibrium level\n # tempeq=interp(eqlev,preswet[::-1],tempenv[::-1])\n ## NEW, for total column:\n tempeq=interp(eqlev,pparcel[::-1],tparcel[::-1])\n\n # This helps with debugging\n # for ii,eq in enumerate(eqlev):\n # print \"%5.2f %5.2f %2d\"%(eq,tempeq[ii],stab[ii])\n\n # need environmental temperature at LCL\n tenv_lcl=interp(P_lcl,pparcel[::-1],tempenv[::-1])\n\n isstab=np.where(stab==1.,True,False)\n unstab=np.where(stab==1.,False,True) \n\n if eqlev.shape[0]==0:\n # no unstable layers in entire profile\n # because the parcel never crosses the tenv\n P_lfc=float('NaN')\n P_el=float('NaN')\n elif T_lcl>tenv_lcl:\n # check LCL to see if this is unstable\n P_lfc=P_lcl\n if totalcape:\n P_el=eqlev[isstab][-1]\n else:\n P_el=eqlev[isstab][0]\n elif eqlev.shape[0]>1:\n # Parcel is stable at LCL so LFC is the \n # first unstable equilibrium level and \n # \"EQ\" level is the first stable equilibrium \n # level\n P_lfc=eqlev[unstab][0]\n if totalcape:\n P_el=eqlev[isstab][-1]\n else:\n P_el=eqlev[isstab][0]\n else:\n # catch a problem... if there is only\n # one eqlev and it's stable (this is \n # unphysical), then it could be a vertical\n # resolution thing. This is a kind of \n # \"null\" option\n try:\n\t P_el=eqlev[isstab][0]\n P_lfc=eqlev[isstab][0]\n except:\n\t P_el=eqlev[unstab][0]\n P_lfc=eqlev[unstab][0]\t\n\t\n if np.isnan(P_lfc):\n return P_lcl,P_lfc,P_el,0,0\n\n # need to handle case where dwpt is not available \n # above a certain level for any reason. Most simplest \n # thing to do is set it to a reasonably low value; \n # this should be a conservative approach!\n \n #dwpt=dewpt.copy().soften_mask()\n [inds] = np.where(np.isnan(dewpt))\n dwpt = dewpt\n dwpt[inds] = dwpt.min()\n \n # raise ValueError\n #if dwpt[(pres>=P_el).data*(pres<P_lfc).data].mask.any():\n # print \"WARNING: substituting dwpt.min() for masked values of DWPT in this sounding\"\n #dwpt[dwpt.mask]=dwpt.min()\n # dwptenv=interp(preswet,pres[::-1],dwpt[::-1])\n # NEW:\n\n dwptenv=interp(pparcel,pres[::-1],dwpt[::-1])\n\n\n \n #if hght[(pres>=P_el).data].mask.any():\n # raise NotImplementedError, \"TODO: Implement standard atmosphere to substitute missing heights\"\n # hghtenv=interp(preswet,pres[::-1],self.soundingdata['hght'][::-1])\n # NEW:\n hghtenv=interp(pparcel,pres[::-1],hght[::-1])\n \n\n # Areas of POSITIVE Bouyancy\n # cond1=(tempwet>=tempenv)*(preswet<=P_lfc)*(preswet>P_el)\n # NEW:\n cond1=(tparcel>=tempenv)*(pparcel<=P_lfc)*(pparcel>P_el)\n # Areas of NEGATIVE Bouyancy\n # cond2=(tempwet<tempenv)*(preswet<=P_lcl)*(preswet>P_el)\n # NEW:\n if totalcape:\n cond2=(tparcel<tempenv)*(pparcel>P_el)\n else:\n cond2=(tparcel<tempenv)*(pparcel>P_lfc)\n # Do CAPE calculation\n # 1. Virtual temperature of parcel... 
remember it's saturated above LCL.\n # e_parcel=SatVap(tempwet)\n # Tv_parcel=VirtualTemp(tempwet+degCtoK,preswet*100.,e_parcel)\n # e_env=SatVap(dwptenv)\n # Tv_env=VirtualTemp(tempenv+degCtoK,preswet*100.,e_env)\n # NEW:\n e_parcel=SatVap(tparcel)\n Tv_parcel=VirtualTemp(tparcel+degCtoK,pparcel*100.,e_parcel)\n e_env=SatVap(dwptenv)\n Tv_env=VirtualTemp(tempenv+degCtoK,pparcel*100.,e_env)\n\n CAPE=trapz(9.81*(Tv_parcel[cond1]-Tv_env[cond1])/Tv_env[cond1],hghtenv[cond1])\n CIN=trapz(9.81*(Tv_parcel[cond2]-Tv_env[cond2])/Tv_env[cond2],hghtenv[cond2])\n\n return P_lcl,P_lfc,P_el,CAPE,CIN", "def __bcc_top_diagonals(self) -> cq.cq.Workplane:\n # In a cuboid ABCDA1B1C1D1 this is the angle C1AD\n #angle_C1AD = 90 - degrees(acos(2**-.5))\n angle_C1AD = 90 - degrees(acos(0.5))\n angle_CAD = 90 - degrees(acos(sqrt(2/3)))\n pseudo_unit_cell_size = sqrt(2/3)*self.unit_cell_size\n corner_points = np.array(\n [(0, 0),\n (self.bcc_unit_cell_size, 0),\n (self.bcc_unit_cell_size, self.unit_cell_size),\n (0, self.unit_cell_size)]\n )\n result = (\n cq.Workplane(\"XY\")\n .pushPoints(corner_points)\n .eachpointAdaptive(\n create_bcc_diagonal_strut,\n callback_extra_args = [\n {\"unit_cell_size\": 0.5 * pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": - angle_CAD,\n \"angle_y\": angle_C1AD},\n {\"unit_cell_size\": pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": - angle_CAD,\n \"angle_y\": - angle_C1AD},\n {\"unit_cell_size\": pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": angle_CAD,\n \"angle_y\": - angle_C1AD},\n {\"unit_cell_size\": 0.5 * pseudo_unit_cell_size,\n \"radius\": self.strut_radius,\n \"angle_x\": angle_CAD,\n \"angle_y\": angle_C1AD}\n ],\n useLocalCoords = True\n )\n )\n return result", "def area(boxes):\n return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])", "def area(self, cc_index=None):\n if cc_index is not None:\n return self.areas[cc_index]\n return np.sum(self.areas)", "def calc_ros(self, *args):\n return 0", "def explainAreaSmall(self):\n \n #EXPLANATION NO. 
1\n #fadeout the non-required areas\n self.play(FadeOut(area_ABC_copy), FadeOut(area_ABD_copy),\n FadeOut(geq_2), FadeOut(geq_1),\n FadeOut(area_ABC), FadeOut(area_ABD))\n \n #expand the required area\n self.play(area_ABE_copy.animate.scale(2).move_to(RIGHT*2))\n\n #surrounding text\n abe_text_1 = always_redraw(\n lambda : MathTex(\"=\", \"\\\\text{Area of } \\\\triangle ABE\").scale(0.8).next_to(area_ABE_copy, RIGHT)\n )\n\n #half base height\n abe_text_2 = always_redraw(\n lambda : MathTex(\"=\", \"\\\\dfrac{1}{2}\", \"\\\\times\", \"\\\\text{base}\", \"\\\\times\", \"\\\\text{height}\").scale(0.8).next_to(area_ABE_copy, RIGHT)\n )\n\n #write texts\n self.play(Write(abe_text_1))\n self.wait()\n self.play(ReplacementTransform(abe_text_1[0], abe_text_2[0]),\n ReplacementTransform(abe_text_1[1:], abe_text_2[1:]))\n self.wait()\n\n #defining braces\n abe_base_brace = always_redraw(\n lambda : Brace(radius_ang, DOWN)\n )\n abe_base_brace_label = always_redraw(\n lambda : MathTex(\"R\\\\cos\\\\theta\").scale(0.6).next_to(abe_base_brace, DOWN)\n )\n abe_height_brace = always_redraw(\n lambda : Brace(radius_ang, LEFT)\n )\n abe_height_brace_label = always_redraw(\n lambda : MathTex(\"R\\\\sin\\\\theta\").scale(0.6).next_to(abe_height_brace, LEFT)\n )\n\n self.play(Write(abe_base_brace), Write(abe_height_brace))\n self.play(Write(abe_base_brace_label), Write(abe_height_brace_label))\n self.wait()\n\n \n #back to editing the equation\n abe_text_3 = always_redraw(\n lambda : MathTex(\"=\", \"\\\\dfrac{1}{2}\", \"\\\\times\", \"R\\\\cos\\\\theta\", \"\\\\times\", \"R\\\\sin\\\\theta\").scale(0.8).next_to(area_ABE_copy, RIGHT)\n )\n\n self.play(ReplacementTransform(abe_text_2[0:], abe_text_3[0:]))\n self.wait(0.5)\n self.play(FadeOut(abe_base_brace), FadeOut(abe_height_brace),\n FadeOut(abe_base_brace_label), FadeOut(abe_height_brace_label))\n \n abe_text_4 = always_redraw(\n lambda : MathTex(\"=\", \"\\\\dfrac{1}{2}\", \"\\\\times\", \"\\\\cos x\", \"\\\\times\", \"\\\\sin x\").scale(0.8).next_to(area_ABE_copy, RIGHT)\n )\n self.play(ReplacementTransform(abe_text_3[0:], abe_text_4[0:]))\n\n abe_text_5 = always_redraw(\n lambda : MathTex(\"=\", \"\\\\dfrac{1}{2}\", \"\\\\sin x\", \"\\\\cos x\").scale(0.8).next_to(area_ABE_copy, RIGHT)\n )\n self.play(ReplacementTransform(abe_text_4[0:2], abe_text_5[0:2]),\n ReplacementTransform(abe_text_4[2:], abe_text_5[2:]))\n\n #vgroup for drawing box\n abe_group = VGroup(abe_text_5, area_ABE_copy)\n abe_formula_box = SurroundingRectangle(abe_group, color=PINK)\n\n self.play(Write(abe_formula_box))\n self.wait()\n\n #remove all elements\n self.play(FadeOut(abe_formula_box), FadeOut(abe_text_5), FadeOut(area_ABE_copy), FadeOut(area_ABE))", "def update_caeros(self, obj):\n model = self.model # type: BDF\n xref_errors = {}\n model._uncross_reference_aero()\n model._cross_reference_aero(check_caero_element_ids=False)\n obj.uncross_reference()\n obj.safe_cross_reference(model, xref_errors)\n\n out = self.make_caeros(model)\n (has_caero, caero_points, ncaeros, ncaeros_sub, ncaeros_cs,\n ncaeros_points, ncaero_sub_points,\n has_control_surface, box_id_to_caero_element_map, cs_box_ids) = out\n self.has_caero = has_caero\n self._create_aero(model, box_id_to_caero_element_map, cs_box_ids,\n caero_points, ncaeros_points, ncaero_sub_points,\n has_control_surface)\n self.Render()", "def _contract_by_area(slabs, dA=0.5):\n\n # In refl1d the first slab is the substrate, the order is reversed here.\n # In the following code the slabs are traversed from the backing towards\n # the 
fronting.\n newslabs = np.copy(slabs)[::-1]\n d = newslabs[:, 0]\n rho = newslabs[:, 1]\n irho = newslabs[:, 2]\n sigma = newslabs[:, 3]\n vfsolv = newslabs[:, 4]\n\n n = np.size(d, 0)\n i = newi = 1 # Skip the substrate\n\n while i < n:\n # Get ready for the next layer\n # Accumulation of the first row happens in the inner loop\n dz = rhoarea = irhoarea = vfsolvarea = 0.0\n rholo = rhohi = rho[i]\n irholo = irhohi = irho[i]\n\n # Accumulate slices into layer\n while True:\n # Accumulate next slice\n dz += d[i]\n rhoarea += d[i] * rho[i]\n irhoarea += d[i] * irho[i]\n vfsolvarea += d[i] * vfsolv[i]\n\n i += 1\n # If no more slices or sigma != 0, break immediately\n if i == n or sigma[i - 1] != 0.0:\n break\n\n # If next slice won't fit, break\n if rho[i] < rholo:\n rholo = rho[i]\n if rho[i] > rhohi:\n rhohi = rho[i]\n if (rhohi - rholo) * (dz + d[i]) > dA:\n break\n\n if irho[i] < irholo:\n irholo = irho[i]\n if irho[i] > irhohi:\n irhohi = irho[i]\n if (irhohi - irholo) * (dz + d[i]) > dA:\n break\n\n # Save the layer\n d[newi] = dz\n if i == n:\n # printf(\"contract: adding final sld at %d\\n\",newi)\n # Last layer uses surface values\n rho[newi] = rho[n - 1]\n irho[newi] = irho[n - 1]\n vfsolv[newi] = vfsolv[n - 1]\n else:\n # Middle layers uses average values\n rho[newi] = rhoarea / dz\n irho[newi] = irhoarea / dz\n sigma[newi] = sigma[i - 1]\n vfsolv[newi] = vfsolvarea / dz\n # First layer uses substrate values\n newi += 1\n\n return newslabs[:newi][::-1]" ]
[ "0.63638693", "0.62987286", "0.5934985", "0.57753664", "0.57585657", "0.57120097", "0.56629455", "0.56081516", "0.5587647", "0.5574878", "0.55626607", "0.5532842", "0.5528696", "0.54673076", "0.5436828", "0.5425758", "0.5423959", "0.5423923", "0.5408869", "0.54074836", "0.53922", "0.53895783", "0.5379213", "0.5369683", "0.5368939", "0.5361919", "0.53605443", "0.5360335", "0.53476566", "0.5333403" ]
0.7076949
0
Calculate the area of the roof sections from mwfrs
def roof_mwfrs(lenght, width, overhang=1, wall_height=3, roof_height=4): h = wall_height + 0.5*roof_height area = (lenght + overhang) * (width + overhang) area_1 = 0.5*h*width area_2 = 0.5*h*width area_3 = h*width area_4 = area - area_1 -area_2 - area_3 return area, area_1, area_2, area_3, area_4
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_area(self):\r\n\r\n \"\"\"Косое произведение векторов\r\n A = (x2-x1; y2-y1; z2-z1)\r\n B = (x3-x1; y3-y1; z3-z1)\r\n S = 0.5*sqrt((Ay*Bz - Az*By)^2 + (Az*Bx - Ax*Bz)^2 + (Ax*By - Ay*Bx)^2 )\r\n \"\"\"\r\n a_x = self.x2 - self.x1\r\n a_y = self.y2 - self.y1\r\n a_z = self.z2 - self.z1\r\n\r\n b_x = self.x3 - self.x1\r\n b_y = self.y3 - self.y1\r\n b_z = self.z3 - self.z1\r\n\r\n self.area = 0.5 * math.sqrt((a_y * b_z - a_z * b_y) ** 2 + (a_z * b_x - a_x * b_z) ** 2 + (a_x * b_y - a_y * b_x) ** 2)\r\n\r\n \"\"\"По теореме Герона\"\"\"\r\n # a = math.sqrt((self.x1-self.x2)**2 + (self.y1-self.y2)**2 + (self.z1-self.z2)**2)\r\n # b = math.sqrt((self.x1-self.x3)**2 + (self.y1-self.y3)**2 + (self.z1-self.z3)**2)\r\n # c = math.sqrt((self.x2-self.x3)**2 + (self.y2-self.y3)**2 + (self.z2-self.z3)**2)\r\n # p = 0.5 * (a + b + c)\r\n # self.area = math.sqrt(p * (p - a) * (p - b) * (p - c))\r", "def _area(self):\n self.area = 0.0\n for sail in self.sails:\n self.area += sail.area", "def area(self):\n return (self.baselength1 + self.baselength2)*self.height/2", "def area(self):", "def section_coordinates():\n \n gh_width = 30.0 # in feet\n gh_width_west = gh_width/2.0\n N_x = 100\n dx = gh_width_west/100.0\n gh_length = 48 # in feet\n \n xvalues = np.linspace(0,(N_x)*dx,N_x+1) # array for width\n yvalues = np.linspace(0,gh_length,num=gh_length+1) # array for height\n zvalues_west = np.zeros(N_x+1) # array for height\n \n for i in range(0,len(xvalues)):\n zvalues_west[i] = 7.29944696 + (1.27415518*xvalues[i]) + (-0.0680139854*xvalues[i]**2) + (0.00152035861*xvalues[i]**3)\n i += 1\n \n roof_slopes_west = np.zeros(N_x+1)\n roof_lengths = np.zeros(N_x+1)\n\n total_length_west = 0\n\n for i in range(1,len(xvalues)):\n dz = zvalues_west[i] - zvalues_west[i-1]\n roof_slopes_west[i] = dz/dx\n roof_lengths[i] = (dz**2 + dx**2)**0.5\n total_length_west += roof_lengths[i]\n \n zvalues_east = np.flip(zvalues_west, axis=0)\n zvalues_west = zvalues_west[:-1]\n zvalues = np.concatenate((zvalues_west, zvalues_east), axis=0)\n \n xx, yy = np.meshgrid(xvalues, yvalues) \n \n plt.plot(xx, yy, marker='.', color='k', linestyle='none')\n plt.axis('equal')\n plt.show() \n\n return roof_slopes_west", "def get_binary_rf_area(self):\n\n if self.thr is None:\n raise LookupError('To th area, the receptive field should be thresholded!!')\n\n alt_step = abs(np.mean(np.diff(self.altPos).astype(np.float)))\n azi_step = abs(np.mean(np.diff(self.aziPos).astype(np.float)))\n\n return len(self.weights) * alt_step * azi_step", "def computePointSectionArea(self,wingIndex,segmentIndex,eta,xsi):\n # tigl.wingGetUpperPoint(wingIndex, segmentIndex, eta -> y, xsi->x)\n # WARNING there is a slight difference in the area computed with this\n # method ans CPACSCREATOR. 
At the moment it is undetermined who is more\n # accurate.\n N = 20\n xsi1 = np.linspace(0,1,N)\n upper = np.empty((N,3))\n lower = np.empty((N,3))\n\n\n # t = np.max(np.abs(upper[:][2] - lower[:][2]))\n \n for i in range(N):\n U = self.tigl.wingGetUpperPoint(wingIndex,segmentIndex,eta,xsi1[i])\n L = self.tigl.wingGetLowerPoint(wingIndex,segmentIndex,eta,xsi1[i])\n upper[i] = np.array(U)\n lower[i] = np.array(L)\n v1 = upper[0]-upper[-1]\n v2 = upper[7] - lower[7]\n c = np.abs(upper[0][0] - upper[-1][0])\n t = np.max(np.abs(upper[:][2] - lower[:][2]))\n print(c)\n area = c*0.1*t\n # sys.exit()\n # v1xv2 = np.cross(v1,v2)\n # upper = np.flip(upper,axis=0)\n # wingSectionPoints = np.concatenate((upper, lower))\n # ey_0 = np.array([0,1,0])\n # e_1 = v1xv2\n # # Computes the cross prodct\n # cross = np.cross(ey_0,e_1)\n # normCross = np.linalg.norm(cross)\n # cross = cross/normCross\n # if normCross < 1e-8:\n # # No need to rotate\n # wingSectionPoints = np.delete(wingSectionPoints,1,1)\n # hull = ConvexHull(wingSectionPoints)\n # area = hull.volume\n # else:\n # ab = inner1d(ey_0,e_1)\n # a = np.linalg.norm(ey_0)\n # b = np.linalg.norm(e_1)\n # angle = np.arccos(ab / (a*b))\n # logger.debug(\"angle: \"+str(angle))\n # quat = angle*cross\n # r = R.from_rotvec(quat)\n # # Deletes the y column since the Convex hull will struggle with\n # # a 3d plane otherwise\n # wingSectionPoints = r.apply(wingSectionPoints)\n # wingSectionPoints = np.delete(wingSectionPoints,1,1)\n # hull = ConvexHull(wingSectionPoints)\n # # WARNING since we have built a 2D surface, the function is set up\n # # in a way that this is correct!\n # area = hull.volume\n\n logger.debug(\"Computed section area: \"+str(area))\n\n return area", "def area(r):\n return np.pi * (r ** 2)", "def area(boxes):\n return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])", "def area(self):\n return self.length*self.length", "def _calc_area(LMTD, U, Q, ft) -> 'Area':\n return Q/(U*LMTD*ft)", "def calculate_ribbon_required(l: int, w: int, h: int) -> int:\n side_perimeters = ((l + w) * 2, (w + h) * 2, (l + h) * 2)\n present_total = min(side_perimeters)\n bow = l * w * h\n result = present_total + bow\n return result", "def area(self):\n return 0.5*np.abs(np.dot(self.x,np.roll(self.y,1))-np.dot(self.y,np.roll(self.x,1)))", "def roof_cc(lenght, width, overhang=1, wall_height=3, roof_height=4):\n a = min(0.1 * lenght, 0.1 * width, 0.4 * (wall_height + 0.5 * roof_height))\n\n area = (lenght + overhang)*(width + overhang)\n area_3 = 8*a**2\n area_1 = (lenght - 2)*(width - 4*a)\n area_2 = area - area_3 - area_1\n return area, area_1, area_2, area_3", "def getArea(self):\n seg = self._group_index\n groups = np.unique(seg)\n ng = len(groups)\n area = 0\n for i in range(ng):\n group_segments = np.where(groups[i] == seg)[0]\n nseg = len(group_segments) - 1\n for j in range(nseg):\n ind = group_segments[j]\n p0 = latlon2ecef(self._toplats[ind],\n self._toplons[ind],\n self._topdeps[ind])\n p1 = latlon2ecef(self._toplats[ind + 1],\n self._toplons[ind + 1],\n self._topdeps[ind + 1])\n p2 = latlon2ecef(self._botlats[ind + 1],\n self._botlons[ind + 1],\n self._botdeps[ind + 1])\n p3 = latlon2ecef(self._botlats[ind],\n self._botlons[ind],\n self._botdeps[ind])\n a = np.sqrt((p1[0] - p0[0])**2 +\n (p1[1] - p0[1])**2 +\n (p1[2] - p0[2])**2)\n b = np.sqrt((p2[0] - p0[0])**2 +\n (p2[1] - p0[1])**2 +\n (p2[2] - p0[2])**2)\n c = np.sqrt((p2[0] - p1[0])**2 +\n (p2[1] - p1[1])**2 +\n (p2[2] - p1[2])**2)\n s = (a + b + c) / 2\n A1 = np.sqrt(s * (s - a) * (s - b) 
* (s - c))\n a = np.sqrt((p0[0] - p3[0])**2 +\n (p0[1] - p3[1])**2 +\n (p0[2] - p3[2])**2)\n b = np.sqrt((p2[0] - p3[0])**2 +\n (p2[1] - p3[1])**2 +\n (p2[2] - p3[2])**2)\n c = np.sqrt((p0[0] - p2[0])**2 +\n (p0[1] - p2[1])**2 +\n (p0[2] - p2[2])**2)\n s = (a + b + c) / 2\n A2 = np.sqrt(s * (s - a) * (s - b) * (s - c))\n area = area + (A1 + A2) / 1000 / 1000\n return area", "def area(self):\n if isinstance(self.crs, GeographicalCRS):\n major_axis = self.crs.ellipsoid.a\n minor_axis = self.crs.ellipsoid.b\n\n area = 0.0\n if major_axis == minor_axis: # Sphere\n for seg in self.segment_tuples:\n x1, y1 = seg[0]\n x2, y2 = seg[1]\n area += geodesy.spherical_area(major_axis, x1, y1, x2, y2)\n\n else:\n for seg in self.segment_tuples:\n x1, y1 = seg[0]\n x2, y2 = seg[1]\n area += geodesy.ellipsoidal_area(major_axis, minor_axis,\n x1, y1, x2, y2)\n\n else:\n # Cartesian coordinate systems\n x, y = self.coordinates\n x0 = np.min(x)\n area = (0.5*(x[0] + x[-1]) - x0) * (y[0] - y[-1])\n area += sum((0.5*(x[i+1]+x[i]) - x0) * (y[i+1] - y[i]) for i in range(len(x)-1))\n return abs(area) - sum(sub.area for sub in self.subs)", "def area(boxes):\n return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])", "def total_area(self):\n return numpy.prod([r[1] - r[0] for r in self.range_])", "def area(self) -> float:\n return cross3(self.b.position - self.a.position,\n self.c.position - self.a.position).length() / 2.0", "def area(self):\n return math.pi * self._r ** 2", "def calculate(self):\n\n return self._calculate_area(self.ground_truth, self.slice_number)", "def total_area(self) :\n area = 0\n for i in self.residues :\n area += i.solvent_acc_area\n return area", "def area(self):\n area = 0\n\n for room in self.rooms:\n area += room.polygon.area()\n\n for wall in self.walls:\n area += wall.polygon.area()\n\n return area", "def calculate_area(surfname,fwhm):\n try:\n subprocess.call(\"depth_potential -area_voronoi \" + surfname + \" /tmp/tmp_area.txt\",shell=True)\n subprocess.call(\"depth_potential -smooth \" + str(fwhm) + \" /tmp/tmp_area.txt \" + surfname + \" /tmp/sm_area.txt\",shell=True)\n area=np.loadtxt(\"/tmp/sm_area.txt\")\n subprocess.call(\"rm /tmp/sm_area.txt /tmp/tmp_area.txt\",shell=True)\n except OSError:\n print(\"depth_potential not found, please install CIVET tools or replace with alternative area calculation/data smoothing\")\n return 0;\n return area;", "def area(\n self):\n pi = numpy.pi\n area0 = 4.0 * pi / 8.0\n areadiv = 4.0 ** self.depth\n area = area0 / areadiv * (180.0 / pi) ** 2\n return area", "def findArea(self):\n\n a, b = self.sides\n area = a * b\n print(f\"Are of rectangle is: {area}\")", "def roof_measurements():\n \n # roof measurements from excel\n path_inputs = '/Users/bekah/Desktop/class/be523/modeling_project/roof_function.xlsx'\n sheet_name_setup = 'roof'\n roof = pd.read_excel(path_inputs, sheetname = sheet_name_setup)\n \n # make arrays for east and west roof sections\n x_west = roof['x_west']\n z_west = roof['z_west']\n x_east = roof['x_east']\n z_east = roof['z_east']\n \n return x_west, z_west, x_east, z_east", "def area(self):\n ...", "def area(self) -> float:\n raise NotImplementedError", "def Intarea( xc, yc, r, x0, x1, y0, y1):\n\n#\n# Shift the objects so that the circle is at the origin.\n#\n x0 = x0 - xc\n y0 = y0 - yc\n x1 = x1 - xc\n y1 = y1 - yc\n\n return Oneside( x1, y0, y1, r ) + Oneside( y1, -x1, -x0, r ) +\\\n Oneside( -x0, -y1, -y0, r ) + Oneside( -y0, x0, x1, r )" ]
[ "0.6690208", "0.63474447", "0.631824", "0.622642", "0.62213504", "0.6113038", "0.6052953", "0.6041878", "0.6038184", "0.6016974", "0.6003224", "0.60003316", "0.59857655", "0.59566283", "0.5944727", "0.59417486", "0.5940033", "0.5937483", "0.5933765", "0.59291804", "0.5923295", "0.5919575", "0.5905215", "0.59005713", "0.5896632", "0.5873824", "0.58618134", "0.58590543", "0.58530957", "0.5832044" ]
0.6744593
0
Returns True if the path references a storage managed by this client.
def is_managed_path(self, path): if self._config is None: return False fields = path.split(':', 1) return len(fields) == 2 and fields[0] in self._config
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ismount(path):\n return True if not get_instance(path).relpath(path) else False", "def is_remote(path: Text) -> bool:\n\n # TODO(Alex): add check for another remote storages (s3, ...) when they will be supported\n if path.startswith('gs://'):\n return True\n\n return False", "def allowed(self, request):\n try:\n storage_backend = stx_api.sysinv.get_storage_backend(request)\n if stx_api.sysinv.STORAGE_BACKEND_CEPH in storage_backend:\n return True\n except Exception:\n pass\n return False", "def storage_can_read(self):\n return True", "def is_gcs_path(path):\n return GCS_REGEX.match(path)", "def on_cifs(cls, path: os.PathLike) -> bool:\n return cls.get_mount(path)[1] == \"cifs\"", "def isabs(path):\n # If detected as storage path, it is an absolute path.\n return True", "def is_shared_object(path: str) -> bool:\n return os.path.isfile(path) and path.endswith(\".so\")", "def is_managed(self):\n return getattr(self.local, 'managed', False)", "def is_stone_backend(cls, path):\n path_without_ext, _ = os.path.splitext(path)\n _, second_ext = os.path.splitext(path_without_ext)\n return second_ext == cls.backend_extension", "def is_reference(self):\n return self.resource.is_reference()", "def is_item_in_storage(life, item_uid):\n\tfor container in get_all_storage(life):\n\t\tif item_uid in container['storing']:\n\t\t\treturn True\n\t\n\treturn False", "def exists(redis_client: Redis, root_path) -> bool:\n return bool(redis_client.exists(root_path))", "def exists(self, remote_path, storage_id=None):\n client, remote_path = self._get_storage(remote_path, storage_id=storage_id)\n return client.exists(remote_path)", "def check_instance_shared_storage_local(self, context, instance):\n raise NotImplementedError()", "def mounted(self):\n return os.path.ismount(self.get(\"~mountpoint\", \"/\"))", "def storage_exists(self, request):\n HttpRequest = request.to_http_info(self.api_client.configuration)\n return self.__make_request(HttpRequest, 'GET', 'StorageExist')", "async def has(path: str) -> bool:\n _ = path.strip('/').split('/')\n bucket = _[0]\n key = '/'.join(_[1:])\n async with _create_client() as client:\n try:\n await client.head_object(Bucket=bucket, Key=key)\n return True\n except ClientError:\n return False", "def has_global_storage(self, name: str) -> bool:\n return name in self.global_storage", "def _path_exist(self, stream_name:str=None, version:int=None, user_id:str=None):\n storage_path = self._get_storage_path(stream_name=stream_name, version=version, user_id=user_id)\n if self.nosql_store == \"hdfs\":\n status = self.fs.exists(storage_path)\n elif self.nosql_store==\"filesystem\":\n status = self.fs.path.exists(storage_path)\n else:\n raise Exception(\"Not supported File system\")\n\n if status:\n return True\n else:\n return False", "def ismount(self, vPath):\n return vPath[1:] in self.listdir('/')", "def _is_s3(path:str)->bool:\n return path.startswith(\"s3://\")", "def is_path_registered(path):\n result = db_session.query(MediaFiles).filter_by(path=path).all()\n return True if result else False", "def is_mounted(self):\n try:\n _ = openmediavault.subprocess.check_output(\n [\n 'findmnt',\n '--canonicalize',\n '--first-only',\n '--noheadings',\n '--raw',\n '--nofsroot',\n self.canonical_device_file,\n ]\n )\n return True\n except subprocess.CalledProcessError:\n pass\n return False", "def isRelated(self):\n return len(self.user_storage.all()) > 0", "def __contains__(self, item):\n\n if self.is_view:\n return item in self._view\n return item in self._storage", "def 
check_cached_item(self, path):\n item_path = '%s/%s' % (\n self.cache_folder,\n path.strip('/')\n )\n\n try:\n self.container.get_object(item_path)\n return '%s/%s' % (self.container.cdn_ssl_uri, item_path)\n except NoSuchObject:\n return False", "def path_exists(self, path):\n\t\tos_path = self._get_os_path(path=path)\n\t\treturn is_folder(self.bucket, os_path)", "def storage_available(self):\n logger.debug('Function storage_available start')\n \n # 2.9 GB\n max_size = 2.9*10**9\n \n if self.total_image_data_size >= max_size:\n logger.info(\"Storage not available\")\n return False\n else:\n logger.info(\"Storage available\")\n return True\n\n logger.debug('Function storage_available end')", "def check_instance_shared_storage_remote(self, context, data):\n raise NotImplementedError()" ]
[ "0.6592271", "0.6509119", "0.64684016", "0.611049", "0.6104739", "0.6081552", "0.6011759", "0.598951", "0.5900278", "0.589274", "0.58603543", "0.58445936", "0.58394146", "0.5830439", "0.57979333", "0.5789618", "0.57753074", "0.57674164", "0.5746314", "0.57142276", "0.570292", "0.568413", "0.5649173", "0.5627868", "0.56272036", "0.5615712", "0.56117636", "0.5603128", "0.5596348", "0.5595511" ]
0.71175957
0
Returns the storage ID and the full path from a managed path.
def parse_managed_path(path): fields = path.split(':', 1) return fields[0], fields[1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split(self, path):\n if not self.is_managed_path(path):\n return os.path.split(path)\n client, _ = self._get_storage(path)\n prefix, rel_path = self.parse_managed_path(path)\n return (\"%s:\" % prefix,) + client.split(rel_path)", "def path(self):\n return self.storage.path(self.name)", "def _get_storage(self, path, storage_id=None):\n if storage_id is None:\n fields = path.split(':', 1)\n if len(fields) == 2 and len(fields[0]) > 1:\n storage_id = fields[0]\n path = fields[1]\n\n if storage_id is not None:\n if storage_id not in self._storages:\n if self._config is None or storage_id not in self._config:\n raise ValueError('unknown storage identifier %s' % storage_id)\n config = self._config[storage_id]\n if config['type'] == 's3':\n credentials = config.get('aws_credentials', {})\n client = storages.S3Storage(\n storage_id,\n config['bucket'],\n access_key_id=credentials.get('access_key_id'),\n secret_access_key=credentials.get('secret_access_key'),\n region_name=credentials.get('region_name'),\n assume_role=credentials.get('assume_role'),\n transfer_config=credentials.get('transfer_config'))\n elif config['type'] == 'swift':\n client = storages.SwiftStorage(\n storage_id,\n config['container'],\n auth_config=config.get('auth_config'),\n transfer_config=config.get('transfer_config'))\n elif config['type'] == 'ssh':\n client = storages.RemoteStorage(\n storage_id,\n config['server'],\n config['user'],\n config.get('password'),\n config.get('pkey'),\n port=config.get('port', 22),\n basedir=config.get('basedir'))\n elif config['type'] == 'http':\n client = storages.HTTPStorage( # pylint: disable=abstract-class-instantiated\n storage_id,\n config['get_pattern'],\n pattern_push=config.get('post_pattern'),\n pattern_list=config.get('list_pattern'))\n elif config['type'] == 'systran_corpusmanager':\n client = storages.CMStorages(\n storage_id,\n config.get('host_url'),\n account_id=config.get('account_id'),\n root_folder=config.get('root_folder'))\n elif config['type'] == 'local':\n client = storages.LocalStorage(\n storage_id,\n basedir=config.get(\"basedir\"))\n else:\n raise ValueError(\n 'unsupported storage type %s for %s' % (config['type'], storage_id))\n self._storages[storage_id] = client\n else:\n client = self._storages[storage_id]\n else:\n client = storages.LocalStorage()\n\n return client, client._internal_path(path)", "def _get_blob_path(self, prefix: str, oid: str) -> str:\n if not self.path_prefix:\n storage_prefix = ''\n elif self.path_prefix[0] == '/':\n storage_prefix = self.path_prefix[1:]\n else:\n storage_prefix = self.path_prefix\n return posixpath.join(storage_prefix, prefix, oid)", "def path(self):\n if self._package:\n return self._package.resourceDir/self._storageName\n else:\n return self._storageName", "def get_storage(self, schema, storage, path, params=None):\n return self.storages[storage](schema, path, params)", "def storage_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"storage_id\")", "def get_path(self):\n\t\treturn call_sdk_function('PrlShare_GetPath', self.handle)", "def getPath(self):\n path = '/'.join(self.getPhysicalPath())\n return path", "def get_storage_vol(self, cont_id, store_backend):\n if store_backend == 'devicemapper':\n dev_name = self.api.inspect_container(\n cont_id)['GraphDriver']['Data']['DeviceName']\n with open('/proc/mounts') as mounts:\n mounts = mounts.read()\n mnt_re = re.compile(r'{} (\\S*)'.format(dev_name))\n mnt_path = re.search(mnt_re, mounts).group(1)\n cont_vol = os.path.join(mnt_path, 'rootfs')\n return 
cont_vol\n elif store_backend == 'overlay2':\n cont_vol = self.api.inspect_container(\n cont_id)['GraphDriver']['Data']['MergedDir']\n return cont_vol\n\n else:\n raise NotSupportedStorageBackend('Unsupported storage backend')", "def cloud_storage_path(self) -> Optional['outputs.PreventionStoredInfoTypeDictionaryCloudStoragePath']:\n return pulumi.get(self, \"cloud_storage_path\")", "def storage_get(context, storage_id):\n return _storage_get(context, storage_id)", "def get_processed_path(self):\n location = self.get_storage().location\n return self.get_processed_key_name()[len(location):]", "def get_path(self, path_id):\n\t\tpass", "def getID(self):\n return str(self._storage_id)", "def _get_path(self, volume_path):\n return os.path.join(\n self.volume_prefix,\n volume_path.group_id if volume_path.group_id is not None else NO_GROUP_NAME,\n volume_path.volume_id)", "def _get_mount(self):\n if not self._mount.endswith(os.path.sep):\n return \"%s%s\" % (self._mount, os.path.sep)\n else:\n return self._mount", "def storage_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"storage_id\")", "def storage_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"storage_id\")", "def get_path(self):\n return self.sync_path", "def get_obj_path(self, part):\n part_dictionary = self.part_reference.get(part, {})\n return part_dictionary.get(\"full_path\", None)", "def get_path(self):\n return self.path", "def _get_mount_path(self, connection_info):\n share = self._normalize_export(connection_info['data']['export'])\n return os.path.join(self._get_mount_point_base(),\n utils.get_hash_str(share))", "def _get_path_infomation(self):\n long_identifier = self._device_path.split('/')[4]\n protocol, remainder = long_identifier.split('-', 1)\n identifier, _, device_type = remainder.rsplit('-', 2)\n return (protocol, identifier, device_type)", "def __get_path(self):\n return self.path", "def getPath(self):\n return self.path", "def get_location(identifier):\n return STORAGE_LOCATION + \"/\" + identifier[len(ROOT_NODE) + 1:]", "def _get_path(self, protein_id: int):\n protein_name = self.files_refined[protein_id]\n path_protein = os.path.join(\n self.init_refined, protein_name, protein_name + \"_protein.pdb\"\n )\n path_ligand = os.path.join(\n self.init_refined, protein_name, protein_name + \"_ligand.mol2\"\n )\n return path_protein, path_ligand", "def split_datastore_path(datastore_path):\n spl = datastore_path.split('[', 1)[1].split(']', 1)\n path = \"\"\n if len(spl) == 1:\n datastore_name = spl[0]\n else:\n datastore_name, path = spl\n return datastore_name, path.strip()", "def getPath(device):\n # If there is a entry record for this partition in fstab\n # use path in there.\n if device in listEntries():\n path_, fsType_, options_ = getEntry(device)\n return path_\n path = '/media/'\n label = getLabel(device)\n # There may be partitions without a label\n if not label:\n if not os.path.exists(path+'disk'):\n path = path+'disk'\n elif not os.path.ismount(path+'disk'):\n path = path+'disk'\n else:\n for i in range(1, len(getMounted())):\n if not os.path.exists(path+'disk-'+str(i)):\n path = path+'disk-'+str(i)\n break\n elif not os.path.ismount(path+'disk-'+str(i)):\n path = path+'disk-'+str(i)\n break\n # Labels may be same\n else:\n if not os.path.exists(path+label):\n path = path+label\n elif not os.path.ismount(path+label):\n path = path+label\n else:\n for i in range(1, len(getMounted())):\n if not os.path.exists(path+label+'-'+str(i)):\n path = path+label+'-'+str(i)\n break\n elif not 
os.path.ismount(path+label+'-'+str(i)):\n path = path+label+'-'+str(i)\n break\n return path" ]
[ "0.66156155", "0.6590386", "0.6478085", "0.6305727", "0.62419665", "0.6214927", "0.61787874", "0.61033297", "0.6006285", "0.5967128", "0.5922896", "0.5895419", "0.5891455", "0.58695024", "0.5827752", "0.58000994", "0.57938576", "0.5782561", "0.5782561", "0.57012945", "0.5680097", "0.5675826", "0.56752", "0.5656752", "0.56393456", "0.56188047", "0.5616546", "0.5614817", "0.5605417", "0.56016874" ]
0.66794413
0
Returns the storage implementation based on storage_id or infer it from the path. Defaults to the local filesystem.
def _get_storage(self, path, storage_id=None): if storage_id is None: fields = path.split(':', 1) if len(fields) == 2 and len(fields[0]) > 1: storage_id = fields[0] path = fields[1] if storage_id is not None: if storage_id not in self._storages: if self._config is None or storage_id not in self._config: raise ValueError('unknown storage identifier %s' % storage_id) config = self._config[storage_id] if config['type'] == 's3': credentials = config.get('aws_credentials', {}) client = storages.S3Storage( storage_id, config['bucket'], access_key_id=credentials.get('access_key_id'), secret_access_key=credentials.get('secret_access_key'), region_name=credentials.get('region_name'), assume_role=credentials.get('assume_role'), transfer_config=credentials.get('transfer_config')) elif config['type'] == 'swift': client = storages.SwiftStorage( storage_id, config['container'], auth_config=config.get('auth_config'), transfer_config=config.get('transfer_config')) elif config['type'] == 'ssh': client = storages.RemoteStorage( storage_id, config['server'], config['user'], config.get('password'), config.get('pkey'), port=config.get('port', 22), basedir=config.get('basedir')) elif config['type'] == 'http': client = storages.HTTPStorage( # pylint: disable=abstract-class-instantiated storage_id, config['get_pattern'], pattern_push=config.get('post_pattern'), pattern_list=config.get('list_pattern')) elif config['type'] == 'systran_corpusmanager': client = storages.CMStorages( storage_id, config.get('host_url'), account_id=config.get('account_id'), root_folder=config.get('root_folder')) elif config['type'] == 'local': client = storages.LocalStorage( storage_id, basedir=config.get("basedir")) else: raise ValueError( 'unsupported storage type %s for %s' % (config['type'], storage_id)) self._storages[storage_id] = client else: client = self._storages[storage_id] else: client = storages.LocalStorage() return client, client._internal_path(path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_storage(self, schema, storage, path, params=None):\n return self.storages[storage](schema, path, params)", "def get_storage_backend(self):\n return self.client.info()['Driver']", "def get_storage(local_path=None, redis_index=None):\n from config import STORAGE\n if STORAGE[\"Method\"] == \"local\":\n return LocalStorage(path=local_path or STORAGE.get(\"LocalPath\"))\n elif STORAGE[\"Method\"] == \"redis\":\n return RedisStorage(\n index=redis_index or STORAGE.get(\"RedisIndex\"),\n redis_url=STORAGE.get(\"RedisURL\")\n )\n else:\n raise ValueError(\"Invalid storage method\")", "def get_storage(path=None, options=None):\n path = path or settings.STORAGE\n option = options or {}\n options = options or settings.STORAGE_OPTIONS\n if not path:\n raise ImproperlyConfigured('You must specify a storage class using '\n 'DBBACKUP_STORAGE settings.')\n storage_module = import_module(path)\n return storage_module.Storage(**options)", "def storage_factory(self):\n return load_or_import_from_config(\n 'SIPSTORE_FILE_STORAGE_FACTORY', app=self.app\n )", "def get_storage_class(self) -> Type[Storage]:\n if self.storage_class is None:\n return _get_storage_class()\n return self.storage_class", "def get_storage(self, name):\r\n if name not in self._storages:\r\n for suffix, engine in self.STORAGE_MAP.iteritems():\r\n if name.endswith(suffix):\r\n self._storages[name] = engine(self.get_filepath(name))\r\n break\r\n\r\n if name in self._storages:\r\n return self._storages[name]\r\n else:\r\n raise KeyError('{} does not have a valid suffix'.format(name))", "def get_storage(self):\n return self.storage", "def get_storage(storage_dsn):\n storage_scheme = dsnparse.parse(storage_dsn).scheme\n storage_cls = STORAGE_REGISTRY.get(storage_scheme)\n if not storage_cls:\n logging.error(\"Can't find storage for given dsn.\")\n sys.exit(-1)\n return storage_cls(dsn=storage_dsn)", "def _get_storage_backend(fq_classname):\n LOG.debug('Running _get_storage_backend with fq_classname [%s]'\n % fq_classname)\n\n if not fq_classname:\n return None\n\n (modname, clname) = fq_classname.rsplit('.', 1)\n # A test import of the backend storage class should have been undertaken\n # at app startup in django_drf_filepond.apps.ready so any failure\n # importing the backend should have been picked up then.\n mod = importlib.import_module(modname)\n storage_backend = getattr(mod, clname)()\n LOG.info('Storage backend instance [%s] created...' 
% fq_classname)\n\n return storage_backend", "def storage(self) -> storage.Storage:\n raise ValueError('Not implemented.')", "def storage_get(context, storage_id):\n return _storage_get(context, storage_id)", "def storage_class(self) -> Optional[Sequence['outputs.CSIPowerStoreSpecDriverStorageClass']]:\n return pulumi.get(self, \"storage_class\")", "def get_storage_vol(self, cont_id, store_backend):\n if store_backend == 'devicemapper':\n dev_name = self.api.inspect_container(\n cont_id)['GraphDriver']['Data']['DeviceName']\n with open('/proc/mounts') as mounts:\n mounts = mounts.read()\n mnt_re = re.compile(r'{} (\\S*)'.format(dev_name))\n mnt_path = re.search(mnt_re, mounts).group(1)\n cont_vol = os.path.join(mnt_path, 'rootfs')\n return cont_vol\n elif store_backend == 'overlay2':\n cont_vol = self.api.inspect_container(\n cont_id)['GraphDriver']['Data']['MergedDir']\n return cont_vol\n\n else:\n raise NotSupportedStorageBackend('Unsupported storage backend')", "def get_storage(store: Optional[StorageEngine] = None) -> StorageEngine:\n if store is not None:\n return store\n else:\n if _storage_stack.top is not None:\n out: StorageEngine = _storage_stack.top\n return out\n else:\n raise RuntimeError(\"No Storage instance available.\")", "def get_filesystem_for_path(self, path: PathType) -> Type[Filesystem]:\n # Assume local path by default, but extract filesystem prefix if available.\n if isinstance(path, str):\n path_bytes = path.encode(\"utf-8\")\n elif isinstance(path, bytes):\n path_bytes = path\n else:\n raise ValueError(\"Invalid path type: %r.\" % path)\n result = re.match(b\"^([a-z0-9]+://)\", path_bytes)\n if result:\n scheme = result.group(1).decode(\"utf-8\")\n else:\n scheme = \"\"\n return self.get_filesystem_for_scheme(scheme)", "def storage_class(self) -> Optional[Sequence['outputs.CSIUnitySpecDriverStorageClass']]:\n return pulumi.get(self, \"storage_class\")", "def get_storage():\n settings = get_settings()\n sources = sum([source is not None for source in (settings.attach_dir,\n settings.attach_s3_bucket,\n settings.attach_gcs_bucket)])\n if sources < 1:\n raise Error(\"At least of the parameters must be set:\",\n \"attach_dir, attach_gcs_bucket, attach_s3_bucket\")\n elif sources > 1:\n raise Error(\"Only one of the parameters must be set:\",\n \"attach_dir, attach_gcs_bucket, attach_s3_bucket\")\n if settings.attach_dir is not None:\n storage = LocalStorage(settings.attach_dir)\n if settings.attach_s3_bucket is not None:\n storage = S3Storage(name=settings.attach_s3_bucket,\n endpoint=settings.attach_s3_endpoint,\n region=settings.attach_s3_region,\n access_key=settings.attach_s3_access_key,\n secret_key=settings.attach_s3_secret_key)\n if settings.attach_gcs_bucket is not None:\n storage = GCSStorage(name=settings.attach_gcs_bucket,\n key=settings.attach_gcs_key)\n return storage", "def get_storage_provider(uri):\n for provider in ProviderFactory.get_storage_providers():\n try:\n supports = provider.supports_storage(uri) # type: ignore[union-attr]\n except BaseException as e:\n communication.warn(f\"Couldn't test provider {provider}: {e}\")\n else:\n if supports:\n return provider(uri=uri) # type: ignore[call-arg]\n\n raise errors.DatasetProviderNotFound(uri=uri)", "def storage_class(self) -> Optional[Sequence['outputs.CSIVXFlexOSSpecDriverStorageClass']]:\n return pulumi.get(self, \"storage_class\")", "def storage_class(self) -> Optional[Sequence['outputs.CSIPowerMaxSpecDriverStorageClass']]:\n return pulumi.get(self, \"storage_class\")", "def 
get_storage_engine(settings=None):\n if not settings:\n settings = global_settings\n\n return _setup_engine(settings.STORAGE[\"engine\"], settings.STORAGE[\"params\"])", "def storage_factory(cls, prefix, storage_cls=None, storage_uri=None):\n\n key_prefix = '{}_{}_'.format(cls.__name__.upper(), prefix.upper())\n attr = '_{}_storage'.format(prefix)\n if not storage_cls and not storage_uri:\n if not hasattr(cls, attr):\n setattr(cls, attr, cls.storage_cls(cls.storage_uri, key_prefix))\n return getattr(cls, attr)\n return (storage_cls or cls.storage_cls)(storage_uri or cls.storage_uri, key_prefix)", "def storage_class(self) -> Optional[Sequence['outputs.CSIIsilonSpecDriverStorageClass']]:\n return pulumi.get(self, \"storage_class\")", "def storage_type(self) -> str:\n return pulumi.get(self, \"storage_type\")", "def get_swift_storage():\n\n return SwiftStorage(\n os_tenant_id=\"os_tenant_id\",\n os_tenant_name=\"os_tenant_name\",\n os_username=\"os_username\",\n os_password=\"os_password\",\n os_region_name=\"os_region_name\",\n os_storage_url=\"os_storage_url/ralph_logs_container\",\n )", "def _get_storage(self, key):\n return self._Storage(self, key)", "def storage_factory():\n return storage(transaction.manager, **kwargs)", "def _CreateStorageFile(self):\n return sqlite_file.SQLiteStorageFile(storage_type=self._storage_type)", "def getStorageObject(implementation, the_element):\n module=__import__(implementation)\n for i in implementation.split(\".\")[1:]:\n module = getattr(module, i)\n if module:\n cls=None\n for key in module.__dict__.keys():\n import inspect\n if inspect.isclass(getattr(module, key)) and inspect.getclasstree([getattr(module, key)], True)[0][0] == Storage:\n cls=getattr(module, key)\n break\n if cls:\n try:\n inst=object.__new__(cls)\n Storage.log.debug(\"class is %s\" %(cls))\n inst.__init__(element=the_element)\n connname=inst.getConnectionName()\n if not StorageConnections.has_key(connname):\n Storage.log.debug(\"Creating new storage connection %s %s\" %(connname, StorageConnections.keys()))\n StorageConnections[connname]=inst\n return inst\n else:\n Storage.log.debug(\"Returning already established storage connection %s\" %(connname))\n return StorageConnections[connname]\n except:\n import traceback\n traceback.print_exc()\n raise IncompatibleObjectException(cls, Storage)\n else:\n raise IncompatibleObjectException(getattr(module, key), Storage)\n else:\n raise ModuleNotFoundException(implementation)" ]
[ "0.7357804", "0.6921912", "0.68589514", "0.6835108", "0.68159646", "0.6788167", "0.66761404", "0.66322684", "0.65226316", "0.6512314", "0.6474266", "0.64128834", "0.63737255", "0.6356499", "0.63296413", "0.6297468", "0.6296833", "0.6200552", "0.6189922", "0.6164156", "0.6160513", "0.61586773", "0.6151906", "0.6150342", "0.6037884", "0.6025044", "0.6001336", "0.5980588", "0.59773654", "0.59584695" ]
0.7910017
0
Joins the paths according to the storage implementation.
def join(self, path, *paths): if not self.is_managed_path(path): return os.path.join(path, *paths) client, _ = self._get_storage(path) prefix, rel_path = self.parse_managed_path(path) return '%s:%s' % (prefix, client.join(rel_path, *paths)) # Only join the actual path.
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def join(self, path, *paths):", "def __combine_path(self, other):\n self.path = other.path + self.path", "def join_paths(path_1, path_2):\r\n a = lib_path.join(path_1, path_2)\r\n return a", "def join(\n self, store: \"FlattenedStorage\", lsuffix: str = \"\", rsuffix: str = \"\"\n ) -> \"FlattenedStorage\":\n if len(self) != len(store):\n raise ValueError(\n \"FlattenedStorages to be joined have to be of the same length!\"\n )\n if (self[\"length\"] != store[\"length\"]).any():\n raise ValueError(\n \"FlattenedStorages to be joined have to have same length chunks everywhere!\"\n )\n if lsuffix == rsuffix != \"\":\n raise ValueError(\"lsuffix and rsuffix may not be equal!\")\n rename = lsuffix != \"\" or rsuffix != \"\"\n if not rename:\n shared_elements = set(self._per_element_arrays).intersection(\n store._per_element_arrays\n )\n shared_chunks = set(self._per_chunk_arrays).intersection(\n store._per_chunk_arrays\n )\n shared_chunks.remove(\"start_index\")\n shared_chunks.remove(\"length\")\n shared_chunks.remove(\"identifier\")\n if len(shared_elements) > 0 or len(shared_chunks) > 0:\n raise ValueError(\n \"FlattenedStorages to be joined may have common arrays only if lsuffix or rsuffix are given!\"\n )\n\n for k, a in store._per_element_arrays.items():\n if k in self._per_element_arrays and rename:\n self._per_element_arrays[k + lsuffix] = self._per_element_arrays[k]\n k += rsuffix\n self._per_element_arrays[k] = a\n\n for k, a in store._per_chunk_arrays.items():\n if k not in (\"start_index\", \"length\", \"identifier\"):\n if k in self._per_chunk_arrays and rename:\n self._per_chunk_arrays[k + lsuffix] = self._per_chunk_arrays[k]\n k += rsuffix\n self._per_chunk_arrays[k] = a\n\n self._resize_elements(self._num_elements_alloc)\n self._resize_chunks(self._num_chunks_alloc)\n return self", "def testJoin(self):\r\n P=lambda p:ufsi.NativeUnixPath(p)\r\n data={\r\n # 1\r\n 'relativePath':\r\n ['/dir1/',P('dir2/fileBase.ext'),'/dir1/dir2/fileBase.ext'],\r\n\r\n # 2\r\n 'absolutePath':\r\n ['/dir1/',P('/dir2/fileBase.ext'),'/dir2/fileBase.ext'],\r\n\r\n # 3\r\n 'notSeparatorTerminatedPath':\r\n ['dir1',P('dir2/fileBase.ext'),'dir1/dir2/fileBase.ext'],\r\n\r\n # 4\r\n 'emptyPath':\r\n ['dir1',P(''),'dir1/'],\r\n\r\n # 5\r\n 'nonNativePath':\r\n ['dir1',ufsi.HttpPath('http://www.google.com.au/'),\r\n 'http://www.google.com.au/']\r\n }\r\n\r\n for k in data.iterkeys():\r\n p1=P(data[k][0])\r\n p2=data[k][1]\r\n r1=str(p1.join(p2))\r\n r2=data[k][2]\r\n self.assertEquals(r1,r2,\r\n '%s: join result was %r but should have been %r'\r\n %(k,r1,r2))", "def testJoinPath(self):\n test_file_path = self._GetTestFilePath(['utmp-linux_libc6'])\n self._SkipIfPathNotExists(test_file_path)\n\n test_helper = dfvfs_helpers.DFVFSFileSystemHelper(None)\n\n path_spec = path_spec_factory.Factory.NewPathSpec(\n dfvfs_definitions.TYPE_INDICATOR_OS, location=test_file_path)\n test_helper.OpenFileSystem(path_spec)\n\n path_segments = os.path.split(test_file_path)\n\n path = test_helper.JoinPath(path_segments)\n self.assertEqual(path, test_file_path)", "def join_path(self, path_parts):\n return os.path.sep.join(path_parts)", "def joinPath(path, *args):", "def _link_storage_dirs(self):\n logger.info(\"Linking user_media storage directory\")\n dirs = [\n (name, '%(project_root_src)s/' % env + name)\n for name in LINKED_DIRS\n ]\n\n for name, link_name in dirs:\n with hide(*fab_output_hides):\n storage_dir = os.path.join(MEDIA_STORAGE_ROOT, name)\n sudo('mkdir %s --parents' % storage_dir)\n sudo('chown %s %s' % 
(F_CHOWN, storage_dir))\n sudo('chmod u+rw,g+rw,o+r,o-w %s' % storage_dir)\n\n sudo('ln -s %s %s' % (storage_dir, link_name))", "def _path_join(self, path):\n return os.path.join(self._path, path)", "def dojoin(ipath1,ipath2,opath):\n r1 = '%s.map' % ipath1\n r2 = '%s.map' % ipath2\n if not mapsMatch(r1,r2):\n print '### maps %s and %s do not match' % (r1,r2)\n sys.exit(1)\n outpath = '%s.map' % opath\n shutil.copyfile(r1,outpath)\n r1 = '%s.eigenstratgeno' % ipath1\n r2 = '%s.eigenstratgeno' % ipath2\n outpath = '%s.eigenstratgeno' % opath\n joinRows(r1,r2,outpath)\n outpath = '%s.ind' % opath\n r1 = '%s.ind' % ipath1\n r2 = '%s.ind' % ipath2\n joinInds(r1,r2,outpath)", "def multi_join(paths, *path_segments):\n return [os.path.join(*(path_segments + (path,))) for path in paths]", "def aix_path_join(path_one, path_two):\n if path_one.endswith('/'):\n path_one = path_one.rstrip('/')\n\n if path_two.startswith('/'):\n path_two = path_two.lstrip('/')\n\n final_path = path_one + '/' + path_two\n return final_path", "def join(path, *paths: str) -> str:\n pass", "def join_path(list):\n return functools.reduce(os.path.join, list)", "def appendPath(paths: List[unicode]) -> unicode:\n ...", "def join_infile_path(*paths):\n # Join path components\n path = '/'.join(paths)\n # Correct double slashes, if any is present\n path = path.replace('//', '/')\n\n return path", "def joinwith(self, path):\n\n return path.joinpath(self._value)", "def join(*paths):\r\n path = \"\"\r\n for component in paths:\r\n path += (\"/\" if path and not path.endswith(\"/\") else \"\") + component.replace(\r\n \"\\\\\", \"/\"\r\n )\r\n return path", "def test_AppendPath(self) -> None:\n p1 = r'C:\\dir\\num\\one;C:\\dir\\num\\two'\n p2 = r'C:\\mydir\\num\\one;C:\\mydir\\num\\two'\n # have to include the pathsep here so that the test will work on UNIX too.\n p1 = AppendPath(p1, r'C:\\dir\\num\\two', sep=';')\n p1 = AppendPath(p1, r'C:\\dir\\num\\three', sep=';')\n assert p1 == r'C:\\dir\\num\\one;C:\\dir\\num\\two;C:\\dir\\num\\three', p1\n\n p2 = AppendPath(p2, r'C:\\mydir\\num\\three', sep=';')\n p2 = AppendPath(p2, r'C:\\mydir\\num\\one', sep=';')\n assert p2 == r'C:\\mydir\\num\\two;C:\\mydir\\num\\three;C:\\mydir\\num\\one', p2\n\n # check (only) last one is kept if there are dupes in new\n p3 = r'C:\\dir\\num\\one'\n p3 = AppendPath(p3, r'C:\\dir\\num\\two;C:\\dir\\num\\three;C:\\dir\\num\\two', sep=';')\n assert p3 == r'C:\\dir\\num\\one;C:\\dir\\num\\three;C:\\dir\\num\\two', p3", "def concatenate_from_paths(cls, paths, schema, **kwargs):\n streams = [open(path, \"rU\") for path in paths]\n return cls.concatenate_from_streams(streams, schema, **kwargs)", "def str_join(paths: []):\n return \"/\".join(paths)", "def combine_paths(paths: Iterable[str], prepend: str, separator: str) -> str:\n\n paths = [\"{}{}\".format(prepend, p) for p in paths]\n return separator.join(paths)", "def normalized_join(path1: str, *pathsN) -> str:\n return normalized_path(os.path.join(path1, *pathsN))", "def join_path(tuple_path):\n return os.path.join(tuple_path[1], tuple_path[1] + tuple_path[2])", "def append_paths(main_paths, paths):\n\tpaths = {key: np.vstack((main_paths[key], paths[key])) for key in main_paths.keys()}\n\treturn paths", "def join_path(base, *args):\n\tfilepath = base\n\tfor arg in args:\n\t\tfilepath = filepath + cfg.SEP_COMM + arg\n\tfilepath = filepath.replace( '//', cfg.SEP_COMM)\n\treturn filepath", "def get_all_path(self, conf):\n\t\tpass", "def _get_prefix_and_relative_path(self, path_list):\n # example of path: 
s3://custom-bucket/exp-1/exp-1-join-id-time-stamp/train\n # use s3 bucket as prefix\n # allow data from different experiments but in same account\n parts = path_list[0].split(\"/\")\n shared_prefix = \"/\".join(parts[0:3]) # s3://custom-bucket\n key_path_list = []\n\n for path in path_list:\n parts = path.split(\"/\")\n prefix = \"/\".join(parts[0:3])\n if prefix != shared_prefix:\n logger.error(\n f\" Prefix `{prefix}` is different from the shared prefix '{shared_prefix}'. \"\n \"Data in the list are not coming from same s3 bucket.\"\n )\n object_path = \"/\".join(parts[3:])\n key_path_list.append(object_path)\n\n return shared_prefix, key_path_list", "def join(path, *paths):\n\n for p in paths:\n if p.startswith(\"/\"):\n path = p\n elif p != \"\":\n path += (\"\" if path == \"\" or path.endswith(\"/\") else \"/\") + p\n return path" ]
[ "0.6690264", "0.6219887", "0.6160995", "0.6084246", "0.59763473", "0.5885998", "0.5878738", "0.58280677", "0.580559", "0.57253736", "0.56920284", "0.56796026", "0.5673265", "0.56396765", "0.5624865", "0.55687606", "0.5559663", "0.55429184", "0.5537703", "0.55269426", "0.5525742", "0.551371", "0.5485855", "0.54661214", "0.5461982", "0.5439879", "0.54283285", "0.5415135", "0.5403664", "0.53456527" ]
0.6259977
1
Splits the path according to the storage implementation.
def split(self, path):
    if not self.is_managed_path(path):
        return os.path.split(path)
    client, _ = self._get_storage(path)
    prefix, rel_path = self.parse_managed_path(path)
    return ("%s:" % prefix,) + client.split(rel_path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def splitPath(self, path):\n return os.path.split(path)", "def split(path):\r\n if path.lower().startswith(\"smb://\"):\r\n if '/' not in path[6:]:\r\n path = path.replace(\"smb://\", \"smb:///\", 1)\r\n return path.rsplit('/', 1)\r\n else:\r\n return os.path.split(path)", "def _split_path(self, path):\n if path.strip() in (None, \"\", \"/\"):\n return (None, None)\n tableName, primKey = util.save_split(path.strip(\"/\"), \"/\", 1)\n # _logger.debug(\"'%s' -> ('%s', '%s')\" % (path, tableName, primKey))\n return (tableName, primKey)", "def split_string_path(base, path):\n for i in range(len(path)):\n if isinstance(base, string_types):\n return path[:i], path[i:]\n base = base[path[i]]\n return path, ()", "def testSplitPath(self):\n test_file_path = self._GetTestFilePath(['utmp-linux_libc6'])\n self._SkipIfPathNotExists(test_file_path)\n\n test_helper = dfvfs_helpers.DFVFSFileSystemHelper(None)\n\n path_spec = path_spec_factory.Factory.NewPathSpec(\n dfvfs_definitions.TYPE_INDICATOR_OS, location=test_file_path)\n test_helper.OpenFileSystem(path_spec)\n\n expected_path_segments = list(pathlib.Path(test_file_path).parts)\n expected_path_segments.pop(0)\n\n path_segments = test_helper.SplitPath(test_file_path)\n self.assertEqual(path_segments, expected_path_segments)", "def splitpath(self, full=False):\n path = _os.path.split(self.__str__())\n if full == True:\n return self.__str__().split(os.path.sep)\n else:\n return [getpath(path[0], custom=True), path[1]]", "def split_path(self, path):\n path = path.strip(\"/\")\n return path.split(\"/\") if len(path) > 0 else []", "def zenpathsplit(self, path):\n return zenpathsplit(path)", "def splitpath(path):\n\n # FIXME perhaps call op.split repetitively would be better.\n #s = string.split( path, '/' ) # we work with fwd slash only inside.\n\n#We have decided to use all kind of separator\n s = []\n while True:\n first, second = op.split(path)\n s.append(second)\n if first == \"\":\n break\n else:\n path = first\n s.reverse()\n if len(s) == 1 and s[0] == \"\":\n s = []\n return s", "def __split_path(path: str) -> List[str]:\n return [part for part in path.split('/') if part] # Splits path at '/', handles extra slashes in the process", "def _split_key(cls, logical_key):\n if isinstance(logical_key, str):\n path = logical_key.split('/')\n elif isinstance(logical_key, (tuple, list)):\n path = logical_key\n else:\n raise TypeError('Invalid logical_key: %r' % logical_key)\n return path", "def split_path(self, path):\n path = os.path.splitdrive(path)[1][1:]\n folders = []\n while 1:\n path, folder = os.path.split(path)\n if folder != \"\" and folder:\n folders.append(folder)\n if len(path) == 0:\n return folders[::-1]\n else:\n if path != \"\" and path:\n folders.append(path)\n break\n folders.reverse()\n return folders", "def split_path(path):\n parts = []\n path, end = os.path.split(path)\n while end:\n parts.append(end)\n path, end = os.path.split(path)\n\n if path:\n parts.append(path)\n parts.reverse()\n return parts", "def pathSplit(path):\n path = re.split('/|\\\\\\\\', path)\n return path", "def split_path(full_path, root_path):\n root_len = len(root_path)\n parsed_list = full_path[root_len+1:].split('/') \n \n return parsed_list", "def splitdrive(path):\n relative = get_instance(path).relpath(path)\n drive = path.rsplit(relative, 1)[0]\n if drive and not drive[-2:] == '//':\n # Keep \"/\" tail side\n relative = '/' + relative\n drive = drive.rstrip('/')\n return drive, relative", "def split_path(path:str):\n if path is None or len(path) == 0:\n 
return '', '', ''\n path = sanitize_path(path)\n folder, filename = os.path.split(path)\n ext = ''\n if '.' in filename:\n filename, ext = os.path.splitext(filename)\n # handle double ext, like 'mode.pth.tar'\n filename, ext2 = os.path.splitext(filename)\n ext = ext2 + ext\n else:\n folder = os.path.join(folder, filename)\n filename = ''\n return folder, filename, ext", "def split_path(s):\n dirname, filename = os.path.split(s)\n fname_noext, ext = os.path.splitext(filename)\n levels = dirname.strip('/').split(os.path.sep)[2:][-2:]\n return PATH_SPLIT.split(' '.join(levels + [fname_noext]))", "def splits(self):\n path = os.path.join(self.path(), 'splits')\n os.makedirs(path, exist_ok=True)\n return path", "def pathsplit(path):\n stem, basename = os.path.split(path)\n if stem == '':\n return (basename,)\n if stem == path: # fixed point, likely '/'\n return (path,)\n return pathsplit(stem) + (basename,)", "def path_split(path):\n res = []\n while path:\n path, tail = os.path.split(path)\n res.insert(0, tail)\n if path == '/':\n res.insert(0, '/')\n break\n return res", "def SplitTestPath(test_result, test_path_format):\n if test_path_format == TELEMETRY_TEST_PATH_FORMAT:\n separator = '/'\n elif test_path_format == GTEST_TEST_PATH_FORMAT:\n separator = '.'\n else:\n raise ValueError('Unknown test path format: %s' % test_path_format)\n\n test_path = test_result['testPath']\n if separator not in test_path:\n raise ValueError('Invalid test path: %s' % test_path)\n\n return test_path.split(separator, 1)", "def splitPath(path):\n return tuple(\n element for element in os.path.split(path.rstrip(os.path.sep)) if element\n )", "def split_datastore_path(datastore_path):\n spl = datastore_path.split('[', 1)[1].split(']', 1)\n path = \"\"\n if len(spl) == 1:\n datastore_name = spl[0]\n else:\n datastore_name, path = spl\n return datastore_name, path.strip()", "def _split(self, uri):\n if '/' in uri:\n return uri.split('/', 1)\n return [uri, None]", "def testSplit(self):\r\n data={\r\n # 1\r\n 'emptyPath':\r\n ['',{'fileBase':'',\r\n 'fileExt':None,\r\n 'dirs':[]}],\r\n\r\n # 2\r\n 'fileBaseOnly':\r\n ['fileBase',{'fileBase':'fileBase',\r\n 'fileExt':None,\r\n 'dirs':[]}],\r\n \r\n # 3\r\n 'fileExtOnly':\r\n ['.ext',{'fileBase':'',\r\n 'fileExt':'ext',\r\n 'dirs':[]}],\r\n\r\n # 4\r\n 'fileBaseEmptyFileExt':\r\n ['fileBase.',{'fileBase':'fileBase',\r\n 'fileExt':'',\r\n 'dirs':[]}],\r\n\r\n # 5\r\n 'fullFileName':\r\n ['fileBase.ext',{'fileBase':'fileBase',\r\n 'fileExt':'ext',\r\n 'dirs':[]}],\r\n\r\n # 6\r\n 'singleDir':\r\n ['dir/',{'fileBase':'',\r\n 'fileExt':None,\r\n 'dirs':['dir']}],\r\n\r\n # 7\r\n 'twoDirs':\r\n ['dir1/dir2/',{'fileBase':'',\r\n 'fileExt':None,\r\n 'dirs':['dir1','dir2']}],\r\n\r\n # 8\r\n 'absolutePathTwoDirsFullFileName':\r\n ['/dir1/dir2/fileBase.ext',{'fileBase':'fileBase',\r\n 'fileExt':'ext',\r\n 'dirs':['','dir1','dir2']}],\r\n\r\n # 9\r\n 'dirWithAPeriod':\r\n ['/dir.dirExt/fileBase.fileExt',{'fileBase':'fileBase',\r\n 'fileExt':'fileExt',\r\n 'dirs':['','dir.dirExt']}]\r\n }\r\n\r\n for k in data.iterkeys():\r\n s1=ufsi.NativeUnixPath(data[k][0]).split()\r\n s2=data[k][1]\r\n for s2k in s2.iterkeys():\r\n self.assertEquals(s1[s2k],s2[s2k],\r\n '%s: Item %s of dict %r should be %s'\r\n %(k,s2k,s1,s2[s2k]))", "def split_all(path):\r\n components = []\r\n path = path.lstrip('/')\r\n while path:\r\n head, tail = os.path.split(path)\r\n if tail:\r\n components.insert(0, tail)\r\n elif head == path:\r\n components.insert(0, head)\r\n break\r\n path = head\r\n return 
components", "def split_path(path):\n\n if type(path) != str:\n return []\n\n # replace multiple occurrences of \"/\" with just one,\n # i.e. \"page1//page2///page3\" -> \"page1/page2/page3\"\n path = re.sub('/+', '/', path)\n path = path.split(\"/\") # form a list of path steps\n path = [x.lower() for x in path if x != \"\"] # filter out empty strings, convert to lowercase\n\n return path", "def split_all(path):\r\n result = []\r\n head = path\r\n while head:\r\n head2, tail = os.path.split(head)\r\n if head2 == head:\r\n break # reached root on Unix or drive specification on Windows\r\n head = head2\r\n result.insert(0, tail)\r\n if head:\r\n result.insert(0, head)\r\n return result", "def filenameSplit (p):\n\tfrom os.path import split as splitPath, splitdrive, splitext\n\t\n\tsplt = splitPath (p)\n\tdisk,dir_ = splitdrive(splt[0])\n\ttry:\n\t\tif disk[1] != \":\":\n\t\t\traise IndexError\n\texcept IndexError:\n\t\tdisk,dir_ = \"\", splt[0]\n\tname,ext = splitext(splt[1])\n\treturn disk,dir_,name,ext" ]
[ "0.7338659", "0.704846", "0.6959433", "0.6954841", "0.68820107", "0.6753386", "0.6746454", "0.6732698", "0.66898245", "0.66167927", "0.6399389", "0.6367646", "0.6342386", "0.6329785", "0.6303435", "0.6253679", "0.6211378", "0.61924195", "0.61387295", "0.6095969", "0.6065212", "0.6058943", "0.60524994", "0.60488886", "0.60454315", "0.6042618", "0.60345364", "0.6034239", "0.6024602", "0.6007647" ]
0.76428443
0
Retrieves a file from remote_path to local_path.
def get_file(self, remote_path, local_path, storage_id=None):
    return self.get(remote_path, local_path, directory=False, storage_id=storage_id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(host, username, remotepath, localpath=None, port=22):\n log = logging.getLogger('device.remotecall')\n log.info('geting file from remote:%s -> %s', remotepath, localpath)\n if not localpath:\n localpath = os.path.split(remotepath)[1]\n cmd = 'scp -P %s %s@%s:%s %s' % (port, username, host, remotepath, localpath)\n try:\n null = open('/dev/null', 'w')\n subprocess.call(shlex.split(cmd), stdin=subprocess.PIPE, stdout=null, stderr=null)\n null.close()\n except Exception as e:\n log.debug('Could not retrieve %s file from %s: Error %s', remotepath, host, e)", "def _get(self, remote_filename, local_path):\n\n with local_path.open('wb') as local_file:\n file_id = self.get_file_id(remote_filename)\n if file_id is None:\n raise BackendException(\n 'File \"%s\" cannot be downloaded: it does not exist' %\n remote_filename)\n\n response = self.http_client.get(\n self.content_url + '/nodes/' + file_id + '/content', stream=True)\n response.raise_for_status()\n for chunk in response.iter_content(chunk_size=DEFAULT_BUFFER_SIZE):\n if chunk:\n local_file.write(chunk)\n local_file.flush()", "def get_file(self, remote_path, local_path):\n try:\n with SCPClient(self.ssh_client.get_transport()) as scp:\n scp.get(remote_path, local_path)\n except SCPException: raise SCPException.message", "def get_remote_file(sid, path):\n with slycat.web.server.remote.get_session(sid) as session:\n return session.get_file(path)", "def download(self, remotepath, localpath):\n sftp = self.connection.open_sftp()\n if isinstance(remotepath, str):\n sftp.get(remotepath, localpath)\n else:\n for path in remotepath:\n filename = os.path.split(path)[-1]\n sftp.get(path, localpath + \"/\" + filename)\n sftp.close()", "def get(self, remote_path, local_path=None):\n self._openSFTPConnection() \n self.sftp.get(remote_path, local_path)", "def get_remote_file_server(client, sid, path):\n with slycat.web.server.remote.get_session_server(client, sid) as session:\n return session.get_file(path)", "def get(self, remote_path, local_path):\n scp_client = SCPClient(self.ssh.get_transport())\n scp_client.get(remote_path, local_path, recursive=True)", "def fetch(args):\n storage, remote_path = split_storage(args.remote)\n\n local_path = args.local\n if local_path is None:\n _, local_path = os.path.split(remote_path)\n\n local_path_exists = os.path.exists(local_path)\n if local_path_exists and not args.force and not args.update:\n sys.exit(\"Local file %s already exists, not overwriting.\" % local_path)\n\n directory, _ = os.path.split(local_path)\n if directory:\n makedirs(directory, exist_ok=True)\n\n osf = _setup_osf(args)\n project = osf.project(args.project)\n\n store = project.storage(storage)\n for file_ in store.files:\n if norm_remote_path(file_.path) == remote_path:\n if local_path_exists and not args.force and args.update:\n if file_.hashes.get('md5') == checksum(local_path):\n print(\"Local file %s already matches remote.\" % local_path)\n break\n with open(local_path, 'wb') as fp:\n file_.write_to(fp)\n\n # only fetching one file so we are done\n break", "def FileGet(self, remote_paths: list, local_destination: str):\n lastChar = local_destination[len(local_destination)-1]\n if lastChar != '/':\n local_destination += '/'\n\n try:\n filegen = (connectme_pb2.FilePath(path=p) for p in remote_paths)\n chunks = self.filemanager.Get(filegen)\n self.fileChunkReceiver(chunks, True, local_destination)\n except grpc.RpcError as e:\n status_code = e.code() # status_code.name and status_code.value\n if grpc.StatusCode.NOT_FOUND == 
status_code:\n raise FileNotFoundError(e.details()) from e\n else:\n # pass any other gRPC errors to user\n raise e", "def read_file(remote_path):\n conn = _connection()\n try:\n assert conn.connect(IP_ADDRESS, SMB_PORT)\n bytesIO = BytesIO()\n conn.retrieveFile(NAME_OF_SMB_SHARE, remote_path, bytesIO)\n finally:\n conn.close()\n bytesIO.seek(0)\n return bytesIO", "def _download_file(self, artifact_path, local_path):\n full_path = self.base_artifact_path / artifact_path\n with self.managed_folder.get_file(str(full_path)) as remote_file:\n with open(local_path, \"wb\") as local_file:\n for line in remote_file:\n local_file.write(line)", "def download_file(remote_file, local_file=None, key_filename=None, hostname=None, username=None) -> None:\n if local_file is None: # pragma: no cover\n local_file = remote_file\n with get_connection(\n hostname=hostname, username=username, key_filename=key_filename\n ) as connection: # pragma: no cover\n try:\n sftp = connection.open_sftp()\n sftp.get(remote_file, local_file)\n finally:\n sftp.close()", "def ReadRemoteFile(url) -> bytes:\n local_url = download_util.DownloadResource(url)\n return file_util.OpenFile(local_url).read()", "def getFile( self, path, localPath = False ):\n res = self.__checkArgumentFormat( path )\n if not res['OK']:\n return res\n urls = res['Value']\n successful = {}\n failed = {}\n for src_url in urls:\n fileName = os.path.basename( src_url )\n if localPath:\n dest_file = \"%s/%s\" % ( localPath, fileName )\n else:\n dest_file = \"%s/%s\" % ( os.getcwd(), fileName )\n gLogger.debug( \"DIPStorage.getFile: Executing transfer of %s to %s\" % ( src_url, dest_file ) )\n res = self.__getFile( src_url, dest_file )\n if res['OK']:\n successful[src_url] = res['Value']\n else:\n failed[src_url] = res['Message']\n resDict = {'Failed':failed, 'Successful':successful}\n return S_OK( resDict )", "def scp_get_file(self, source_file, dest_file):\n self.scp_client.get(source_file, dest_file)", "def ftp_get_command(connection, remote_path, local_path):\n try:\n ftp = ftplib.FTP(host=connection.host,\n user=connection.username,\n passwd=connection.password)\n ftp.cwd(os.path.dirname(remote_path))\n name = os.path.basename(remote_path)\n LOG.debug(_(\"ftp GET %(remote_path)s to: %(local_path)s\") % locals())\n with open(local_path, 'w') as ftpfile:\n ftpcmd = 'RETR %s' % name\n ftp.retrbinary(ftpcmd, ftpfile.write)\n ftp.close()\n except Exception:\n LOG.error(_(\"File transfer from PowerVM manager failed\"))\n raise exception.PowerVMFTPTransferFailed(ftp_cmd='GET',\n source_path=remote_path, dest_path=local_path)", "def read_remote_file(remote_command_executor, file_path):\n logging.info(f\"Retrieving remote file {file_path}\")\n result = remote_command_executor.run_remote_command(f\"cat {file_path}\")\n assert_that(result.failed).is_false()\n return result.stdout.strip()", "def get_remote_file(connected_socket, ip, port):\n connected_socket.sendto(bytes(\"get\", \"UTF-8\"), (ip, port))\n remote_path = input(\"Path to Remote File to Get: \")\n connected_socket.sendto(bytes(remote_path, \"UTF-8\"), (ip, port))\n status_message_bytes = connected_socket.recv(BUFFER_SIZE)\n status_message = status_message_bytes.decode(\"UTF-8\")\n if \"Error\" not in status_message:\n file_bytes = bytes()\n loop = True\n while loop: # Reads until all bytes of the file have been received\n try:\n file_bytes += connected_socket.recv(BUFFER_SIZE)\n except socket.timeout: # Time out shows that no more data exists in the socket buffer\n loop = False\n print(\"Expected: \" + 
status_message + \" bytes | Found: \" + str(len(file_bytes)))\n local_path = input(\"Path to Local Destination: \")\n try:\n with open(local_path, \"wb\") as file:\n file.write(file_bytes)\n print(\"File Saved to \" + local_path)\n except Exception as e:\n print(e)\n else:\n print(status_message)", "def receive_file(username, remote_file, local_file, server='euler.ethz.ch'):\n command = 'scp {0}@{1}:{2} {3}'.format(username, server, remote_file, local_file)\n local_command(command)", "def remote(self, requests, file, remoteHost):\n # Set the source and dest paths\n remote_url = self.base_url + '/remote?file=' + file + \"&host=\" + remoteHost\n\n print(\"Making remote request: \" + remote_url)\n\n r = requests.get(remote_url, max_price=10)\n\n print(\"Remote request completed.\")\n\n return r.json()", "def get(self, remotepath, localpath=None, hadoop=False):\n hadoop = hadoop or remotepath.startswith(\"hdfs\")\n paths = self._sftp_paths(localpath=localpath, remotepath=remotepath)\n if hadoop:\n tmp_path = \"/tmp/{}_{}\".format(\n paths[\"localpath\"].replace(\"/\", \"_\"), time.time()\n )\n self.hadoop(\"get -f\", remotepath, tmp_path)\n paths[\"remotepath\"] = tmp_path\n with self._ssh.open_sftp() as sftp:\n sftp.get(\n remotepath=paths[\"remotepath\"],\n localpath=paths[\"localpath\"],\n callback=self._sftp_progress,\n )\n if hadoop:\n self.exec(\"rm\", paths[\"remotepath\"])\n return", "def read(self, local_path): # noqa: D402\n data_location = self.download_url\n data_location = rewrite_s3_links_locally(data_location)\n response = requests.get(data_location)\n write_file_locally(response.content, local_path)", "def download_file(self, remote_file):\n remote_file.download()", "def get_remote_file(url):\n # Disable the proxies by not trusting the env\n session = requests.Session()\n session.trust_env = False\n\n # Make the request\n requests.packages.urllib3.disable_warnings()\n try:\n r = session.get(url, verify=False)\n except requests.exceptions.RequestException as e:\n # catastrophic error. 
bail.\n print(e)\n sys.exit(1)\n\n r = session.get(url, verify=False)\n remote_file = r.text\n return remote_file", "def get_remote_file(url, success=200, timeout=10):\n try:\n app.logger.info(\"GET: %s\" % url)\n auth = None\n res = requests.get(url, stream=True, timeout=timeout, auth=auth)\n if res.status_code == success:\n return res.headers.get('Content-Type', 'application/octet-stream'), res.raw.data\n except:\n pass\n return None, None", "def downloadFile(remote_path, fobj):\n logger.msg(\n \"downloading file\", remote_path=remote_path, function='downloadFile'\n )\n\n def file_writer(data):\n fobj.write(data)\n\n remote_path = remote_path.encode('utf-8')\n r = yield treq.get(remote_path, timeout=5)\n try:\n yield treq.collect(r, file_writer)\n except Exception as e:\n print e\n raise", "def get(self, remote_path, local_path='',\n recursive=False, preserve_times=False):\n if not isinstance(remote_path, (list, tuple)):\n remote_path = [remote_path]\n remote_path = [self.sanitize(asbytes(r)) for r in remote_path]\n self._recv_dir = local_path or os.getcwd()\n self._rename = (len(remote_path) == 1 and\n not os.path.isdir(os.path.abspath(local_path)))\n if len(remote_path) > 1:\n if not os.path.exists(self._recv_dir):\n raise SCPException(\"Local path '%s' does not exist\" %\n asunicode(self._recv_dir))\n elif not os.path.isdir(self._recv_dir):\n raise SCPException(\"Local path '%s' is not a directory\" %\n asunicode(self._recv_dir))\n rcsv = (b'', b' -r')[recursive]\n prsv = (b'', b' -p')[preserve_times]\n self.channel = self._open()\n self._pushed = 0\n self.channel.settimeout(self.socket_timeout)\n self.channel.exec_command(b\"scp\" +\n rcsv +\n prsv +\n b\" -f \" +\n b' '.join(remote_path))\n self._recv_all()\n self.close()", "def download(conn, remotepath, localpath, filter = None, ignore_invalid = False, chunk_size = 16000):\n if conn.modules.os.path.isdir(remotepath):\n download_dir(conn, remotepath, localpath, filter)\n elif conn.modules.os.path.isfile(remotepath):\n download_file(conn, remotepath, localpath, chunk_size)\n else:\n if not ignore_invalid:\n raise ValueError(\"cannot download %r\" % (remotepath,))", "def get_local_file(self, no_copy=False):\n return self.get_file(uri_type=URI_LOCAL, no_copy=no_copy)" ]
[ "0.8135602", "0.8111427", "0.7951518", "0.750407", "0.7486695", "0.7328098", "0.7257956", "0.7212995", "0.6986036", "0.69763374", "0.69627047", "0.6884778", "0.6879238", "0.6876323", "0.68586385", "0.6786958", "0.671621", "0.67073643", "0.6648413", "0.6631423", "0.66255814", "0.6593505", "0.65933764", "0.65515244", "0.65270144", "0.6436487", "0.6418469", "0.63787967", "0.6352862", "0.6309808" ]
0.83529526
0
Retrieves a full directory from remote_path to local_path.
def get_directory(self, remote_path, local_path, storage_id=None):
    return self.get(remote_path, local_path, directory=True, storage_id=storage_id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getDirectory( self, path, localPath = False ):\n res = self.__checkArgumentFormat( path )\n if not res['OK']:\n return res\n urls = res['Value']\n\n failed = {}\n successful = {}\n gLogger.debug( \"DIPStorage.getDirectory: Attempting to get local copies of %s directories.\" % len( urls ) )\n transferClient = TransferClient( self.url )\n for src_dir in urls:\n if localPath:\n dest_dir = localPath\n else:\n dest_dir = os.getcwd()\n if not os.path.exists( dest_dir ):\n os.mkdir( dest_dir )\n res = transferClient.receiveBulk( dest_dir, src_dir )\n if res['OK']:\n gLogger.debug( \"DIPStorage.getDirectory: Successfully got local copy of %s\" % src_dir )\n successful[src_dir] = {'Files':0, 'Size':0}\n else:\n gLogger.error( \"DIPStorage.getDirectory: Failed to get entire directory.\", src_dir )\n failed[src_dir] = res['Message']\n resDict = {'Failed':failed, 'Successful':successful}\n return S_OK( resDict )", "def _remote_path(self):\n return self._remote_dir", "def remote_path(self, volume):\n nfs_share = volume['provider_location']\n share = nfs_share.split(':')[1].rstrip('/')\n return '%s/%s/volume' % (share, volume['name'])", "def download(self, remotepath, localpath):\n sftp = self.connection.open_sftp()\n if isinstance(remotepath, str):\n sftp.get(remotepath, localpath)\n else:\n for path in remotepath:\n filename = os.path.split(path)[-1]\n sftp.get(path, localpath + \"/\" + filename)\n sftp.close()", "def localpath(self, *args):\n return os.path.join(os.path.expanduser(self.serverfiles_dir), *args)", "def get(self, remote_path, local_path):\n scp_client = SCPClient(self.ssh.get_transport())\n scp_client.get(remote_path, local_path, recursive=True)", "def get(host, username, remotepath, localpath=None, port=22):\n log = logging.getLogger('device.remotecall')\n log.info('geting file from remote:%s -> %s', remotepath, localpath)\n if not localpath:\n localpath = os.path.split(remotepath)[1]\n cmd = 'scp -P %s %s@%s:%s %s' % (port, username, host, remotepath, localpath)\n try:\n null = open('/dev/null', 'w')\n subprocess.call(shlex.split(cmd), stdin=subprocess.PIPE, stdout=null, stderr=null)\n null.close()\n except Exception as e:\n log.debug('Could not retrieve %s file from %s: Error %s', remotepath, host, e)", "def get_remote_path(self, local_path, mapped_paths):\n return self.get_local_path(local_path, mapped_paths, reverse=True)", "def _staf_dir_copy(self, local_path, remote_path):\n\n staf_request = ('COPY DIRECTORY \"{0}\" TODIRECTORY \"{1}\" TOMACHINE \"{2}\" RECURSE '\n 'KEEPEMPTYDIRECTORIES'.format(unix_style_path(local_path),\n unix_style_path(remote_path),\n self._sut.network_address))\n\n result = self._staf_handle.submit('local', 'fs', staf_request)\n\n if result.rc != result.Ok:\n raise CoreError(result.result)", "def local_path(self):\n if self.repo_path:\n return self.repo_path\n tmpdir = PurePath(tempfile.gettempdir())\n return str(tmpdir.joinpath('harvest', self.org, self.repo))", "def _get_local_dest(self, path: Path) -> Path:\n dest = \"\"\n\n if str(path).startswith(\"~\"):\n path = path.relative_to(\"~\")\n\n if self.category == \"global\":\n dest = f\"{self.local_base}/global/{path}\"\n elif self.category == \"local\":\n dest = f\"{self.local_base}/local/{path}\"\n else:\n dest = f\"{self.local_base}/custom/{path}\"\n\n return Path(dest)", "def get_file(self, remote_path, local_path, storage_id=None):\n return self.get(remote_path, local_path, directory=False, storage_id=storage_id)", "def resolve_remote_files(config, local_dir, storage_client):\n\n with 
monitor_activity() as monitor:\n\n def _map_fn(value):\n if not isinstance(value, str) or not storage_client.is_managed_path(value):\n return value\n storage_id, remote_path = storage_client.parse_managed_path(value)\n if remote_path and remote_path[0] == \"/\":\n remote_path = remote_path[1:]\n local_path = os.path.join(local_dir, storage_id, remote_path)\n # can be a file or a directory\n storage_client.get(remote_path, local_path, storage_id=storage_id)\n monitor.notify()\n return local_path\n\n return _map_config_fn(config, _map_fn)", "def in_rwd(path):\n return os.path.join(env.remote_workdir, path)", "def get_remote_working_dir(self):\n return self.remote_working_dir", "def download_cluster(self, remotepath, localpath, merge=False):\n cget = \"getmerge\" if merge else \"get\"\n if isinstance(remotepath, str):\n filename = os.path.split(localpath)[-1]\n self.execute_command(\n \"hdfs dfs -{2} {0} {1}\".format(remotepath, filename, cget))\n self.download(filename, localpath)\n self.execute_command(\"rm {0}\".format(filename))\n else:\n tod = []\n for afile in remotepath:\n filename = os.path.split(afile)[-1]\n self.execute_command(\n \"hdfs dfs -{2} {0} {1}\".format(afile, filename, cget))\n tod.append(filename)\n self.download(tod, localpath)\n for afile in tod:\n self.execute_command(\"rm {0}\".format(afile))\n\n return remotepath", "def get(self, remote_path, local_path='',\n recursive=False, preserve_times=False):\n if not isinstance(remote_path, (list, tuple)):\n remote_path = [remote_path]\n remote_path = [self.sanitize(asbytes(r)) for r in remote_path]\n self._recv_dir = local_path or os.getcwd()\n self._rename = (len(remote_path) == 1 and\n not os.path.isdir(os.path.abspath(local_path)))\n if len(remote_path) > 1:\n if not os.path.exists(self._recv_dir):\n raise SCPException(\"Local path '%s' does not exist\" %\n asunicode(self._recv_dir))\n elif not os.path.isdir(self._recv_dir):\n raise SCPException(\"Local path '%s' is not a directory\" %\n asunicode(self._recv_dir))\n rcsv = (b'', b' -r')[recursive]\n prsv = (b'', b' -p')[preserve_times]\n self.channel = self._open()\n self._pushed = 0\n self.channel.settimeout(self.socket_timeout)\n self.channel.exec_command(b\"scp\" +\n rcsv +\n prsv +\n b\" -f \" +\n b' '.join(remote_path))\n self._recv_all()\n self.close()", "def get(self, remotepath, localpath=None, hadoop=False):\n hadoop = hadoop or remotepath.startswith(\"hdfs\")\n paths = self._sftp_paths(localpath=localpath, remotepath=remotepath)\n if hadoop:\n tmp_path = \"/tmp/{}_{}\".format(\n paths[\"localpath\"].replace(\"/\", \"_\"), time.time()\n )\n self.hadoop(\"get -f\", remotepath, tmp_path)\n paths[\"remotepath\"] = tmp_path\n with self._ssh.open_sftp() as sftp:\n sftp.get(\n remotepath=paths[\"remotepath\"],\n localpath=paths[\"localpath\"],\n callback=self._sftp_progress,\n )\n if hadoop:\n self.exec(\"rm\", paths[\"remotepath\"])\n return", "def _host_dir(self, path):\n return self._host._dir(path)", "def get_local_path(self, remote_path, mapped_paths, reverse=False):\n for remote_prefix, local_prefix in mapped_paths:\n # Reverse. 
Return mapped remote path for given local path.\n if reverse:\n remote_prefix, local_prefix = local_prefix, remote_prefix\n if remote_path.startswith(remote_prefix):\n local_path = remote_path.replace(\n remote_prefix, local_prefix)\n break\n else:\n local_path = remote_path\n return local_path", "def get(self,\n remote_path,\n local_path,\n directory=False,\n storage_id=None,\n check_integrity_fn=None):\n LOGGER.info('Synchronizing %s to %s', remote_path, local_path)\n client, remote_path = self._get_storage(remote_path, storage_id=storage_id)\n client.get(\n remote_path,\n local_path,\n directory=directory,\n check_integrity_fn=check_integrity_fn)\n if not os.path.exists(local_path):\n raise RuntimeError('Failed to synchronize %s' % local_path)", "def _get(self, remote_filename, local_path):\n\n with local_path.open('wb') as local_file:\n file_id = self.get_file_id(remote_filename)\n if file_id is None:\n raise BackendException(\n 'File \"%s\" cannot be downloaded: it does not exist' %\n remote_filename)\n\n response = self.http_client.get(\n self.content_url + '/nodes/' + file_id + '/content', stream=True)\n response.raise_for_status()\n for chunk in response.iter_content(chunk_size=DEFAULT_BUFFER_SIZE):\n if chunk:\n local_file.write(chunk)\n local_file.flush()", "def remote_mock_dir(mock_data_dir, client, remote_temp_dir):\n\n def _upload(src_path, dest_path):\n with open(src_path, \"rb\") as file_:\n client.upload(dest_path, file_)\n\n copy_tree(mock_data_dir, remote_temp_dir, mkdir_func=client.mkdir, cp_func=_upload)\n\n return str(remote_temp_dir)", "def repo_full_name_from_remote(remote_url):\n # Check whether we have a https or ssh url\n if remote_url.startswith(\"https\"):\n path = urllib.parse.urlparse(remote_url)\n path = path.path\n # Remove the intial '/'\n path = path[1:]\n # Remove extension\n path = os.path.splitext(path)[0]\n else:\n # Remove the initial `git@``\n path = remote_url.split(\"@\")\n path = path[-1] if len(path) > 1 else path[0]\n path = urllib.parse.urlparse(path)\n path = path.path\n # Remove extension\n path = os.path.splitext(path)[0]\n return path", "def get(self, remote_path, local_path=None):\n self._openSFTPConnection() \n self.sftp.get(remote_path, local_path)", "def download(conn, remotepath, localpath, filter = None, ignore_invalid = False, chunk_size = 16000):\n if conn.modules.os.path.isdir(remotepath):\n download_dir(conn, remotepath, localpath, filter)\n elif conn.modules.os.path.isfile(remotepath):\n download_file(conn, remotepath, localpath, chunk_size)\n else:\n if not ignore_invalid:\n raise ValueError(\"cannot download %r\" % (remotepath,))", "def pull(api_client, folder, verbose):\n local_folder, remote_folder = _get_local_and_remote_folders(folder)\n workspace = WorkspaceApi(api_client)\n\n def work():\n workspace.export_workspace_dir(remote_folder, local_folder, True,\n verbose=verbose)\n if not verbose:\n with loadingbar(msg=\"Pulling from {}\".format(remote_folder),\n width=10, fill_char=\"o\", interval=.25):\n work()\n else:\n work()", "def localpath_download(self, *path, **kwargs):\n pathname = self.localpath(*path)\n if not os.path.exists(pathname):\n self.download.unwrapped(self, *path, **kwargs)\n return pathname", "def ftp_get_command(connection, remote_path, local_path):\n try:\n ftp = ftplib.FTP(host=connection.host,\n user=connection.username,\n passwd=connection.password)\n ftp.cwd(os.path.dirname(remote_path))\n name = os.path.basename(remote_path)\n LOG.debug(_(\"ftp GET %(remote_path)s to: %(local_path)s\") % locals())\n 
with open(local_path, 'w') as ftpfile:\n ftpcmd = 'RETR %s' % name\n ftp.retrbinary(ftpcmd, ftpfile.write)\n ftp.close()\n except Exception:\n LOG.error(_(\"File transfer from PowerVM manager failed\"))\n raise exception.PowerVMFTPTransferFailed(ftp_cmd='GET',\n source_path=remote_path, dest_path=local_path)", "def copy_to_local(self, remote, local):\r\n return self._call(\"-copyToLocal\", remote, local, suppress_output=True)" ]
[ "0.71187484", "0.7090581", "0.6674842", "0.66484684", "0.65908664", "0.6586931", "0.65826195", "0.6580506", "0.65319806", "0.6471554", "0.6446152", "0.6442478", "0.6335604", "0.6271005", "0.6265481", "0.61953896", "0.6186426", "0.6037663", "0.6009096", "0.59784174", "0.5961713", "0.591199", "0.58979344", "0.58834743", "0.5880986", "0.58347285", "0.58237565", "0.57954633", "0.5784625", "0.5779963" ]
0.80905944
0
Returns stat on remote_path file
def stat(self, remote_path, storage_id=None):
    client, remote_path = self._get_storage(remote_path, storage_id=storage_id)
    return client.stat(remote_path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def file_stat(self, file_path):", "def stat (self, path):\r\n pass", "def stat(self, path):\n return os.stat(path)", "def stat(path: str) -> StatResult:\n return _fs().stat(path)", "def stat(self, path: bytes) -> Any:\n return os.stat(self.storage.path(path.decode()))", "def get_stat(self):\n return os.stat(self.sync_path)", "def lstat(self, path):\n return os.lstat(path)", "def get_file_stat(host, fqpath):\n statformat = '%F:%n:%i:%a:%s:%h:%u:%g:%U:%G'\n command = \"stat -c '%s' %s\" % (statformat, fqpath)\n rcode, rout, rerr = g.run(host, command)\n if rcode == 0:\n stat_data = {}\n stat_string = rout.strip()\n (filetype, filename, inode,\n access, size, links,\n uid, gid, username, groupname) = stat_string.split(\":\")\n\n stat_data['filetype'] = filetype\n stat_data['filename'] = filename\n stat_data[\"inode\"] = inode\n stat_data[\"access\"] = access\n stat_data[\"size\"] = size\n stat_data[\"links\"] = links\n stat_data[\"username\"] = username\n stat_data[\"groupname\"] = groupname\n stat_data[\"uid\"] = uid\n stat_data[\"gid\"] = gid\n\n return stat_data\n\n g.log.error(\"Could not stat file %s: %s\" % (fqpath, rerr))\n return None", "def stat(self, path: bytes) -> Any:\n raise NotImplementedError", "def stat(path: str) -> os.stat_result:\n return Stat._result(path, throw=True) # type: ignore", "def get_file_stat(dir_path, stat_file, filename=None ):\n new_dir_path = os.path.join(dir_path, filename)\n file_stat_cmd = 'stat %s' % new_dir_path\n file_stat = subprocess.Popen(file_stat_cmd, stdout=subprocess.PIPE, shell=True)\n out, err = file_stat.communicate()\n if out:\n with open(stat_file, 'w') as fs:\n fs.write(out)\n else:\n print(\"Command could not run. Please check\")", "def _stat(self, path, _exception_for_missing_path=True):\n return self.__call_with_parser_retry(\n self._real_stat, path, _exception_for_missing_path\n )", "def stat_file(self, path, info):\n return {}", "def _get_media_stat(self, template_name):\n source, filepath = self._get_source_filepath(template_name)\n return os.stat(filepath)", "def _real_lstat(self, path, _exception_for_missing_path=True):\n path = self._path.abspath(path)\n # If the path is in the cache, return the lstat result.\n if path in self._lstat_cache:\n return self._lstat_cache[path]\n # Note: (l)stat works by going one directory up and parsing the output\n # of an FTP `LIST` command. Unfortunately, it is not possible to do\n # this for the root directory `/`.\n if path == \"/\":\n raise ftputil.error.RootDirError(\"can't stat remote root directory\")\n dirname, basename = self._path.split(path)\n # If even the directory doesn't exist and we don't want the exception,\n # treat it the same as if the path wasn't found in the directory's\n # contents (compare below). The use of `isdir` here causes a recursion\n # but that should be ok because that will at the latest stop when we've\n # gotten to the root directory.\n if not self._path.isdir(dirname) and not _exception_for_missing_path:\n return None\n # Loop through all lines of the directory listing. We probably won't\n # need all lines for the particular path but we want to collect as many\n # stat results in the cache as possible.\n lstat_result_for_path = None\n # FIXME: Here we try to list the contents of `dirname` even though the\n # above `isdir` call might/could have shown that the directory doesn't\n # exist. This may be related to ticket #108. 
That said, we may need to\n # consider virtual directories here (see tickets #86 / #87).\n for stat_result in self._stat_results_from_dir(dirname):\n # Needed to work without cache or with disabled cache.\n if stat_result._st_name == basename:\n lstat_result_for_path = stat_result\n if lstat_result_for_path is not None:\n return lstat_result_for_path\n # Path was not found during the loop.\n if _exception_for_missing_path:\n # TODO: Use FTP `LIST` command on the file to implicitly use the\n # usual status code of the server for missing files (450 vs. 550).\n raise ftputil.error.PermanentError(\n \"550 {}: no such file or directory\".format(path)\n )\n else:\n # Be explicit. Returning `None` is a signal for\n # `_Path.exists/isfile/isdir/islink` that the path was not found.\n # If we would raise an exception, there would be no distinction\n # between a missing path or a more severe error in the code above.\n return None", "def is_file(ssh, file_path):\r\n stdin, stdout, stderr = ssh.exec_command(\"ls -l %s\" % file_path)\r\n err = stderr.read()\r\n# if not err == '': return err.strip().split(\":\")[-1]\r\n if \"No such file or directory\" in err: return -1\r\n\r\n out = stdout.read()\r\n out_list = out.split(\" \")\r\n if out_list[0][0] == '-': return 1\r\n if out_list[0][0] == 'd': return 2\r\n\r\n return -2", "def _real_stat(self, path, _exception_for_missing_path=True):\n # Save for error message.\n original_path = path\n # Most code in this method is used to detect recursive link structures.\n visited_paths = set()\n while True:\n # Stat the link if it is one, else the file/directory.\n lstat_result = self._real_lstat(path, _exception_for_missing_path)\n if lstat_result is None:\n return None\n # If the file is not a link, the `stat` result is the same as the\n # `lstat` result.\n if not stat.S_ISLNK(lstat_result.st_mode):\n return lstat_result\n # If we stat'ed a link, calculate a normalized path for the file\n # the link points to.\n dirname, _ = self._path.split(path)\n path = self._path.join(dirname, lstat_result._st_target)\n path = self._path.abspath(self._path.normpath(path))\n # Check for cyclic structure.\n if path in visited_paths:\n # We had seen this path already.\n raise ftputil.error.RecursiveLinksError(\n \"recursive link structure detected for remote path '{}'\".format(\n original_path\n )\n )\n # Remember the path we have encountered.\n visited_paths.add(path)", "def get_stat(self):\n self.filestat = StatTuple()\n return self.filestat", "def get_remote_file(sid, path):\n with slycat.web.server.remote.get_session(sid) as session:\n return session.get_file(path)", "def testRemote(self):\n try:\n remoteLocator = self.__httpsFileUrl\n ok = self.__fileU.isLocal(remoteLocator)\n self.assertFalse(ok)\n #\n ok = self.__fileU.exists(remoteLocator)\n self.assertTrue(ok)\n size = self.__fileU.size(remoteLocator)\n self.assertGreaterEqual(size, 1000)\n\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()", "def mtime(self, filename):\n return self.sftp.stat(filename).st_mtime", "def _remote_file_size_modtime(ftpobj, remote_file):\n size_in_bytes = ftpobj.size(remote_file)\n modification_time = ftpobj.get_file_mtime(remote_file)\n\n return size_in_bytes, modification_time", "def read_remote_file(remote_command_executor, file_path):\n logging.info(f\"Retrieving remote file {file_path}\")\n result = remote_command_executor.run_remote_command(f\"cat {file_path}\")\n assert_that(result.failed).is_false()\n return result.stdout.strip()", "def 
file_exists(file_ref, config):\n find_fn = _find_file(config)\n if _is_remote(file_ref):\n _, file_ref = _get_id_fname(file_ref)\n return find_fn(file_ref)", "def get_remote_file_server(client, sid, path):\n with slycat.web.server.remote.get_session_server(client, sid) as session:\n return session.get_file(path)", "def match_stat(dest_path, source_path):\n return shutil.copystat(source_path, dest_path)", "def get_stat(full_path):\n status = {}\n status['size'] = os.path.getsize(full_path)\n status['accessed'] = datetime.datetime.fromtimestamp(os.path.getatime(full_path))\n status['modified'] = datetime.datetime.fromtimestamp(os.path.getmtime(full_path))\n status['changed_any'] = datetime.datetime.fromtimestamp(os.path.getctime(full_path))\n # first 3 digits are User, Group, Other permissions: 1=execute,2=write,4=read\n status['mode'] = os.stat(full_path).st_mode\n status['type'] = get_type(full_path)\n return status", "def _lstat(self, path, _exception_for_missing_path=True):\n return self.__call_with_parser_retry(\n self._real_lstat, path, _exception_for_missing_path\n )", "def get_file(self, remote_path, local_path, storage_id=None):\n return self.get(remote_path, local_path, directory=False, storage_id=storage_id)", "def remote_status():" ]
[ "0.759059", "0.74223083", "0.7068877", "0.6889453", "0.67729634", "0.67529154", "0.67309195", "0.6718362", "0.6688009", "0.6540114", "0.6426513", "0.63638204", "0.6309964", "0.62603474", "0.6134606", "0.61128014", "0.6097762", "0.6071095", "0.60283256", "0.6028293", "0.60082966", "0.6003727", "0.6002452", "0.59727776", "0.5887742", "0.5879562", "0.587478", "0.5860615", "0.5855597", "0.5841418" ]
0.7572192
1
Pushes a local_path file or directory to storage.
def push(self, local_path, remote_path, storage_id=None, lp=None):
    if not os.path.exists(local_path):
        raise RuntimeError('%s not found' % local_path)
    if local_path == remote_path:
        return None
    LOGGER.info('Uploading %s to %s', local_path, remote_path)
    client, remote_path = self._get_storage(remote_path, storage_id=storage_id)
    return client.push(local_path, remote_path, lp=lp)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def local_push_file(job_log_dir, file_path, local_config):\n dest_dir = os.path.join(local_config['path'], job_log_dir)\n dest_filename = os.path.basename(file_path)\n if not os.path.isdir(dest_dir):\n os.makedirs(dest_dir)\n\n dest_file = os.path.join(dest_dir, dest_filename)\n\n shutil.copyfile(file_path, dest_file)\n return local_config['prepend_url'] + os.path.join(job_log_dir,\n dest_filename)", "def upload_file( processor, user, local_path ):\n operations.publish_work_item(\n operations.create_asset_from_file(\n file_name = local_path,\n owner = user,\n producer = processor,\n child_number = 0,\n asset_class = models.AssetClass.UPLOAD ))", "def scp_push_file(job_log_dir, file_path, local_config):\n pass", "def push(store, path):\n url = store[\"url\"]\n if url.startswith(\"git\") or url.endswith(\".git\"):\n push_git(store, path)\n elif url.startswith(\"hg+\"):\n push_hg(store, path)\n elif not os.path.exists(os.path.expanduser(url)):\n raise ValueError(\"Do not know how to push to this kind of storage.\")", "def push(self, localpath, remotepath, timeout=None):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)", "def _cloud_storage_upload(local_file, bucket, filename_on_bucket):\n client = storage.Client()\n\n bucket = client.get_bucket(bucket)\n blob = bucket.blob(filename_on_bucket)\n blob.upload_from_filename(local_file)\n print('uploaded ', bucket, filename_on_bucket)", "def push(self, local_path, device_path, st_mode=constants.DEFAULT_PUSH_MODE, mtime=0, progress_callback=None, transport_timeout_s=None, read_timeout_s=constants.DEFAULT_READ_TIMEOUT_S):\n if not device_path:\n raise exceptions.DevicePathInvalidError(\"Cannot push to an empty device path\")\n if not self.available:\n raise exceptions.AdbConnectionError(\"ADB command not sent because a connection to the device has not been established. 
(Did you call `AdbDevice.connect()`?)\")\n\n local_path_is_dir, local_paths, device_paths = get_files_to_push(local_path, device_path)\n\n if local_path_is_dir:\n self.shell(\"mkdir \" + device_path, transport_timeout_s, read_timeout_s)\n\n for _local_path, _device_path in zip(local_paths, device_paths):\n opener = _open_bytesio if isinstance(local_path, BytesIO) else open\n with opener(_local_path, 'rb') as stream:\n adb_info = self._open(b'sync:', transport_timeout_s, read_timeout_s, None)\n filesync_info = _FileSyncTransactionInfo(constants.FILESYNC_PUSH_FORMAT, maxdata=self._maxdata)\n\n self._push(stream, _device_path, st_mode, mtime, progress_callback, adb_info, filesync_info)\n\n self._clse(adb_info)", "def _push_stf(self, path: str, dest: str, mode=0o755,\n zipfile_path: str =\"vendor/stf-binaries-master.zip\"):\n with zipfile.ZipFile(zipfile_path) as z:\n if path not in z.namelist():\n logger.warning(\"stf stuff %s not found\", path)\n return\n with z.open(path) as f:\n self._device.sync.push(f, dest, mode)", "def push(api_client, folder, verbose):\n local_folder, remote_folder = _get_local_and_remote_folders(folder)\n workspace = WorkspaceApi(api_client)\n\n def work():\n workspace.import_workspace_dir(local_folder, remote_folder,\n True, False, verbose=verbose)\n if not verbose:\n with loadingbar(msg=\"Pushing to {}\".format(remote_folder), width=10,\n fill_char=\"o\", interval=.25):\n work()\n else:\n work()", "def push(self, path, destination, keep = True):\n self._push_recursive(path, destination, keep)", "def upload(self, remote, local, force = False):\n fl = self.list([ remote ])\n if force == False and remote in fl:\n remote_hash = fl[remote]\n h = hashlib.sha256()\n commonl.hash_file(h, local)\n if remote_hash == h.hexdigest():\n # remote hash is the same, no need to upload\n return\n\n with io.open(local, \"rb\") as inf:\n self.target.ttbd_iface_call(\"store\", \"file\", method = \"POST\",\n file_path = remote,\n files = { 'file': inf })", "def upload(self, localpath, remotepath):\n sftp = self.connection.open_sftp()\n if isinstance(localpath, str):\n if not os.path.exists(localpath):\n raise FileNotFoundError(localpath)\n sftp.put(localpath, remotepath)\n else:\n for f in localpath:\n if not os.path.exists(f):\n raise FileNotFoundError(f)\n sftp.put(f, remotepath + \"/\" + os.path.split(f)[-1])\n sftp.close()", "def push(args):\n if args.type == 'ssh':\n cache = set(args.remote_cache).union(set(args.cache))\n for path in sorted(cache):\n if os.path.exists(os.path.join(args.base, path)) and not remote_exists(args.sftp, os.path.join(args.remote_base, path)):\n print('push: {}'.format(path))\n ensure_remote(args.sftp, os.path.dirname(os.path.join(args.remote_base, path)))\n args.sftp.put(\n os.path.join(args.base, path),\n os.path.join(args.remote_base, path)\n )\n args.remote_cache.append(path)\n args.remote_update = True\n elif args.type == 's3':\n raise NotImplementedError('s3:// remote type not yet supported!')\n elif args.type == 'gs':\n raise NotImplementedError('gs:// remote type not yet supported!')\n return", "def push(self, filepath):\n logger.debug(\"Starting to push %r\", str(filepath))\n\n def _progress(monitor):\n # XXX Facundo 2020-07-01: use a real progress bar\n if monitor.bytes_read <= monitor.len:\n progress = 100 * monitor.bytes_read / monitor.len\n print(\"Uploading... 
{:.2f}%\\r\".format(progress), end=\"\", flush=True)\n\n with filepath.open(\"rb\") as fh:\n encoder = MultipartEncoder(\n fields={\"binary\": (filepath.name, fh, \"application/octet-stream\")}\n )\n\n # create a monitor (so that progress can be displayed) as call the real pusher\n monitor = MultipartEncoderMonitor(encoder, _progress)\n response = _storage_push(monitor, self.storage_base_url)\n\n if not response.ok:\n raise CommandError(\n \"Failure while pushing file: [{}] {!r}\".format(\n response.status_code, response.content\n )\n )\n\n result = response.json()\n if not result[\"successful\"]:\n raise CommandError(\"Server error while pushing file: {}\".format(result))\n\n upload_id = result[\"upload_id\"]\n logger.debug(\"Uploading bytes ended, id %s\", upload_id)\n return upload_id", "def upload(self, file_path: str, remote_name: str = None) -> dict:\n url = self.get_method_url('storage', 'upload')\n remote_name = self.get_remote_name(file_path, remote_name)\n with open(file_path, 'rb') as file:\n files = {\n 'payload': file,\n 'file_name': remote_name\n }\n json_data = self.request(\n url=url,\n files=files,\n method='POST'\n )\n self.remote_app = json_data\n return self.remote_app", "def upload_local_file(self, path_to_file, name):\n file1 = self._drive.CreateFile()\n file1.SetContentFile(path_to_file)\n file1['title'] = name\n file1.Upload()\n print('File successfully uploaded!')", "def upload_file(local_path, s3_path):\n with open(local_path, 'rb') as binary_data:\n s3.Bucket(bucket_name).put_object(Key=s3_path, Body=binary_data)", "def PushWorkload(vm, workload_file, remote_path):\n if os.path.basename(remote_path):\n vm.RemoteCommand('sudo rm -f ' + remote_path)\n vm.PushFile(workload_file, remote_path)", "def upload_bam(bam_s3_path, local_folder_path):\n\n upload_folder(bam_s3_path, local_folder_path)", "def set_file_storage(source='local', **kwargs):\n pass", "def put_object(local_path: str, file_name: str, configuration):\n pass", "def swift_push_file(job_log_dir, file_path, swift_config):\n with open(file_path, 'r') as fd:\n name = os.path.join(job_log_dir, os.path.basename(file_path))\n con = swiftclient.client.Connection(\n authurl=swift_config['authurl'],\n user=swift_config['user'],\n key=swift_config['password'],\n os_options={'region_name': swift_config['region']},\n tenant_name=swift_config['tenant'],\n auth_version=2.0)\n con.put_object(swift_config['container'], name, fd)\n return swift_config['prepend_url'] + name", "def upload_to_storage_client(self, **kwargs):\n if 'source_path' in kwargs:\n source_path = kwargs.get('source_path')\n else:\n raise ValueError(\"Must provide the \\'source_path\\' parameter for local storage client to find the file!\")\n\n if 'destination_path' in kwargs:\n destination_path = kwargs.get('destination_path')\n else:\n raise ValueError(\n \"Must provide the \\'destination_path\\' parameter for local storage client to find the destination!\")\n\n compression = kwargs.get('compression')\n intended_stored_file_name = kwargs.get('intended_stored_file_name', None)\n\n if not os.path.isdir(source_path) and compression:\n raise ValueError(\"Only directories can be zipped. 
Single files cannot be zipped.\")\n\n self.__check_dir(destination_path)\n\n upload_parameters = {'source_path': source_path, 'compression': compression,\n 'destination_path': destination_path}\n\n if compression:\n # TODO\n # if no name is supplied, name of the zipfile will be the destination; currently funky at move() due to source =/= location\n if not intended_stored_file_name:\n file_name = os.path.split(source_path)[-1]\n intended_stored_file_name = 'archive_' + file_name + \"_\" + datetime.now().strftime(\"%A_%d_%B_%Y_%I_%M%p\")\n upload_parameters['intended_stored_file_name'] = intended_stored_file_name\n\n # compression can only happen on DIRECTORIES, and not on single files\n # compress2 takes a name from the kwargs, and source from parameter of save_local\n compress(intended_stored_file_name, source_path)\n\n # TODO: Perhaps zipfile dumping location can be found by getting a parent/ child from source\n\n # To find where compress2 dumps zipfile, currently: working directory path\n location = self.__prj_root_dir + \"\\\\\" + intended_stored_file_name + \".zip\"\n shutil.move(location, destination_path)\n\n upload_parameters['intended_stored_file_name'] = intended_stored_file_name\n upload_parameters['upload_date_time'] = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n self.generate_json_upload_parameters(**upload_parameters)\n else:\n upload_parameters['intended_stored_file_name'] = intended_stored_file_name\n upload_parameters['upload_date_time'] = datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n shutil.move(source_path, destination_path)\n self.generate_json_upload_parameters(**upload_parameters)", "def put_upload(self):\n # print \"starting upload...\", self.current_upload['filepath']\n self.touch()\n self.log(\"STARTING_UPLOAD\", level=INFO)\n try:\n Backend.put_file(self.fileobj, self.current_upload[\"gcs_url\"])\n except exceptions.FilePutError as err:\n self.handle_put_error(err, self.fileobj)\n raise", "def upload(args):\n osf = _setup_osf(args)\n if osf.username is None or osf.password is None:\n sys.exit('To upload a file you need to provide a username and'\n ' password.')\n\n project = osf.project(args.project)\n storage, remote_path = split_storage(args.destination)\n if remote_path == '':\n remote_path = os.path.split(args.source)[-1]\n\n store = project.storage(storage)\n if args.recursive:\n if not os.path.isdir(args.source):\n raise RuntimeError(\"Expected source ({}) to be a directory when \"\n \"using recursive mode.\".format(args.source))\n\n # local name of the directory that is being uploaded\n _, dir_name = os.path.split(args.source)\n\n for root, _, files in os.walk(args.source):\n subdir_path = os.path.relpath(root, args.source)\n for fname in files:\n local_path = os.path.join(root, fname)\n with open(local_path, 'rb') as fp:\n # build the remote path + fname\n name = os.path.join(remote_path, dir_name, subdir_path,\n fname)\n store.create_file(name, fp, force=args.force,\n update=args.update)\n\n else:\n with open(args.source, 'rb') as fp:\n store.create_file(remote_path, fp, force=args.force,\n update=args.update)", "def publish(self, path):\n self.logger.info(\"Publishing %s\", path)\n try:\n self.set_workspace()\n workspace_path = getcwd()\n if workspace_path != commonpath([workspace_path, abspath(path)]):\n self.logger.error(\"Attempt to publish a non-local file %s\", path)\n raise ContextError(\n f\"Only local workspace files can be published! 
PATH={path}\"\n )\n if not isfile(path):\n self.logger.error(\"Attempt to publish a non-file path %s\", path)\n raise ContextError(f\"Only files can be published! PATH={path}\")\n # publish the file\n target_path = join(self._path_perm, relpath(path))\n targer_url = urljoin(self._url_base, relpath(path))\n if not isdir(self._path_perm):\n raise MissingContextError(\n f\"Permanent directory does not exist! PATH={self._path_perm}\"\n )\n if not exists(dirname(target_path)):\n makedirs(dirname(target_path))\n move(path, target_path)\n except Exception as error:\n self.logger.warning(\"Failed to publish %s! %s\", path, error)\n raise\n self.logger.debug(\"moved %s -> %s\", path, target_path)\n return target_path, targer_url", "def UploadFile(self, local_file_name, gcs_file_name,\n mimetype='application/octet-stream'):\n resumable = os.stat(local_file_name).st_size > 0\n media = gapi_http.MediaFileUpload(local_file_name,\n mimetype=mimetype,\n resumable=resumable)\n\n # gsutil's code suggests that 404s and 410s are retryable for resumable\n # uploads (see ResumableUploadStartOverException).\n def _ErrorMatcher(error):\n return (self._CommonErrorMatcher(error)\n or (isinstance(error, gapi_errors.HttpError)\n and error.resp.status in (404, 410)))\n\n return self._RunWithRetries(\n lambda: self._UploadWithProgress(media, gcs_file_name),\n _ErrorMatcher)", "def upload_cluster(self, localpath, remotepath):\n if isinstance(localpath, str):\n filename = os.path.split(localpath)[-1]\n self.upload(localpath, filename)\n self.execute_command(\n \"hdfs dfs -put {0} {1}\".format(filename, remotepath))\n self.execute_command(\"rm {0}\".format(filename))\n else:\n self.upload(localpath, \".\")\n for afile in localpath:\n filename = os.path.split(afile)[-1]\n self.execute_command(\n \"hdfs dfs -put {0} {1}\".format(filename, remotepath))\n self.execute_command(\"rm {0}\".format(filename))\n\n return remotepath", "def upload_file(bucket, local_file_path, remote_destination_path):\n bucket = get_bucket(bucket)\n k = Key(bucket)\n k.key = remote_destination_path\n k.set_contents_from_filename(local_file_path)", "def _storage_push(monitor, storage_base_url):\n url = storage_base_url + \"/unscanned-upload/\"\n headers = {\n \"Content-Type\": monitor.content_type,\n \"Accept\": \"application/json\",\n \"User-Agent\": build_user_agent(),\n }\n retries = Retry(total=5, backoff_factor=2, status_forcelist=[500, 502, 503, 504])\n\n with requests.Session() as session:\n session.mount(\"https://\", HTTPAdapter(max_retries=retries))\n\n try:\n response = session.post(url, headers=headers, data=monitor)\n except RequestException as err:\n raise CommandError(\n \"Network error when pushing file: {}({!r})\".format(\n err.__class__.__name__, str(err)\n )\n )\n\n return response" ]
[ "0.65923005", "0.65466136", "0.653534", "0.64600855", "0.64583194", "0.6253895", "0.6151398", "0.61167777", "0.6061019", "0.6037784", "0.6037443", "0.5991548", "0.59851855", "0.59710395", "0.59525883", "0.5892432", "0.58776206", "0.5817515", "0.57922804", "0.5791136", "0.57558256", "0.57157046", "0.5690412", "0.5682649", "0.5674413", "0.5641497", "0.56090146", "0.5593519", "0.55853254", "0.55745316" ]
0.7437701
0
Delete segments from a corpus in a storage.
def seg_delete(self, remote_path, corpus_id, seg_ids, storage_id=None):
    client, remote_path = self._get_storage(remote_path, storage_id=storage_id)
    return client.seg_delete(corpus_id, seg_ids)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete_network_segments(self, tenant_id, network_segments):", "def del_segment_translations(*args):\n return _ida_segment.del_segment_translations(*args)", "def test_deleting_a_segment(self):\n pass", "def del_segm(*args):\n return _ida_segment.del_segm(*args)", "def delete(fits: Optional[str], start: Optional[str], end: Optional[str], out: Optional[str]):\n delete_in_ssda(fits=fits, start=start, end=end, out=out)", "def delete(self, prefix, paths):\n pass", "def delete_terms(self, *terms):\n result = self.sequence\n for term in ANCOVA(*terms).sequence:\n result.remove(term)\n return ANCOVA(*result)", "def wipe(self, segments):\n self.firstCoords = None\n self.moveLead(MIDDLE, MIDDLE)\n for seg in self.segs:\n self.can.delete(seg.getGraphicObject())\n seg.rmGraphicObject()\n self.segs = segments\n self.redrawSegs()", "def delete(self, *args, **kwargs):\n print(\"form delete\")\n self.is_deleted = True\n current_section_sequence = self.section_sequence\n\n #This can be modified if we have to hard delete the sections\n\n # for sec_id in current_section_sequence:\n # current_section = Sections.objects.get(id = sec_id )\n # current_section.delete()\n\n self.save()", "def delete_disks(self, storage_elems):\n raise NotImplementedError()", "def deleteSelectedSegs(self):\n inds = []\n for ix in range(len(self.picbuttons)):\n if self.picbuttons[ix].mark == 'yellow':\n inds.append(ix)\n\n if len(inds)==0:\n print(\"No segments selected\")\n return\n\n self.segsChanged = True\n for ix in reversed(inds):\n del self.segments[ix]\n del self.picbuttons[ix]\n\n # update self.clusters, delete clusters with no members\n todelete = []\n for ID, label in self.clusters.items():\n empty = True\n for seg in self.segments:\n if seg[-1] == ID:\n empty = False\n break\n if empty:\n todelete.append(ID)\n\n self.clearButtons()\n\n # Generate new class labels\n if len(todelete) > 0:\n keys = [i for i in range(self.nclasses) if i not in todelete] # the old keys those didn't delete\n # print('old keys left: ', keys)\n\n nclasses = self.nclasses - len(todelete)\n max_label = nclasses - 1\n labels = []\n c = self.nclasses - 1\n while c > -1:\n if c in keys:\n labels.append((c, max_label))\n max_label -= 1\n c -= 1\n\n labels = dict(labels)\n # print(labels)\n\n # update clusters dictionary {ID: cluster_name}\n clusters = {}\n for i in keys:\n clusters.update({labels[i]: self.clusters[i]})\n\n print('before delete: ', self.clusters)\n self.clusters = clusters\n print('after delete: ', self.clusters)\n\n # update the segments\n for seg in self.segments:\n seg[-1] = labels[seg[-1]]\n\n self.nclasses = nclasses\n\n # redraw the buttons\n self.updateButtons()\n self.completeChanged.emit()", "def delete_segment(self, name: str) -> None:\n self._status.check_authority_for_draft()\n\n delete_data: Dict[str, Any] = {\"segmentName\": name}\n delete_data.update(self._status.get_status_info())\n\n self._client.open_api_do(\"DELETE\", \"segments\", self.dataset_id, json=delete_data)", "def delete_cluster(self):", "def delete(self):\n if not self.selection.isSelection(): return False\n\n # Save the current text\n self.saveText()\n\n sm1, sm2 = self.selection.order(self.selection.selectionMark,\n self.selection.selectionMark2)\n w1 = sm1[0]\n w2 = sm2[0]\n cx = sm1[1]\n self.edCursor.setPos(w1, cx)\n # Join words before and after selection\n w1.setString(w1.string[:cx] + w2.string[sm2[1]:])\n # Delete all intervening words, and w2\n tl1 = w1.tline\n wx1 = tl1.twords.index(w1)\n tl2 = w2.tline\n wx2 = tl2.twords.index(w2)\n if 
(tl1 == tl2): # only delete from 1 line\n # delete words from wx1+1 to wx2 (incl.)\n for w in tl1.twords[wx1+1:wx2+1]:\n w.delete()\n del(tl1.twords[wx1+1:wx2+1])\n\n else: # deletion block covers >1 line\n # delete words from wx1+1 to end of paragraph\n for w in tl1.twords[wx1+1:]:\n w.delete()\n del(tl1.twords[wx1+1:])\n # delete all the intervening lines\n while True:\n tl = self.rsubject.nextLine(tl1)\n if (tl == tl2): break\n self.rsubject.deleteTLine(tl)\n\n # Move remaining words after w2 in tl2 to end of tl1\n for w in tl2.twords[wx2+1:]:\n tl1.insert(w)\n del(tl2.twords[wx2+1:])\n # Delete tl2\n self.rsubject.deleteTLine(tl2)\n\n self.selection.clearSelection()\n\n self.rsubject.renderShortened(w1)\n\n self.edCursor.setPos(w1, cx)\n return True", "def delete():", "def delete(self, first, last, insert=\"\"):\n assert all(new in self.ALPHABET for new in insert)\n if first < 1 or last > len(self.sequence):\n raise ValueError(f\"Deletion {first}-{last} out of bounds for given sequence.\")\n self.sequence = f\"{self.sequence[: first - 1]}{insert}{self.sequence[last:]}\"\n if \"mutations\" in self.metadata.keys():\n self.metadata[\"mutations\"] += f\" del{first}-{last}{insert}\"\n else:\n self.metadata[\"mutations\"] = f\"del{first}-{last}{insert}\"", "def delete_segment(self, n):\n self.get_segment(n).delete()", "def solr_delete(instances):\n __solr_prepare(instances)", "def delete(self, structure, sentence) -> List[AcabNode]:\n raise NotImplementedError()", "def delete(self, structure, sentence) -> List[AcabNode]:\n raise NotImplementedError()", "def delete(self, docs: DocumentArray, *args, **kwargs):\n cursor = self.connection.cursor()\n psycopg2.extras.execute_batch(\n cursor,\n f'DELETE FROM {self.table} where (ID) = (%s);',\n [(doc.id,) for doc in docs],\n )\n self.connection.commit()\n return", "def delete_tenant_bulk(self, tenant_list, sync=False):", "def delete_corpus_manager(self, remote_path, corpus_id, storage_id=None):\n client, remote_path = self._get_storage(remote_path, storage_id=storage_id)\n return client.delete_corpus_manager(corpus_id)", "def delete_network_segment(context, segment_id):\n with db_api.context_manager.writer.using(context):\n network_obj.NetworkSegment.delete_objects(context, id=segment_id)", "def del_selector(*args):\n return _ida_segment.del_selector(*args)", "def remove_segment(**kwargs):\n sessiontoken = kwargs['sessiontoken']\n proxy = kwargs['proxy']\n segment_name = kwargs[\"objectname\"]\n segment=search_nsx_json(proxy, sessiontoken, \"Segment\", segment_name)\n if len(segment['results']) > 0:\n segment_path = segment['results'][0]['path']\n status = remove_segment_json(proxy, sessiontoken, segment_path)\n if status == 200:\n print(f'The following network has been removed: {segment_name}')\n else:\n print(\"The segment was not removed. 
Please check your syntax and try again.\")\n sys.exit(1)\n else:\n print(\"The segment does not exist.\")", "def deleteDocument(cred, documentPaths):\n for documentPath in documentPaths:\n url = cred.base_url + \"documents/\" + documentPath\n\n makeRequest(cred, url, 'DELETE')", "def keep_documents(self, idx):\n print('{} documents have been removed'.format(self.data.shape[0] - len(idx)))\n self.documents = [self.documents[i] for i in idx]\n self.labels = self.labels[idx]\n self.data = self.data[idx, :]", "def delete_dataset(self, dataset: DatasetDB):\n try:\n self._es.delete_index(dataset_records_index(dataset.id))\n finally:\n self._es.delete_document(index=DATASETS_INDEX_NAME, doc_id=dataset.id)", "def delete(self,\n tier1_id,\n segment_id,\n ):\n return self._invoke('delete',\n {\n 'tier1_id': tier1_id,\n 'segment_id': segment_id,\n })" ]
[ "0.6602147", "0.63999766", "0.62061095", "0.59552914", "0.592743", "0.5694561", "0.5608127", "0.55144745", "0.55131215", "0.5494817", "0.5445123", "0.54226285", "0.5414766", "0.5398532", "0.5384618", "0.53805983", "0.53650475", "0.5341641", "0.53285897", "0.53285897", "0.5327579", "0.5302852", "0.5296081", "0.52511996", "0.52504414", "0.52497697", "0.5206897", "0.52021617", "0.519626", "0.5191547" ]
0.68825424
0
Modify segment from a corpus in a storage.
def seg_modify(self, remote_path, corpus_id, seg_id, tgt_id, tgt_seg, src_seg, storage_id=None):
    client, remote_path = self._get_storage(remote_path, storage_id=storage_id)
    return client.seg_modify(corpus_id, seg_id, tgt_id, tgt_seg, src_seg)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_updating_a_segment(self):\n pass", "def change_segment(self):\n logging.debug(\"change_segment\")\n word_string_list = list(self.word_string) # Making a mutable list from immutable string\n index_of_change = randint(0, len(self.word_string)-1)\n old_segment = word_string_list[index_of_change]\n\n segment_options_list = self.feature_table.get_alphabet()\n segment_options_list.remove(old_segment) # Making sure that the new segment is not identical to segment being replaced\n\n if not segment_options_list: # there are no change candidates\n return False\n\n new_segment = choice(segment_options_list)\n word_string_list[index_of_change] = new_segment\n new_word_string = ''.join(word_string_list)\n self._set_word_string(new_word_string)\n return True", "def modify_transforms(self, keys, values, first_segment, last_segment = None):\n if not last_segment:\n last_segment = float('inf')\n \n for dx, segment in enumerate(self.story):\n if dx >= first_segment:\n if dx <= last_segment:\n segment.modify_transforms(keys,values)", "def update_from_document(self, document_path):\n with open(document_path, 'r') as document_file:\n for sentence in document_file:\n words = sentence.strip().split()\n for word in words:\n self._add_new_word(word)", "def assign_actual(segments_path, training_path):\n pass", "def updatestem(self):\n self.stem = self.stemEdit.text()\n self.setdata()", "def switch_segment_places(self, segment_idx1, segment_idx2):\n self.story[segment_idx1], self.story[segment_idx2] = self.story[segment_idx2], self.story[segment_idx1]", "def cleanup(segment):\n cnt = ''.join(segment.file_content)\n index = cnt.find('\\\\annotate')\n if index < 0:\n return\n while index >= 0:\n cnt, new_ind = parse_annotation(cnt, index)\n index = cnt.find('\\\\annotate', new_ind)\n f = codecs.open(segment.filename, 'w', 'utf-8')\n f.write(cnt)\n f.close()\n info('Updated: {} {}'.format(segment.voice_name, segment.name))", "def __setitem__(self, word):\n raise ValueError(\"Vocabulary is only readable, if you want to set new k-v pair, use vocab.add()\")", "def update_index(self, document):\n\t\tix = self.get_index()\n\n\t\twith ix.searcher():\n\t\t\twriter = AsyncWriter(ix)\n\t\t\twriter.delete_by_term(self.id, document[self.id])\n\t\t\twriter.add_document(**document)\n\t\t\twriter.commit(optimize=True)", "def updateAnnotations(self):\n self.backupDatafiles()\n print(\"Updating annotation files \", self.field(\"trainDir\"))\n listOfDataFiles = QDir(self.field(\"trainDir\")).entryList(['*.data'])\n for file in listOfDataFiles:\n # Read the annotation\n segments = Segment.SegmentList()\n newsegments = Segment.SegmentList()\n segments.parseJSON(os.path.join(self.field(\"trainDir\"), file))\n allSpSegs = np.arange(len(segments)).tolist()\n newsegments.metadata = segments.metadata\n for segix in allSpSegs:\n seg = segments[segix]\n if self.field(\"species\") not in [fil[\"species\"] for fil in seg[4]]:\n newsegments.addSegment(seg) # leave non-target segments unchanged\n else:\n for seg2 in self.segments:\n if seg2[1] == seg:\n # find the index of target sp and update call type\n seg[4][[fil[\"species\"] for fil in seg[4]].index(self.field(\"species\"))][\"calltype\"] = self.clusters[seg2[-1]]\n newsegments.addSegment(seg)\n newsegments.saveJSON(os.path.join(self.field(\"trainDir\"), file))", "def segment(data):", "def _rebaseSegment(\n\t\tself,\n\t\tpageStarts: Tuple[int],\n\t\tsegment: segment_command_64\n\t) -> None:\n\n\t\t# check if the segment is included in the mapping\n\t\tif not (\n\t\t\tsegment.vmaddr 
>= self.mapping.address\n\t\t\tand segment.vmaddr < self.mapping.address + self.mapping.size\n\t\t):\n\t\t\treturn\n\n\t\tctx = self.machoCtx.ctxForAddr(segment.vmaddr)\n\n\t\t# get the indices of relevent pageStarts\n\t\tdataStart = self.mapping.address\n\t\tpageSize = self.slideInfo.page_size\n\n\t\tstartAddr = segment.vmaddr - dataStart\n\t\tstartIndex = int(startAddr / pageSize)\n\n\t\tendAddr = ((segment.vmaddr + segment.vmsize) - dataStart) + pageSize\n\t\tendIndex = int(endAddr / pageSize)\n\t\tif endIndex == len(pageStarts) + 1:\n\t\t\tendIndex -= 2\n\t\t\tpass\n\n\t\tfor i in range(startIndex, endIndex):\n\t\t\tpage = pageStarts[i]\n\n\t\t\tif page == DYLD_CACHE_SLIDE_PAGE_ATTR_NO_REBASE:\n\t\t\t\tpass\n\t\t\telif page & DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA:\n\t\t\t\tpageAddr = (i * pageSize) + self.mapping.address\n\t\t\t\tself.logger.warning(f\"Unable to handle page extras at {hex(pageAddr)}\")\n\t\t\telif (page & DYLD_CACHE_SLIDE_PAGE_ATTR_EXTRA) == 0:\n\t\t\t\tpageOff = (i * pageSize) + self.mapping.fileOffset\n\n\t\t\t\t# The page offset are 32bit jumps\n\t\t\t\tself._rebasePage(ctx, pageOff, page * 4)\n\n\t\t\t\tself.statusBar.update(status=\"Rebasing Pages\")\n\t\tpass", "def __setitem__(self, idx, doc):\n if doc is None:\n return\n assert isinstance(idx, int)\n assert isinstance(doc, Document)\n path = self.paths[idx]\n doc.save(os.path.join(self.dirpath, path), fmt=self.fmt)", "def update_knowledge(self):\n pass", "def mutate_indel(genome, var):\n if var.size > 0: # insertion\n if var.size <= 10:\n new_seq = small_insert(var)\n if var.size > 10:\n new_seq = large_insert(var)\n for nt in new_seq:\n genome.mut_seq.insert(var.start,nt)\n\n var.ref = \".\"\n var.alt = \"\".join(new_seq)\n\n else: # deletion\n for i in range(abs(var.size)):\n genome.mut_seq.pop(var.start)\n var.ref = genome.seq[var.start:var.end]\n var.alt = \".\"", "def write(self, segment, result):\n pass", "def update(self,\n segment_id,\n segment,\n ):\n return self._invoke('update',\n {\n 'segment_id': segment_id,\n 'segment': segment,\n })", "def set_segment_translations(*args):\n return _ida_segment.set_segment_translations(*args)", "def multicore_handler(segment_index, segment, kwargs):\n reference_filename = f\"{segment_index}.txt\"\n with open(reference_filename, \"w+\", encoding=\"utf-8\") as file_out:\n for line in segment:\n file_out.write(line)\n\n kwargs[\"reference_file\"] = reference_filename\n find_and_download_songs(kwargs)\n\n if os.path.exists(reference_filename):\n os.remove(reference_filename)", "def update_document(self):\n pass", "def update_corpus(sentences):\n \n corNeg = None\n corPos = None\n corNeu = None\n try:\n corNeg = open('corpus\\\\neg.txt', 'ab')\n corPos = open('corpus\\\\pos.txt', 'ab')\n corNeu = open('corpus\\\\neu.txt', 'ab')\n except:\n print(\"Error: Loading Corpus\")\n return\n for sent_d in sentences:\n sent = sent_d[\"sentence_txt\"]\n tagged = sent_d[\"tag_id\"]\n # update corpus\n if tagged == tag.neg:\n corNeg.write('\\n'+sent)\n if tagged == tag.pos:\n corPos.write('\\n'+sent)\n if tagged == tag.neu:\n corNeu.write('\\n'+sent)\n corNeg.close()\n corPos.close()\n corNeu.close()", "def update_cds(self, line, cds):\n args = self.extract_cds_args(line)\n cds.add_indices(args['indices'])\n cds.add_phase(args['phase'])\n cds.add_identifier(args['identifier'])\n if 'score' in args:\n cds.add_score(args['score'])", "def edit_document():", "def setSegments(self, segments):\n for point, segment in zip(self.points, segments):\n point.set(segment.p1)", "def 
modify_an_entry(self):\n target_list = self.find_student()\n\n if not len(target_list):\n print('There is no contents to show')\n else:\n opt = self.input_options(['midterm', 'finalterm'], 1, 'Which test do you want to modify?')\n score = self.input_score()\n\n if opt.upper() == 'MIDTERM':\n for idx in target_list.index:\n self.student_list.loc[self.student_list.index == idx, 'midterm'] = score\n else:\n for idx in target_list.index:\n self.student_list.loc[self.student_list.index == idx, 'finalterm'] = score", "def set_segment_cmt(*args):\n return _ida_segment.set_segment_cmt(*args)", "def segment_counter(self, _):\n raise NotImplementedError(\n \"We do not support externally altering the segment counter\")", "def configure_segment(**kwargs):\n sessiontoken = kwargs['sessiontoken']\n proxy = kwargs['proxy']\n segment_name = kwargs[\"objectname\"]\n # Quick search to see if the segment exists of not.\n segment=search_nsx_json(proxy, sessiontoken, \"Segment\", segment_name)\n # If the segment exists, capture the path for the API call, and the existing configuration in JSON.\n if len(segment['results']) > 0:\n json_init=segment['results'][0]\n segment_path = segment['results'][0]['path']\n else:\n print(\"The segment does not exist. Please create a segment using 'new-segment'.\")\n sys.exit(1)\n # Establish a list of keys to keep - these represent the values we are willing/able to update.\n keep_list = ['display_name', 'connectivity_path', 'advanced_config', 'resource_type', 'subnets']\n # Construct a new JSON using just the keys we want to keep\n json_data = dict([(key, val) for key, val in \n json_init.items() if key in keep_list])\n # Update the json_data with the configuration specified by the user.\n if kwargs['connectivity'] is not None:\n json_data[\"advanced_config\"][\"connectivity\"] = f'{kwargs[\"connectivity\"]}'\n if kwargs['tier1_id'] is not None:\n if segment_path == \"/infra/tier-1s/cgw\":\n print(\"This is a fixed segment - you may not alter the connectivity path. Please create a 'flexible' segment.\")\n else:\n json_data[\"connectivity_path\"] = f'/infra/tier-1s/{kwargs[\"tier1_id\"]}'\n#\n # make the call to the API\n status = configure_segment_json(proxy, sessiontoken, segment_path, json_data)\n # present results.\n if status == 200:\n print(f'The following network has been modified: {segment_name}')\n vars = {\"proxy\":proxy, \"sessiontoken\":sessiontoken, \"object_type\":\"Segment\", \"object_id\":segment_name}\n search_nsx(**vars)\n else:\n print(\"The segment was not modified. Please check your syntax and try again.\")\n sys.exit(1)", "def sed(self, search, replace):\n\n for section in self.sections:\n for i, block in enumerate(section.blocks):\n if block == search:\n section.blocks[i] = replace\n self.all_damaged = True\n self.dirty = True" ]
[ "0.57963276", "0.5316993", "0.52617526", "0.51432174", "0.5116421", "0.5114566", "0.5093644", "0.5076101", "0.4989136", "0.49597093", "0.49469495", "0.49101588", "0.49080533", "0.4904618", "0.48917064", "0.48703775", "0.48694524", "0.48629633", "0.48147547", "0.48052073", "0.47958004", "0.47956005", "0.47599873", "0.4759835", "0.47438017", "0.47285873", "0.47137284", "0.46693212", "0.46477947", "0.46450815" ]
0.61081445
0
Add segments from a corpus in a storage.
def seg_add(self, remote_path, corpus_id, segments, storage_id=None):
    client, remote_path = self._get_storage(remote_path, storage_id=storage_id)
    return client.seg_add(corpus_id, segments)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_segments(self, *segments):\n for s in segments:\n self._add_one(s)", "def add_documents(self, docs):\n if 'sentences' in docs:\n for sent in docs.sentences:\n sent = map(self.process_token, [t for t in sent.tokens if not t.is_stopword])\n self._token_count.update(sent)\n\n else:\n sent = list(map(self.process_token, [t for t in docs.tokens if not t.is_stopword]))\n self._token_count.update(sent)", "def add(self, document):\n #words=[word.lower() for word in words if word.isalpha()] #added on 0415\n for token in [t.lower() for t in nltk.word_tokenize(document)]:\n if not token.isalpha():\n continue\n\n if token in self.stopwords:\n continue\n \n if self.stemmer:\n token = self.stemmer.stem(token)\n \n if self.__unique_id not in self.index[token]:\n self.index[token].append(self.__unique_id)\n \n self.documents[self.__unique_id] = document\n self.__unique_id += 1", "def add(self, documents):\n\n if self.cluster:\n self.cluster.add(documents)\n else:\n super().add(documents)\n\n return documents", "def segment(args):\n logger = logging.getLogger('SegEDU')\n rst_data = RSTData()\n logger.info('Loading vocab...')\n with open(args.word_vocab_path, 'rb') as fin:\n word_vocab = pickle.load(fin)\n logger.info('Word vocab size: {}'.format(word_vocab.size()))\n rst_data.word_vocab = word_vocab\n logger.info('Loading the model...')\n model = AttnSegModel(args, word_vocab)\n model.restore('best', args.model_dir)\n if model.use_ema:\n model.sess.run(model.ema_backup_op)\n model.sess.run(model.ema_assign_op)\n\n spacy_nlp = spacy.load('en', disable=['parser', 'ner', 'textcat'])\n for file in args.input_files:\n logger.info('Segmenting {}...'.format(file))\n raw_sents = []\n with open(file, 'r') as fin:\n for line in fin:\n line = line.strip()\n if line:\n raw_sents.append(line)\n samples = []\n for sent in spacy_nlp.pipe(raw_sents, batch_size=1000, n_threads=5):\n samples.append({'words': [token.text for token in sent],\n 'edu_seg_indices': []})\n rst_data.test_samples = samples\n data_batches = rst_data.gen_mini_batches(args.batch_size, test=True, shuffle=False)\n\n edus = []\n for batch in data_batches:\n batch_pred_segs = model.segment(batch)\n for sample, pred_segs in zip(batch['raw_data'], batch_pred_segs):\n one_edu_words = []\n for word_idx, word in enumerate(sample['words']):\n if word_idx in pred_segs:\n edus.append(' '.join(one_edu_words))\n one_edu_words = []\n one_edu_words.append(word)\n if one_edu_words:\n edus.append(' '.join(one_edu_words))\n\n if not os.path.exists(args.result_dir):\n os.makedirs(args.result_dir)\n save_path = os.path.join(args.result_dir, os.path.basename(file))\n logger.info('Saving into {}'.format(save_path))\n with open(save_path, 'w') as fout:\n for edu in edus:\n fout.write(edu + '\\n')", "def add_segment(self, segment):\n self.segments.append(segment)", "def add_segm(*args):\n return _ida_segment.add_segm(*args)", "def add_documents(self, docs):\n for sent in docs:\n sent = map(self.process_token, sent)\n self._token_count.update(sent)", "def add_segment_translation(*args):\n return _ida_segment.add_segment_translation(*args)", "def merge_segdb(segdbs):\n segdb = segdbs[0]\n for r in segdbs[1:]:\n segdb.extend(r)\n return segdb", "def merge_segdb(segdbs):\n segdb = segdbs[0]\n for r in segdbs[1:]:\n segdb.extend(r)\n return segdb", "def construct_segments(self):\n for strand in self.strand_list:\n strand.construct_segment()", "def segmentize(gentle_outputs, audio_file,\n anchor_length):\n # variables to help with bounding Segments\n correct_count = 0\n 
end_prev_anchor = 0\n first_correct_index = None\n\n # convenience variable\n total_gentle_len = len(gentle_outputs)\n\n # Array to store all final segments\n segs = []\n\n # run through the list of Word objects\n for index, word in enumerate(gentle_outputs):\n\n if word.success():\n\n # if the word was successfully aligned\n # update variable values and move on\n correct_count += 1\n\n # update first_correct tracker for later bounding\n if first_correct_index is None:\n first_correct_index = index\n\n\n # if word is unaligned, check if current\n elif correct_count >= anchor_length:\n\n # Make sure that the unaligned segment exists\n # Would throw an error if the audio file began\n #with an anchor point\n if end_prev_anchor != first_correct_index:\n\n # load the previous unanchored words as a Segment\n seg = get_segment(gentle_outputs[end_prev_anchor: \\\n first_correct_index], False, audio_file,\n total_gentle_len)\n\n segs.append(seg)\n\n # Load the current ancor words as a Segment\n seg = get_segment(gentle_outputs[first_correct_index: \\\n index], True, audio_file, total_gentle_len)\n\n segs.append(seg)\n\n # set the end prev_anchor tracker\n # to the current location\n end_prev_anchor = index\n\n # reset counter variables\n correct_count = 0\n first_correct_index = None\n\n # Resets counter variables if the\n # current word is unaligned and is less\n # than the anchor length\n elif index < len(gentle_outputs) - 1:\n\n # reset counter variables\n correct_count = 0\n first_correct_index = None\n\n # if we have reached the end of the audio file\n # we need to segmentize all the remaining\n # unsegmented part of the transcript/audiofile\n # and reaccount for all cases\n if index == len(gentle_outputs) - 1:\n\n # Case: current seg is an anchor point\n # store unanchored segment\n # then store anchored segment\n if correct_count >= anchor_length:\n\n if end_prev_anchor != first_correct_index:\n\n # get previous unanchored seg\n seg = get_segment(gentle_outputs[end_prev_anchor: \\\n first_correct_index], False, audio_file,\n total_gentle_len)\n\n # store previous unanchored seg\n segs.append(seg)\n\n # get the anchor segment\n seg = get_segment(gentle_outputs[first_correct_index:], \\\n True, audio_file, total_gentle_len)\n\n # store the anchor seg\n segs.append(seg)\n\n # update end of prev anchor tracker\n end_prev_anchor = index\n\n # Case: current segment does not qualify as an anchor point\n # then just store all the remaining words as an unanchored segment\n else:\n\n # store the previous unanchored segments as a seg- append\n seg = get_segment(gentle_outputs[end_prev_anchor:], \\\n False, audio_file, total_gentle_len)\n segs.append(seg)\n\n\n return segs", "def add_segment(self):\n last_seg = c.coords(self.segments[0].instance)\n x = last_seg[2] - SEG_SIZE\n y = last_seg[3] - SEG_SIZE\n self.segments.insert(0, Segment(x, y))", "def extend(self):\n # -1 in the segments means that starts counting in the end of the list\n self.add_segment(self.segments[-1].position())", "def parse_segments(self):\n segs = self.unixtext.split(\"$$\")\n for seg in segs:\n self.segments.append(TextProductSegment(seg, self))", "def add_segment(self, segment):\n assert segment is None or isinstance(segment, Segment)\n\n self.segment = segment\n if segment is None:\n return\n\n ## reset Strand description with the description derived\n ## from the new Segment\n try:\n frag1 = segment[0]\n frag2 = segment[-1]\n except IndexError:\n return\n\n self.chain_id1 = frag1.chain_id\n self.fragment_id1 = 
frag1.fragment_id\n self.res_name1 = frag1.res_name\n\n self.chain_id2 = frag2.chain_id\n self.fragment_id2 = frag2.fragment_id\n self.res_name2 = frag2.res_name", "def get_segments(self):\n\t\tos.chdir(self.segment_path)\n\t\tfor path in glob.glob(\"%s/*.seg\" % self.segment_path):\n\t\t\t_file = os.path.split(path)[1]\n\t\t\tdae = DiscreetArchiveElement(self,_file,element_type='segment')\n\t\t\tself.elements.append(dae)\n\t\treturn True", "def build_corpus(self):\n print(\"Inside the build_corpus >>>>>\")\n documentsCount = 0\n documents = self.documents\n\t\t\n with open(self.documents_path) as file:\n for documents in file.readlines():\n documents = documents.rstrip('}\\n ').strip('0\\t').strip('1\\t').split(' ')\n documentsCount = documentsCount +1\n self.documents.append(documents)\n\t\t\t\n self.number_of_documents = documentsCount", "def __load_segments(self):\r\n self.__segments = []\r\n if len(self.points) > 1:\r\n s = self.points[0]\r\n k = 1\r\n while k < len(self.points):\r\n e = self.points[k]\r\n self.__segments.append(Segment(s, e))\r\n s = e \r\n k += 1\r\n e = self.points[0]\r\n self.__segments.append(Segment(s, e))", "def storeSegment ( baseurl, fields, token ):\n\n # Create the segment and initialize it's fields\n ann = annotation.Annotation()\n\n ann.annid = int(fields[0])\n\n # Exceptional cases\n if ann.annid in EXCEPTIONS:\n print \"Skipping id \", ann.annid\n return \n\n descriptorstr = fields[40].split(\"\\\"\")\n descriptor = descriptorstr[1]\n\n ann.kvpairs = { 'sourceId':fields[0], 'sourceDescription':descriptor }\n ann.author = 'Kasthuri,N.'\n\n pprint(vars(ann))\n\n h5anno = h5ann.AnnotationtoH5 ( ann )\n\n url = \"http://%s/annotate/%s/\" % ( baseurl, token)\n print url\n\n try:\n req = urllib2.Request ( url, h5anno.fileReader()) \n response = urllib2.urlopen(req)\n except urllib2.URLError, e:\n print \"Failed URL\", url\n print \"Error %s\" % (e.read()) \n sys.exit(0)\n\n the_page = response.read()\n print \"Success with id %s\" % the_page", "def set_segments(self, segments):\n self.send_command(Command.SET_SEGMENT_COUNT, [segments])", "def add(self, tokens):\n\n for token in tokens:\n self.vocab.add(token)\n\n for leader, token in generate_ngrams(tokens, self.size, include_terminator = self.size != 1):\n if leader not in self.frequencies:\n self.frequencies[leader] = Frequency()\n\n self.frequencies[leader].add(token)", "def add_segment(self, segment):\n assert segment is None or isinstance(segment, Segment)\n self.segment = segment\n\n ## just return if the segment is None\n if segment is None:\n return\n\n ## reset AlphaHelix description with the description derived\n ## from the new Segment\n try:\n frag1 = segment[0]\n frag2 = segment[-1]\n except IndexError:\n return\n\n self.chain_id1 = frag1.chain_id\n self.fragment_id1 = frag1.fragment_id\n self.res_name1 = frag1.res_name\n\n self.chain_id2 = frag2.chain_id\n self.fragment_id2 = frag2.fragment_id\n self.res_name2 = frag2.res_name\n\n self.helix_length = len(segment)", "def add_segment(self):\n copy = self.segments[-1]\n segment = Segment(copy.radius, copy.position.copy(),\n copy.heading_vector.copy())\n self.segments.append(segment)", "def updateAnnotations(self):\n self.backupDatafiles()\n print(\"Updating annotation files \", self.field(\"trainDir\"))\n listOfDataFiles = QDir(self.field(\"trainDir\")).entryList(['*.data'])\n for file in listOfDataFiles:\n # Read the annotation\n segments = Segment.SegmentList()\n newsegments = Segment.SegmentList()\n 
segments.parseJSON(os.path.join(self.field(\"trainDir\"), file))\n allSpSegs = np.arange(len(segments)).tolist()\n newsegments.metadata = segments.metadata\n for segix in allSpSegs:\n seg = segments[segix]\n if self.field(\"species\") not in [fil[\"species\"] for fil in seg[4]]:\n newsegments.addSegment(seg) # leave non-target segments unchanged\n else:\n for seg2 in self.segments:\n if seg2[1] == seg:\n # find the index of target sp and update call type\n seg[4][[fil[\"species\"] for fil in seg[4]].index(self.field(\"species\"))][\"calltype\"] = self.clusters[seg2[-1]]\n newsegments.addSegment(seg)\n newsegments.saveJSON(os.path.join(self.field(\"trainDir\"), file))", "def append(self, document):\n raise NotImplemented(\"Corpus does not allow appending\")", "def addSegment(self, p1, p2, a, b):\n\n self.segments.append((p1,p2,a,b))", "def upload_semantic_segments_to_boxes(self, data):\n #data_str = 'array[\"' + '\",\"'.join(data) + '\"]'\n data_str = \"array['\" + \"','\".join(data) + \"']\"\n sql = f\"SET role {self.write_role}; \" \\\n + f\"update results.boxes \" \\\n + f\"set semantic_segment_bottom_edge_mode = ({data_str})[id];\"\n return sql", "def add_sentence(self, sentence):\n cleaned = self.clean_string(sentence)\n stemmed = self.stem(cleaned)\n self.texts.append(stemmed)" ]
[ "0.60334104", "0.56780165", "0.54971635", "0.54327893", "0.53650546", "0.53482825", "0.5279206", "0.52374446", "0.5221571", "0.52145606", "0.52145606", "0.50907433", "0.5046816", "0.50380266", "0.50286496", "0.49813986", "0.4940868", "0.48904213", "0.48843732", "0.48765466", "0.48740906", "0.48739758", "0.48563632", "0.4826289", "0.4819436", "0.4814086", "0.4793002", "0.47919154", "0.4789049", "0.47800124" ]
0.700075
0
Renames a file or directory on storage from old_remote_path to new_remote_path.
def rename(self, old_remote_path, new_remote_path, storage_id=None):
    client_old, old_remote_path = self._get_storage(old_remote_path, storage_id=storage_id)
    client_new, new_remote_path = self._get_storage(new_remote_path, storage_id=storage_id)
    if client_old._storage_id != client_new._storage_id:
        raise ValueError('rename on different storages')
    result = client_old.rename(old_remote_path, new_remote_path)
    if result is None:
        # some storages return nothing when ok and raise exception when error
        return True
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rename(path, new_path):\n fs.rename(path, new_path)", "def rename(self, name, new_name):\n\n if not new_name:\n raise ValueError(\"Current remote name must be a non-empty string\")\n\n if not new_name:\n raise ValueError(\"New remote name must be a non-empty string\")\n\n problems = ffi.new('git_strarray *')\n err = C.git_remote_rename(problems, self._repo._repo, to_bytes(name), to_bytes(new_name))\n check_error(err)\n\n ret = strarray_to_strings(problems)\n C.git_strarray_free(problems)\n\n return ret", "def remoteTestsDirRenamed(self, projectId, directoryPath, directoryName, newName):\n if len(directoryPath) > 0:\n complete_old = \"%s/%s\" % (directoryPath, directoryName)\n complete_new = \"%s/%s\" % (directoryPath, newName)\n else:\n complete_old = directoryName\n complete_new = newName\n\n for tabId in xrange( self.tab.count() ): \n doc = self.tab.widget(tabId)\n \n # bypass the welcome page\n if isinstance(doc, WelcomePage): \n continue\n # end of bypass\n \n \n if doc.isRemote == True and doc.getPathOnly().startswith(complete_old) and \\\n doc.project == int(projectId) and doc.repoDest==UCI.REPO_TESTS: \n to_keep = doc.getPathOnly().split(complete_old)\n if len(to_keep) > 1: to_keep = to_keep[1]\n else: to_keep = to_keep[0]\n\n full_new_path = \"%s%s\" % (complete_new, to_keep)\n\n self.tab.setCurrentIndex(tabId)\n \n msg = self.tr(\"The path of this file has been renamed.\\nDo you want to update the path ?\")\n buttons = QMessageBox.Yes | QMessageBox.No \n answer = QMessageBox.question(self, Settings.instance().readValue( key = 'Common/name' ), \n msg, buttons)\n if answer == QMessageBox.Yes:\n doc.updatePath( pathFilename=full_new_path )\n doc.setUnmodify()\n elif answer == QMessageBox.No:\n doc.unSaved()\n doc.setModify()", "def remoteAdaptersDirRenamed(self, directoryPath, directoryName, newName):\n if len(directoryPath) > 0:\n complete_old = \"%s/%s\" % (directoryPath, directoryName)\n complete_new = \"%s/%s\" % (directoryPath, newName)\n else:\n complete_old = directoryName\n complete_new = newName\n \n for tabId in xrange( self.tab.count() ): \n doc = self.tab.widget(tabId)\n \n # bypass the welcome page\n if isinstance(doc, WelcomePage): \n continue\n # end of bypass\n \n if doc.isRemote == True and doc.getPathOnly().startswith(complete_old) and \\\n doc.project == 0 and doc.repoDest==UCI.REPO_ADAPTERS: \n to_keep = doc.getPathOnly().split(complete_old)\n if len(to_keep) > 1: to_keep = to_keep[1]\n else: to_keep = to_keep[0]\n\n full_new_path = \"%s%s\" % (complete_new, to_keep)\n\n self.tab.setCurrentIndex(tabId)\n \n msg = self.tr(\"The path of this file has been renamed.\\nDo you want to update the path ?\")\n buttons = QMessageBox.Yes | QMessageBox.No \n answer = QMessageBox.question(self, Settings.instance().readValue( key = 'Common/name' ), \n msg, buttons)\n if answer == QMessageBox.Yes:\n doc.updatePath( pathFilename=full_new_path )\n doc.setUnmodify()\n elif answer == QMessageBox.No:\n doc.unSaved()\n doc.setModify()", "def mv(self, src_path, dst_path):\n try:\n postdata = codecs.encode(json.dumps({ 'src': src_path, 'dst': dst_path }), 'utf-8')\n self._urlopen('/api/fileops/move', postdata).read()\n except HTTPError as err:\n raise RuntimeError(\"Unable to move '{}' to '{}'\".format(src_path, dst_path))", "def rename(self, src, dst):\n os.rename(src, dst)", "def rename(old, new):", "def rename(old, new):", "def _rename_file(self, old_path, new_path):\n if not self.mount():\n return False\n _log(\"AnnexGvfsBackend._rename_file(%r -> %r)\" % (old_path, 
new_path))\n old_dir_uri = self.path_to_uri(os.path.dirname(old_path))\n new_dir_uri = self.path_to_uri(os.path.dirname(new_path))\n old_uri = self.path_to_uri(old_path)\n new_uri = self.path_to_uri(new_path)\n try:\n if not self.gvfs.create_dir_p(new_dir_uri):\n raise IOError()\n if not self.gvfs.rename_file(old_uri, new_uri):\n raise IOError()\n except IOError:\n return False\n else:\n return True", "def rename(self, new_name):\n\n self.__enforce_connected()\n current_url = self.url\n self._set_field(\"name\",new_name)\n self.set_json(self._http_client.update(current_url, self.get_json()))", "def os_rename(self, source, destination):\n cmd = ['/bin/mv', source, destination]\n process = subprocess.Popen(cmd, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n returncode = subprocess.Popen.wait(process)\n return returncode", "def rename(self, target):\n target = os.fspath(target)\n return error.checked_call(os.rename, self.strpath, target)", "def rename(oldname, newname):", "def rename_file(path, old_name, new_name):\n \n old_file = os.path.join(path, old_name)\n new_file = os.path.join(path, new_name)\n os.rename(old_file, new_file)", "def mv(cur_path, new_path):\n cur_abs = navigate.get_abs_path(cur_path)\n new_abs = navigate.get_abs_path(new_path)\n cur_parent, cur_name = navigate.split_path(cur_abs)\n new_parent, new_name = navigate.split_path(new_abs)\n up_parent, up_name = navigate.split_path(new_parent)\n if not db.file_exists(cur_parent, cur_name):\n print \"Error: '\" + cur_name + \"' does not exist.\"\n elif up_parent is not None and not db.directory_exists(up_parent, up_name):\n print \"Error: '\" + new_parent + \"' is not a valid directory.\"\n elif db.file_exists(new_parent, new_name):\n print \"Error: '\" + new_name + \"' already exists at that location.\"\n else:\n cur_dbox_path = '/' + cur_name\n new_dbox_path = '/' + new_name\n access_token = db.get_access_to_file(cur_parent, cur_name)\n client = dropbox.client.DropboxClient(access_token)\n client.file_move(cur_dbox_path, new_dbox_path)\n db.move_file(cur_parent, cur_name, new_parent, new_name)", "def rename_file(old_path, new_path):\n if os.path.exists(new_path):\n raise FileExistsError(errno.EEXIST, os.strerror(errno.EEXIST),\n old_path, new_path)\n os.rename(old_path, new_path)", "def rename(self, old_path, new_path):\n self.rename_file(old_path, new_path)\n self.checkpoints.rename_all_checkpoints(old_path, new_path)", "def rename(self, src, dst, preserve=False):\n self.connect()\n if preserve:\n self._write('RENAMENX %s %s\\r\\n' % (src, dst))\n return self._get_numeric_response()\n else:\n self._write('RENAME %s %s\\r\\n' % (src, dst))\n return self._get_simple_response().strip()", "def rename(old, new):\n\ttry:\n\t\tos.rename(old, new)\n\texcept OSError as e:\n\t\tif e.errno != errno.EEXIST:\n\t\t\traise\n\t\tos.remove(old)", "def auto_rename(file_path, new_name):\n \n # Return if no file given\n if not file_path:\n return ''\n else:\n file_path = file_path\n \n # Get the new name\n new_path = change_basename(file_path, new_name)\n \n \n # Changed?\n if new_path != file_path:\n # Try to rename\n try:\n shutil.move(os.path.join(settings.MEDIA_ROOT, file_path), os.path.join(settings.MEDIA_ROOT, new_path))\n except IOError:\n # Error? 
Restore original name\n new_path = file_path\n \n # Return the new path replacing backslashes (for Windows)\n return new_path", "def move_to(self, file_name, to_dir=None, change_name_to=None):\n self._check_filename(file_name)\n from_path = os.path.join(self.local_root, file_name)\n\n if not os.path.isfile(from_path):\n raise FileNotFoundError(\n f\"{file_name} not found in {self.local_root} on local machine\"\n )\n\n file_name = file_name if change_name_to is None else change_name_to\n to_dir = \"\" if to_dir is None else to_dir\n to_path = posixpath.join(self.root, to_dir, file_name)\n self.makedir(to_dir)\n self._check_file_exists(to_path, should_exist=False)\n\n with self.ssh.open_sftp() as sftp:\n print(f\"Transferring {from_path} to server\")\n sftp.put(from_path, to_path)\n\n print(f\"--> Deleting {from_path} on local machine\")\n os.remove(from_path)", "def relocate(self, source, destination):\n destination_dir = os.path.dirname(destination)\n if not os.path.exists(destination_dir):\n self.subdir(destination_dir)\n os.rename(source, destination)", "def mv(src_path, dest_path):\n try:\n os.rename(src_path, dest_path)\n except OSError:\n # this will happen on windows\n os.remove(dest_path)\n os.rename(src_path, dest_path)", "def rename(self,oldName,newName):\n #--Update references\n fileInfo = self[oldName]\n self[newName] = self[oldName]\n del self[oldName]\n self.table.moveRow(oldName,newName)\n #--FileInfo\n fileInfo.name = newName\n #--File system\n newPath = os.path.join(fileInfo.dir,newName)\n oldPath = os.path.join(fileInfo.dir,oldName)\n renameFile(oldPath,newPath)\n #--Done\n fileInfo.madeBackup = False", "def rename(self, name=None, destination=None):\n raise NotImplementedError\n return None", "def fs_rename_entry(self, oldPath, newPath):\n\t\treturn Job(SDK.PrlSrv_FsRenameEntry(self.handle, oldPath, newPath)[0])", "def renamed(self, source, dest):\r\n self.__close_and_reload(source, new_filename=dest)", "def hmove(src_path, res_path):\n os.rename(src_path, res_path)", "def remoteTestsFileRenamed(self, projectId, filePath, fileName, fileExtension, newName):\n if len(filePath) > 0:\n complete_path = \"%s/%s.%s\" % (filePath, fileName, fileExtension)\n else:\n complete_path = \"%s.%s\" % ( fileName, fileExtension)\n tabId = self.checkAlreadyOpened(path = complete_path, \n remoteFile=True, \n repoType=UCI.REPO_TESTS, \n project=projectId)\n if tabId is not None:\n doc = self.tab.widget(tabId)\n self.tab.setCurrentIndex(tabId)\n buttons = QMessageBox.Yes | QMessageBox.No \n answer = QMessageBox.question(self, Settings.instance().readValue( key = 'Common/name' ), \n self.tr(\"This file has been renamed.\\nDo you want to update the name ?\") , buttons)\n if answer == QMessageBox.Yes:\n doc.updateFilename( filename=newName )\n doc.setUnmodify()\n elif answer == QMessageBox.No:\n doc.unSaved()\n doc.setModify()", "def do_mv(self, args):\n if args:\n args = args.split()\n\n if not args or len(args) < 2:\n print('Usage: mv source_file target_file')\n return\n\n src = args[0]\n dst = args[1]\n if not (src.startswith('shared/') and dst.startswith('shared/')\n or self._user):\n print('login required for specifying non-shared file with mv')\n return\n\n try:\n new_name = self._qm.rename_file(self._user, src, dst)\n print('renamed file', src, 'to', new_name)\n except Exception as e:\n print('ERROR renaming %s: %s' % (src, e), file=sys.stderr)\n return" ]
[ "0.66009545", "0.65774685", "0.6334382", "0.6323269", "0.6254582", "0.62079513", "0.61884886", "0.61884886", "0.6153674", "0.6129355", "0.6117416", "0.61145955", "0.6065734", "0.6009174", "0.60073656", "0.60063607", "0.5983843", "0.5968736", "0.592499", "0.5916464", "0.5915777", "0.59113413", "0.5903513", "0.58957225", "0.5860356", "0.5843619", "0.5840895", "0.5839041", "0.58317196", "0.58285254" ]
0.7846696
0
Add a tag associated with a corpus.
def tag_add(self, remote_path, corpus_id, tag, storage_id=None):
    client, remote_path = self._get_storage(remote_path, storage_id=storage_id)
    return client.tag_add(corpus_id, tag)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_tag(self, tag):\n self.tags.append(tag)", "def add(self, tag):\n self.tags[tag.name] = tag", "def add_tag(self, tag: str) -> None:\n tags = self.get_tag_index()\n tags.append(tag)\n self.write_tag_index(list(set(tags)))", "def add_tag(self, tag, attributes, extent):\n self.tags.append((tag, attributes, extent))", "def add_tag(self, dataset: \"Dataset\", tag: \"DatasetTag\"):\n raise NotImplementedError", "def AddTag(self, tag):\n\n if not self.persistant:\n return\n\n self.db.ExecuteSql('insert into tags(tag, track_id) values(\"%s\", %d);'\n %(tag, self.persistant['id']))\n self.db.ExecuteSql('commit;')", "def add_tag(self, tag):\n cp = self.copy()\n cp.tags.add(tag)\n return cp", "def add_tag(self, tag):\n if tag not in self._tag:\n self._tag.append(tag)\n\n return self", "def add_tag(self, session, tag):\n self._tag(session.put, key=tag, session=session)", "def add_tag(self, key, value=''):\r\n status = self.connection.create_tags([self.id], {key : value})\r\n if self.tags is None:\r\n self.tags = TagSet()\r\n self.tags[key] = value", "def add_tag(self, transaction, citation_handle, tag_handle):\n citation = self.dbstate.db.get_citation_from_handle(citation_handle)\n citation.add_tag(tag_handle)\n self.dbstate.db.commit_citation(citation, transaction)", "def test_add_tag(self):\n fc = self.read_feature(region='Adriatic_Sea')\n\n fc.tag(tags=['tag1', 'tag2', 'Mediterranean_Basin'])\n assert (fc.features[0]['properties']['tags'] ==\n 'Adriatic_Sea;Mediterranean_Basin;tag1;tag2')\n\n self.check_feature(fc.features[0])", "def add_word_tag(self, token, label):\n # Add total count for label\n self.label_counts[label] += 1\n # Add count for word given label\n if token not in self.words_labels_counts[label]:\n self.words_labels_counts[label][token] = 1\n else:\n self.words_labels_counts[label][token] += 1", "def createTag(self, authenticationToken, tag):\r\n pass", "async def addtags(self, ctx, tag, *, data):\r\n\t\tTag = self.settings.ServerConfig(ctx.guild.id, 'Tags')\r\n\t\tif not tag in Tag:\r\n\t\t\tTag[tag] = self.Conf.Tags\r\n\t\t\tawait ctx.send('Added Tag: {}'.format(tag))\r\n\t\telse:\r\n\t\t\tawait ctx.send('Edited Tag: '.format(tag))\r\n\r\n\t\tnowgmt = time.strftime(\"%H:%M:%S, %d/%m/%Y\", time.gmtime())\r\n\t\t\r\n\t\tTag[tag]['user'] = ctx.author.id\r\n\t\tTag[tag]['data'] = data\r\n\t\tTag[tag]['time'] = nowgmt\r\n\t\tself.settings.ServerConfig(ctx.guild.id, 'Tags', Tag)", "def add_tag(demo_id, member_id, tag):\n if demo_id is None:\n raise ValueError(\"Cannot add tag for None demo.\")\n \n with Session() as session:\n demo_tag = DemoTag(demo_id, member_id, tag)\n session.add(demo_tag)\n session.commit()", "def setAddTags(self,value):\n self.PDFreactorConfiguration.in1[\"addTags\"] = value", "def add(self, document):\n #words=[word.lower() for word in words if word.isalpha()] #added on 0415\n for token in [t.lower() for t in nltk.word_tokenize(document)]:\n if not token.isalpha():\n continue\n\n if token in self.stopwords:\n continue\n \n if self.stemmer:\n token = self.stemmer.stem(token)\n \n if self.__unique_id not in self.index[token]:\n self.index[token].append(self.__unique_id)\n \n self.documents[self.__unique_id] = document\n self.__unique_id += 1", "def add_tag(self, obj, tag_name):\r\n tag_names = parse_tag_input(tag_name)\r\n if not len(tag_names):\r\n raise AttributeError(_('No tags were given: \"%s\".') % tag_name)\r\n if len(tag_names) > 1:\r\n raise AttributeError(_('Multiple tags were given: \"%s\".') % tag_name)\r\n tag_name = tag_names[0]\r\n if 
settings.FORCE_LOWERCASE_TAGS:\r\n tag_name = tag_name.lower()\r\n tag, created = self.get_or_create(name=tag_name)\r\n ctype = ContentType.objects.get_for_model(obj)\r\n TaggedItem._default_manager.get_or_create(\r\n tag=tag, content_type=ctype, object_id=obj.pk)", "def add(self, keyword):\n tag = self._find(keyword)\n if tag is None:\n tag = etree.SubElement(self.meta, CN('meta:keyword'))\n tag.text = keyword", "def add_tag(convo_ID, tag_ID):\n # Make API request\n url = \"https://api2.frontapp.com/conversations/\" + convo_ID + \"/tags\"\n payload = json.dumps({\"tag_ids\": [tag_ID]})\n headers = {\"Authorization\": BEARER_TOKEN, \"Content-Type\": \"application/json\"}\n requests.request(\"POST\", url, headers=headers, data=payload)", "def add_tag_data(tag):\n\n add_tag = Tag(tag=tag)\n db.session.add(add_tag)\n try:\n db.session.commit()\n except (Exception, exc.SQLAlchemyError, exc.InvalidRequestError, exc.IntegrityError) as e:\n print(tag + '\\n' + str(e))", "def add_tag(self, tag):\n\n # directional relation: tag is the blank of everything in the list\n self.relations[tag] = {\n \"overlord\": [],\n \"hegemon\": [], # for tributary\n \"tributary\": [],\n \"vassal\": [],\n \"guaranteeing\": [],\n \"guarantor\": [],\n \"alliance\": [],\n \"senior\": [],\n \"junior\": [],\n \"marriage\": []\n }", "def tag(self, tag):\n self.tag = tag", "def put_tag(self, key, tag):\n self._entries[key] = tag", "def _add_tag(self, tag_name):\n tag = TagInfo()\n tag._name = tag_name\n self._tags.append(tag)\n return tag", "def append(self, tag):\r\n self.insert(len(self.contents), tag)", "def create_tag(self, entry_name, tag):\n return self.__datacatalog.create_tag(parent=entry_name, tag=tag)", "def add_tag():\n \n return render_template('tags/add_tag.html')", "def _add_tags(self):\n\n if self.version != 'live':\n return\n\n tags = [t.strip() for t in self.tags_text.split(',')]\n tags = list(set(tags))\n\n for tag_name in tags:\n tag_slug = slugify(tag_name)\n if tag_slug:\n try:\n tag = Tag.objects.get(blog=self.blog, slug=tag_slug)\n except Tag.DoesNotExist:\n tag = Tag( blog = self.blog,\n name = tag_name,\n slug = tag_slug)\n\n tag.increment()\n tag.save()\n\n self.tags.add(tag)" ]
[ "0.67452145", "0.67087305", "0.6602822", "0.654039", "0.6409781", "0.6332519", "0.61717", "0.61188984", "0.60842836", "0.60632265", "0.60379875", "0.60356516", "0.58503866", "0.58032125", "0.57711476", "0.5755432", "0.5735581", "0.57319754", "0.56949925", "0.56422025", "0.5634466", "0.5632429", "0.56294197", "0.56160045", "0.5605506", "0.5600144", "0.55941266", "0.55851984", "0.5584421", "0.55464906" ]
0.7265227
0
Remove a tag associated with a corpus.
def tag_remove(self, remote_path, corpus_id, tag, storage_id=None):
    client, remote_path = self._get_storage(remote_path, storage_id=storage_id)
    return client.tag_remove(corpus_id, tag)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_tag(self, tag: str) -> None:\n tags = self.get_tag_index()\n tags.remove(tag)\n self.write_tag_index(list(set(tags)))", "def remove_tag(self, dataset: \"Dataset\", tag: \"DatasetTag\"):\n raise NotImplementedError", "def remove_tag(args):", "def delete_tag(tag):\n tag.destroy()", "def delete_tag(self,tag):\r\n\r\n # with shelf\r\n if self.using_shelf:\r\n del self.tag_dict[tag]", "def remove_tags_from_corpus(text_corpus):\n\n text_corpus[text_column_name] = text_corpus[\n text_column_name].apply(remove_tags)\n text_corpus = text_corpus[text_corpus[text_column_name] != ' ']\n return text_corpus", "def test_remove_tag(self):\n fc = self.read_feature(region='Adriatic_Sea')\n\n fc.tag(tags=['Mediterranean_Basin', 'tag1'], remove=True)\n assert (fc.features[0]['properties']['tags'] == 'Adriatic_Sea')\n\n self.check_feature(fc.features[0])", "def remove_tag(self, tag):\n if tag in self.tags:\n index = self.tags.index(tag)\n self.tags[index:index + 1] = []\n self.stop_times[index:index + 1] = []", "def remove_tag(self, tag):\n _tag_entity('task', self.task_id, tag, untag=True)", "def remove_tag(self, tag):\n cp = self.copy()\n cp.tags.remove(tag)\n return cp", "def remove_tag(self, tag):\n if tag in self._tag:\n self._tag.remove(tag)\n\n return self", "def delete_tag(self, tag):\n return self.__datacatalog.delete_tag(name=tag.name)", "async def slashtag_remove(self, ctx: commands.Context, *, tag: GuildTagConverter):\n await ctx.send(await tag.delete())", "def untag(self, tag):\n if isinstance(tag, six.integer_types):\n try:\n tag = Tag.objects.get(pk=tag, owner=self.owner)\n except Tag.DoesNotExist:\n return\n \n if isinstance(tag, six.string_types):\n try:\n tag = Tag.objects.get(slug=makeslug(tag), owner=self.owner)\n except Tag.DoesNotExist:\n return\n \n self.tags.remove(tag)", "def remove_tag(tag):\n check_call(['git', 'tag', '-d', tag])", "def delete_tag(self, session, tag):\n self._tag(session.delete, key=tag, delete=True, session=session)", "def remove_tag(self, tag):\n for task in self._tasks:\n task.remove_tag(tag)\n\n return self", "async def removetags(self, ctx, tag=None):\r\n\t\tTag = self.settings.ServerConfig(ctx.guild.id, 'Tags')\r\n\t\tif not tag in Tag:\r\n\t\t\treturn await ctx.send('Can\\'t find Tag: '.format(tag))\t\r\n\r\n\t\tdel Tag[tag]\r\n\t\tself.settings.ServerConfig(ctx.guild.id, 'Tags', Tag)\r\n\r\n\t\tawait ctx.send('Removed Tag: '.format(tag))", "def untag():\n version = git.prompt_tag('Which tag to delete?')\n if not version:\n abort('No available version tag')\n git.delete_tag(version)", "def remove(self, keyword):\n tag = self._find(keyword)\n if tag is not None:\n self.meta.remove(tag)", "def removeEmbedded(self, tag):\n self.embeddedTags = self.embeddedTags[:-1]", "def remove_tag(self, key, value=None):\r\n if value:\r\n tags = {key : value}\r\n else:\r\n tags = [key]\r\n status = self.connection.delete_tags([self.id], tags)\r\n if key in self.tags:\r\n del self.tags[key]", "def remove_tag(convo_ID, tag_ID):\n # Make API request\n url = \"https://api2.frontapp.com/conversations/\" + convo_ID + \"/tags\"\n payload = json.dumps({\"tag_ids\": [tag_ID]})\n headers = {\"Authorization\": BEARER_TOKEN, \"Content-Type\": \"application/json\"}\n requests.request(\"DELETE\", url, headers=headers, data=payload)", "def untag_element(self,tag_name,element):\n pass", "def remove(self, tag_name: str, category: ty.Optional[str] = None):\n tags = self.__holder.db_tags.filter(lambda t: t.name == tag_name)\n if category is not None:\n tags = 
tags.filter(category=category)\n\n tag = tags.first()\n if tag:\n self.__holder.db_tags.remove(tag)", "def untag(tagged_sentence):\n return [w for (w, t) in tagged_sentence]", "def use_tag(self, tag):\n try:\n self.available_tags.remove(tag)\n except ValueError:\n return False\n return True", "def delete_tags(self, session):\n self._tag(session.delete, delete=True, session=session)", "def delete_tag(tag, directory=None):\n execute_command('git tag -d {0}'.format(tag), shell=True, cwd=directory)", "def remove_tag(tag_id):\n tag = Tags.query.get(tag_id)\n db_session.delete(tag)\n db_session.commit()\n return 'Tag #%s (%s) has been deleted.' % (tag_id, tag.tag), 'success'" ]
[ "0.72124904", "0.7099556", "0.70785826", "0.69428086", "0.68757766", "0.68354094", "0.6760699", "0.67518336", "0.66936296", "0.66848594", "0.6625902", "0.6531172", "0.6494891", "0.6458214", "0.64311934", "0.64180356", "0.62905985", "0.6257875", "0.62415624", "0.62254566", "0.61888295", "0.6164799", "0.6122121", "0.6104719", "0.6017967", "0.6006862", "0.59974945", "0.5988415", "0.59770447", "0.59758157" ]
0.75549275
0
Construct half of a queued batch, all of the same label
def _generate_half_batch(record_data, min_queue_examples, batch_size, num_steps, test_mode):
    # From TF documentation: "The batching will be nondeterministic if num_threads > 1"
    # Ok to have many threads for training, but in testing want a deterministic result.
    if test_mode:
        num_preprocess_threads = 1
    else:
        num_preprocess_threads = 16

    sequence = record_data.sequence
    label = record_data.label
    subject = record_data.subject_id
    name = record_data.image_name
    coords = record_data.patch_coords
    feature = record_data.features

    # Create a batch of this data type's sequences, half the size of the
    # batch that will be used in the RNN
    if False:
        sequences, label_half_batch, subjects, names, coordss, features = tf.train.batch(
            [sequence, label, subject, name, coords, feature],
            batch_size = (batch_size // 2),
            num_threads = num_preprocess_threads,
            capacity = (min_queue_examples + 3 * batch_size) // 2)
    else:
        sequences, label_half_batch, subjects, names, coordss, features = tf.train.shuffle_batch(
            [sequence, label, subject, name, coords, feature],
            batch_size = (batch_size // 2),
            num_threads = num_preprocess_threads,
            capacity = (min_queue_examples + 3 * batch_size) // 2,
            min_after_dequeue = min_queue_examples // 2)

    # Remove one dimension from label_batch
    label_half_batch = tf.reshape(label_half_batch, [batch_size // 2])
    subjects = tf.reshape(subjects, [batch_size // 2, record_data.patient_ID_bytes])
    names = tf.reshape(names, [batch_size // 2, record_data.image_name_bytes])
    coordss = tf.reshape(coordss, [batch_size // 2, record_data.coord_bytes])
    features = tf.reshape(features, [batch_size // 2, record_data.num_features-1])

    return sequences, label_half_batch, subjects, names, coordss, features
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_batch():\n\n # Initialize variables\n example = np.zeros(self.batch_size)\n labels = np.zeros((self.batch_size, 1))\n alphas = np.zeros(self.batch_size)\n n_items = 0\n index = 0\n\n while index < len(data):\n reduced_window = random.randint(0, self.window_size)\n if data[index] is not None:\n\n left = max(0, index - self.window_size + reduced_window)\n right = min((index + self.window_size + 1 -\n reduced_window), len(data) - 1)\n for pos2 in range(left, right, 1):\n\n if n_items == self.batch_size:\n queue.put((example, labels, index))\n example = np.zeros(self.batch_size)\n labels = np.zeros((self.batch_size, 1))\n n_items = 0\n\n if pos2 != index and data[pos2] is not None:\n example[n_items] = data[pos2]\n labels[n_items] = data[index]\n alpha = self.learning_rate - \\\n (self.learning_rate - 0.001) * (index / self.n_words)\n alphas[n_items] = max(0.001, alpha)\n n_items += 1\n index += 1\n\n # Poison pills\n for _ in range(n_workers):\n queue.put(None)", "def build(self, block_size):", "def _init_queued(volume):\n queued = np.zeros(volume.shape)\n queued[0, :, :] = 1\n queued[volume.shape[0]-1, :, :] = 1\n queued[:, 0, :] = 1\n queued[:, volume.shape[1]-1, :] = 1\n queued[:, :, 0] = 1\n queued[:, :, volume.shape[2]-1] = 1\n return queued", "def prepare_label(input_batch, new_size, num_classes, one_hot=True, task='seg'):\n with tf.name_scope('label_encode'):\n input_batch = tf.image.resize_nearest_neighbor(input_batch, new_size) # as labels are integer numbers, need to use NN interp.\n if task == 'seg':\n input_batch = tf.squeeze(input_batch, squeeze_dims=[3]) # reducing the channel dimension.\n if one_hot:\n input_batch = tf.one_hot(input_batch, depth=num_classes)\n return input_batch", "def ptb_producer(raw_data, batch_size, num_steps, word_to_id):\n x = []\n y = []\n n_batches = len(raw_data) // batch_size\n for sentence in raw_data:\n mask_index = get_mask_index(sentence)\n current_label = sentence[mask_index]\n sentence[mask_index] = word_to_id['<mask>']\n y.append(current_label)\n x.append(sentence)\n x = np.array(x)\n x = x[:n_batches*batch_size]\n x = np.reshape(x, [n_batches, batch_size, num_steps])\n y = np.array(y)\n y = y[:n_batches * batch_size]\n y = np.reshape(y, [n_batches, batch_size])\n return x, y", "def unbucketed_next(self):\n # Initialize batch containers\n label_batch = list()\n enc_input_batch = list()\n dec_input_batch = list()\n # Fill individual batches by iterating over the entire data source\n if self.sent_id < self.get_length():\n while len(enc_input_batch) < self.opt.batch_size:\n try:\n indexed_sent = self.data[self.sent_id]\n label_item = indexed_sent[1:]\n enc_input_item = indexed_sent[1:]\n # Reverse the input to the encoder, see arxiv.org/pdf/1703.03906.pdf\n enc_input_item.reverse()\n dec_input_item = indexed_sent[:-1]\n label_batch.append(label_item)\n enc_input_batch.append(enc_input_item)\n dec_input_batch.append(dec_input_item)\n self.sent_id += 1\n except IndexError:\n break\n else:\n raise IndexError\n return label_batch, enc_input_batch, dec_input_batch", "def _generate_bottlenecked_batch(fc6, pool, mask, min_queue_examples,\n batch_size, shuffle):\n # Create a queue that shuffles the examples, and then\n # read 'batch_size' images + labels from the example queue.\n num_preprocess_threads = 16\n if shuffle:\n fc6_batch, pool_batch, mask_batch = tf.train.shuffle_batch(\n [fc6, pool, mask],\n batch_size=batch_size,\n num_threads=num_preprocess_threads,\n capacity=min_queue_examples + 3 * batch_size,\n 
min_after_dequeue=min_queue_examples)\n else:\n fc6_batch, pool_batch, mask_batch = tf.train.batch(\n [fc6,pool, mask],\n batch_size=batch_size,\n num_threads=num_preprocess_threads,\n capacity=min_queue_examples + 3 * batch_size)\n # Display the masks in the visualizer.\n tf.image_summary('masks', 255*mask_batch[:,:,:,:1])\n # print(images.get_shape())\n # print(label_batch.get_shape())\n return fc6_batch, pool_batch, mask_batch", "def next(self):\n #print('next')\n batch_size = self.batch_size\n batch_data = nd.empty((batch_size,)+self.data_shape)\n batch_label = nd.empty((batch_size,)+self.label_shape)\n i = 0\n #self.cutoff = random.randint(800,1280)\n try:\n while i < batch_size:\n #print('N', i)\n data, label, annot = self.next_sample()\n R = self.get_data(data, label, annot)\n if R is None:\n continue\n data_out, label_out, flip_data_out, flip_label_out = R\n if not self.use_coherent:\n data = nd.array(data_out)\n data = nd.transpose(data, axes=(2, 0, 1))\n label = nd.array(label_out)\n #print(data.shape, label.shape)\n batch_data[i][:] = data\n batch_label[i][:] = label\n i += 1\n else:\n data = nd.array(data_out)\n data = nd.transpose(data, axes=(2, 0, 1))\n label = nd.array(label_out)\n data2 = nd.array(flip_data_out)\n data2 = nd.transpose(data2, axes=(2, 0, 1))\n label2 = nd.array(flip_label_out)\n #M = nd.array(M)\n #print(data.shape, label.shape)\n batch_data[i][:] = data\n batch_label[i][:] = label\n #i+=1\n j = i+self.per_batch_size//2\n batch_data[j][:] = data2\n batch_label[j][:] = label2\n i += 1\n if j%self.per_batch_size==self.per_batch_size-1:\n i = j+1\n except StopIteration:\n if i<batch_size:\n raise StopIteration\n\n #return {self.data_name : batch_data,\n # self.label_name : batch_label}\n #print(batch_data.shape, batch_label.shape)\n return mx.io.DataBatch([batch_data], [batch_label], batch_size - i)", "def create_mock_batch(\n batch_size,\n sequence_length,\n number_of_nodes,\n edge_per_node,\n in_channels,\n number_of_classes,\n):\n batch = torch.zeros(batch_size, sequence_length, number_of_nodes, in_channels)\n batch_targets = torch.zeros(batch_size, number_of_nodes, dtype=torch.long)\n\n for b in range(batch_size):\n input_sequence, targets, edge_index, edge_weight = create_mock_sequence(\n sequence_length,\n number_of_nodes,\n edge_per_node,\n in_channels,\n number_of_classes,\n )\n batch[b] = input_sequence\n batch_targets[b] = targets\n\n return batch, batch_targets, edge_index, edge_weight", "def __call__(self, batch):\r\n '''\r\n for i in range(len(batch)):\r\n if batch[i].shape[1] != 861:\r\n batch[i] = batch[i - 1]\r\n '''\r\n return torch.tensor(batch)#torch.stack(batch, dim = 0)\r", "def prepare_batches(self, pairs, batch_size):\n\t\treturn MATHBatch.create_from_items(pairs, batch_size)", "def batched_drawing(self, batch):\n Label(self.text, font_name=self.font_name, font_size=self.font_size,\n x=self.pos.x, y=self.pos.y, anchor_x=self.ANCHOR_X, anchor_y=self.ANCHOR_Y,\n batch=batch)", "def next_batch(self) -> Block:\n # If no batch size, short-circuit.\n if self._batch_size is None:\n assert len(self._buffer) == 1\n block = self._buffer[0]\n self._buffer = []\n return block\n output = DelegatingArrowBlockBuilder()\n leftover = []\n needed = self._batch_size\n for block in self._buffer:\n accessor = BlockAccessor.for_block(block)\n if needed <= 0:\n # We already have a full batch, so add this block to\n # the leftovers.\n leftover.append(block)\n elif accessor.num_rows() <= needed:\n # We need this entire block to fill out a batch.\n 
output.add_block(block)\n needed -= accessor.num_rows()\n else:\n # We only need part of the block to fill out a batch.\n output.add_block(accessor.slice(0, needed, copy=False))\n # Add the rest of the block to the leftovers.\n leftover.append(\n accessor.slice(needed, accessor.num_rows(), copy=False))\n needed = 0\n\n # Move the leftovers into the block buffer so they're the first\n # blocks consumed on the next batch extraction.\n self._buffer = leftover\n return output.build()", "def pack_batch(label_encoder, batch, device=None):\n (word, char), tasks = label_encoder.transform(batch)\n\n word = torch_utils.pad_batch(word, label_encoder.word.get_pad(), device=device)\n char = torch_utils.pad_batch(char, label_encoder.char.get_pad(), device=device)\n\n output_tasks = {}\n for task, data in tasks.items():\n output_tasks[task] = torch_utils.pad_batch(\n data, label_encoder.tasks[task].get_pad(), device=device)\n\n return (word, char), output_tasks", "def batched_drawing(self, batch):\n self.label_object = Label(self.text, font_name=self.font_name, font_size=self.font_size,\n x=self.pos.x, y=self.pos.y, anchor_x=self.ANCHOR_X, anchor_y=self.ANCHOR_Y,\n batch=batch)\n\n # Rectangle objects are added to the batch \"automatically\" since it's a subclass of GraphicsObject.\n self.rectangle_object = Rectangle(self.width, self.height, pos=self.pos, hidden=True)", "def sample_batch(pid, args, batch_queue, port_dict, device, actor_id_to_ip_dataport, local_size, cache_array):\n def recv_data(k, data_stream, actor_set, real_data_tasks_i):\n for real_data in data_stream:\n tmp = []\n tmp.append(real_data.state)\n tmp.append(real_data.action)\n tmp.append(real_data.reward)\n tmp.append(real_data.next_state)\n tmp.append(real_data.done)\n tmp.append(actor_set[k]['w'][real_data.idx])\n tmp.append(actor_set[k]['i'][real_data.idx])\n tmp.append(actor_set[k]['t'][real_data.idx])\n tmp.append(real_data.timestamp)\n local_dict[actor_set[k]['i'][real_data.idx]] = tmp\n cache_array[actor_set[k]['i'][real_data.idx]] |= 2**pid\n decom_state = torch.FloatTensor(np.frombuffer(zlib.decompress(real_data.state), dtype=np.uint8).reshape((1, 4, 84, 84)))\n real_data_tasks_i['states'].append(decom_state) #.to(device))\n real_data_tasks_i['actions'].append(torch.LongTensor([real_data.action])) #.to(device))\n real_data_tasks_i['rewards'].append(torch.FloatTensor([real_data.reward])) #.to(device))\n decom_next_state = torch.FloatTensor(np.frombuffer(zlib.decompress(real_data.next_state), dtype=np.uint8).reshape((1, 4, 84, 84)))\n real_data_tasks_i['next_states'].append(decom_next_state) #.to(device))\n real_data_tasks_i['dones'].append(torch.FloatTensor([real_data.done])) #.to(device))\n real_data_tasks_i['batch_weights'].append(torch.FloatTensor([actor_set[k]['w'][real_data.idx]])) #.to(device))\n real_data_tasks_i['batch_idxes'].append(actor_set[k]['i'][real_data.idx])\n # is the data overwrited?\n real_data_tasks_i['batch_timestamp_store'].append(actor_set[k]['t'][real_data.idx])\n real_data_tasks_i['batch_timestamp_real'].append(real_data.timestamp)\n conn = grpc.insecure_channel(port_dict['replay_ip'] + ':' + port_dict['sampleDataPort'])\n client = apex_data_pb2_grpc.SampleDataStub(channel=conn)\n local_dict = {}\n while True:\n batch_timestamp_real = []\n batch_timestamp_store = []\n batch_weights = []\n batch_idxes = []\n\n states, actions, rewards, next_states, dones = [], [], [], [], []\n\n res_batch = client.Send(apex_data_pb2.SampleDataRequest(batch_size=args.batch_size, beta = args.beta))\n actor_ids, data_ids, 
timestamps, weights, idxes = res_batch.actor_ids, res_batch.data_ids, res_batch.timestamp, res_batch.weights, res_batch.idxes\n actor_set = {}\n cached_value = {'states':{},'actions':{},'rewards':{},'next_states':{},'dones':{},'batch_weights':{},'batch_idxes':{},'batch_timestamp_store':{},'batch_timestamp_real':{}}\n for i in range(len(actor_ids)):\n set_a = actor_set.get(actor_ids[i], False)\n if set_a == False:\n actor_set[actor_ids[i]] = {}\n set_a = actor_set[actor_ids[i]]\n set_a['d'] = []\n set_a['w'] = []\n set_a['i'] = []\n set_a['t'] = []\n cached_value['states'][actor_ids[i]] = []\n cached_value['actions'][actor_ids[i]] = []\n cached_value['rewards'][actor_ids[i]] = []\n cached_value['next_states'][actor_ids[i]] = []\n cached_value['dones'][actor_ids[i]] = []\n cached_value['batch_weights'][actor_ids[i]] = []\n cached_value['batch_idxes'][actor_ids[i]] = []\n cached_value['batch_timestamp_store'][actor_ids[i]] = []\n cached_value['batch_timestamp_real'][actor_ids[i]] = []\n cache_id = actor_ids[i]*local_size+data_ids[i]\n cache_trans = cache_array[cache_id]\n if cache_trans & 2**pid == 0:\n set_a['d'].append(data_ids[i])\n set_a['w'].append(weights[i])\n set_a['i'].append(idxes[i])\n set_a['t'].append(timestamps[i])\n if cache_trans == 0 and local_dict.get(cache_id, False) != False:\n del local_dict[cache_id]\n else:\n try:\n state_tmp = local_dict[cache_id][0]\n action_tmp = local_dict[cache_id][1]\n reward_tmp = local_dict[cache_id][2] \n next_state_tmp = local_dict[cache_id][3] \n done_tmp = local_dict[cache_id][4] \n batch_weight_tmp = local_dict[cache_id][5] \n batch_idx_tmp = local_dict[cache_id][6] \n batch_store_tmp = local_dict[cache_id][7] \n batch_real_tmp = local_dict[cache_id][8] \n decom_state = torch.FloatTensor(np.frombuffer(zlib.decompress(state_tmp), dtype=np.uint8).reshape((1, 4, 84, 84)))\n cached_value['states'][actor_ids[i]].append(decom_state)\n cached_value['actions'][actor_ids[i]].append(torch.LongTensor([action_tmp]))\n cached_value['rewards'][actor_ids[i]].append(torch.FloatTensor([reward_tmp]))\n decom_next_state = torch.FloatTensor(np.frombuffer(zlib.decompress(next_state_tmp), dtype=np.uint8).reshape((1, 4, 84, 84)))\n cached_value['next_states'][actor_ids[i]].append(decom_next_state)\n cached_value['dones'][actor_ids[i]].append(torch.FloatTensor([done_tmp]))\n cached_value['batch_weights'][actor_ids[i]].append(torch.FloatTensor([batch_weight_tmp]))\n cached_value['batch_idxes'][actor_ids[i]].append(batch_idx_tmp)\n cached_value['batch_timestamp_store'][actor_ids[i]].append(batch_store_tmp)\n cached_value['batch_timestamp_real'][actor_ids[i]].append(batch_real_tmp)\n except:\n set_a['d'].append(data_ids[i])\n set_a['w'].append(weights[i])\n set_a['i'].append(idxes[i])\n set_a['t'].append(timestamps[i])\n real_data_links = {}\n real_data_tasks = {}\n for k, v in actor_set.items():\n actor_ip, data_port = actor_id_to_ip_dataport[k]\n conn_actor = grpc.insecure_channel(actor_ip + ':' + data_port)\n client_actor = apex_data_pb2_grpc.SendRealDataStub(channel=conn_actor)\n real_data_links[k] = client_actor.Send(apex_data_pb2.RealBatchRequest(idxes=v['d']))\n real_data_tasks[k] = {}\n real_data_tasks[k]['states'] = cached_value['states'][k]\n real_data_tasks[k]['actions'] = cached_value['actions'][k]\n real_data_tasks[k]['rewards'] = cached_value['rewards'][k]\n real_data_tasks[k]['next_states'] = cached_value['next_states'][k]\n real_data_tasks[k]['dones'] = cached_value['dones'][k]\n real_data_tasks[k]['batch_weights'] = cached_value['batch_weights'][k]\n 
real_data_tasks[k]['batch_idxes'] = cached_value['batch_idxes'][k]\n real_data_tasks[k]['batch_timestamp_store'] = cached_value['batch_timestamp_store'][k]\n real_data_tasks[k]['batch_timestamp_real'] = cached_value['batch_timestamp_real'][k]\n threads = []\n for k, v in real_data_links.items():\n t = threading.Thread(target=recv_data, args=(k, v, actor_set, real_data_tasks[k],))\n threads.append(t)\n t.start()\n\n for t in threads:\n t.join()\n\n for k, v in real_data_tasks.items():\n states += v['states']\n actions += v['actions']\n rewards += v['rewards']\n next_states += v['next_states']\n dones += v['dones']\n batch_weights += v['batch_weights']\n batch_idxes += v['batch_idxes']\n batch_timestamp_real += v['batch_timestamp_real']\n batch_timestamp_store += v['batch_timestamp_store']\n\n states = torch.cat(states,0).to(device)\n actions = torch.cat(actions,0).to(device)\n rewards = torch.cat(rewards,0).to(device)\n next_states = torch.cat(next_states,0).to(device)\n dones = torch.cat(dones,0).to(device)\n batch_weights = torch.cat(batch_weights,0).to(device)\n\n batch = [states, actions, rewards, next_states, dones, batch_weights, batch_idxes]\n batch_queue.put(batch)\n data, batch = None, None", "def _create_chunks(opts, inputs, idx1, idx2):\n # idx2 = 75\n # idx1 = 71\n num_batch = idx2 - idx1\n # img1 = torch.zeros(num_batch, 1, 10, 224, 224)\n # img2 = torch.zeros(num_batch, 1, 10, 224, 224)\n # labels = torch.zeros(num_batch)\n\n feat1_list = []\n label_list = []\n for i in range(num_batch):\n curr_idx = i + idx1\n frames = range(curr_idx - 5, curr_idx + 5)\n temp1 = _load_chunk(opts, inputs, frames)\n feat1_list.append(temp1)\n\n temp_label = inputs[1][curr_idx, :].nonzero()\n if len(temp_label.size()) == 0:\n temp_label = 6\n else:\n if temp_label.size()[0] != 0:\n temp_label = temp_label[0][0]\n label_list.append(temp_label)\n\n feat1 = torch.cat(feat1_list, dim=0)\n labels = torch.LongTensor(label_list)\n return feat1, labels", "def generate_batch(batch_size, num_skips, skip_window):\n # global keyword gives this function access to global variable data_index\n global data_index\n assert batch_size % num_skips == 0 \n assert num_skips <= 2 * skip_window\n batch = np.ndarray(shape=(batch_size), dtype=np.int32)\n labels = np.ndarray(shape=(batch_size, 1), dtype=np.int32)\n span = 2 * skip_window + 1\n # Create a double-ended queue (both stack and queue) for word buffer\n # maxlen - keeping a fixed sliding window \n buffer = collections.deque(maxlen=span)\n for _ in range(span):\n # Shift the skipgram window to the left by 1\n buffer.append(data[data_index])\n # Increase data_index for next shift\n data_index = (data_index + 1) % len(data)\n for i in range(batch_size // num_skips):\n # target label at the center of the buffer \n target = skip_window \n # avoid the target word and later selected words\n targets_to_avoid = [ skip_window ]\n for j in range(num_skips):\n while target in targets_to_avoid:\n target = random.randint(0, span - 1)\n targets_to_avoid.append(target)\n # batch is the same word for current num_skip\n batch[i * num_skips + j] = buffer[skip_window]\n labels[i * num_skips + j, 0] = buffer[target]\n buffer.append(data[data_index])\n data_index = (data_index + 1) % len(data)\n return batch, labels", "def prepare_label(self, input_batch, new_size):\n with tf.name_scope('label_encode'):\n input_batch = tf.image.resize_nearest_neighbor(input_batch,\n new_size) # As labels are integer numbers, need to use NN interp.\n input_batch = tf.squeeze(input_batch, axis=[3]) # 
Reducing the channel dimension.\n input_batch = tf.one_hot(input_batch, depth=self.n_classes)\n return input_batch", "def next_batch(self):\n if self.ptr + self.batch_size >= self.size:\n head = 0\n tail = self.batch_size\n self.ptr = self.batch_size\n else:\n head = self.ptr\n tail = self.ptr + self.batch_size\n self.ptr += self.batch_size\n return self.train_x[head:tail, 0:self.fig_w**2], self.train_y[head:tail, 0:10]", "def b(i,new_arr,arr):\n new_arr = tf.concat([new_arr,arr[:, i:last_step + i:stride, :]], axis=2)\n return i+1,new_arr,arr", "def _batch_train(self, batch, training_step, step):\n lstm_size = (self.batch_size, self.Qmain.h_size)\n batch_mem = np.zeros(lstm_size)\n batch_carry = np.zeros(lstm_size)\n input_shape = (self.batch_size,\n self.trace_length,\n self.observation_size)\n m_data = np.vstack(batch[:, 0])\n m_data = m_data.reshape(input_shape)\n t_data = np.vstack(batch[:, 4])\n t_data = t_data.reshape(input_shape)\n q_input = [np.copy(batch_mem), np.copy(batch_carry), np.copy(m_data)]\n q1_input = [np.copy(batch_mem), np.copy(batch_carry), np.copy(t_data)]\n\n # Batch predict\n self.Qmain.trace_length.assign(self.trace_length)\n self.Qmain.dropout_rate.assign(0.0)\n self.Qtarget.trace_length.assign(self.trace_length)\n self.Qtarget.dropout_rate.assign(0.0)\n\n # Save the graph just the first time\n if training_step == 0:\n tf.summary.trace_on()\n\n # T batch predict\n pred = self.Qmain.model.predict(q_input,\n batch_size=self.batch_size)\n Q = pred[0]\n batch_bus = pred[1]\n batch_line = pred[2]\n batch_disp = pred[3]\n\n ## Log graph once and disable graph logging\n if training_step == 0:\n with self.tf_writer.as_default():\n tf.summary.trace_export(self.name + \"-graph\", step)\n\n # T+1 batch predict\n Qn, *_ = self.Qtarget.model.predict(q1_input,\n batch_size=self.batch_size)\n \n # Compute batch Q update to Qtarget\n for i in range(self.batch_size):\n idx = i * (self.trace_length - 1)\n a = batch[idx][1]\n grid = a[0]\n batch_bus[i][:] = a[1][:]\n batch_line[i][:] = a[2][:]\n batch_disp[i][:] = a[3][:]\n r = batch[idx][2]\n d = batch[idx][3]\n Q[i][grid] = r\n if d == False:\n Q[i][grid] += DISCOUNT_FACTOR * Qn[i][grid]\n\n # Batch train\n batch_x = [batch_mem, batch_carry, m_data]\n batch_y = [\n Q,\n batch_bus, batch_line, batch_disp,\n batch_mem, batch_carry\n ]\n loss = self.Qmain.model.train_on_batch(batch_x, batch_y)\n loss = loss[0]\n\n # Log to tensorboard\n self._tf_log_summary(loss, step)", "def worker(self):\n while True: # Feed forever. 
Enqueue will block when queue is full.\n while len(self.memory) < self.min_memory:\n time.sleep(1)\n batch = self.memory.sample(self.batchsize)\n states, actions, rewards, terminals = zip(*batch)\n self.session.run(self.enqueue_op, {\n self.states: states, self.actions: actions,\n self.rewards: rewards, self.terminals: terminals,\n })", "def next(self, batch_size):\n if self.batch_id == len(self.data):\n self.batch_id = 0\n batch_data = (self.data[self.batch_id:min(self.batch_id + batch_size, len(self.data))])\n batch_labels = (self.labels[self.batch_id:min(self.batch_id + batch_size, len(self.data))])\n batch_seqlen = (self.seqlen[self.batch_id:min(self.batch_id + batch_size, len(self.data))])\n self.batch_id = min(self.batch_id + batch_size, len(self.data))\n\n\n return batch_data, batch_labels, batch_seqlen", "def next(self, batch_size):\n if self.batch_id == len(self.data):\n self.batch_id = 0\n batch_data = (self.data[self.batch_id:min(self.batch_id + batch_size, len(self.data))])\n batch_labels = (self.labels[self.batch_id:min(self.batch_id + batch_size, len(self.data))])\n batch_seqlen = (self.seqlen[self.batch_id:min(self.batch_id + batch_size, len(self.data))])\n self.batch_id = min(self.batch_id + batch_size, len(self.data))\n\n\n return batch_data, batch_labels, batch_seqlen", "def pack_batch(self, batch, device=None):\n return pack_batch(self.label_encoder, batch, device or self.device)", "def process_state_batch(self, batch):\n # batch = np.squeeze(batch, axis=1)\n batch = np.array([np.concatenate(obs, axis=-1) for obs in batch])\n return batch", "def _get_batch(batch, ctx):\n if isinstance(batch, mx.io.DataBatch):\n data = batch.data[0]\n label = batch.label[0]\n else:\n data, label = batch\n return (gluon.utils.split_and_load(data, ctx),\n gluon.utils.split_and_load(label, ctx),\n data.shape[0])", "def __init__(self):\n self.num_mini_batches = 0", "def bucketed_next(self):\n # Initialize batch containers\n label_batch = list()\n enc_input_batch = list()\n dec_input_batch = list()\n if self.bucket_id < self.opt.num_buckets:\n # Fill individual batches by iterating over bucket contents\n while len(enc_input_batch) < self.opt.batch_size:\n try:\n indexed_sent = self.data[self.bucket_id][self.sent_id]\n label_item = indexed_sent[1:]\n enc_input_item = indexed_sent[1:]\n # Reverse the input to the encoder, see arxiv.org/pdf/1703.03906.pdf\n enc_input_item.reverse()\n dec_input_item = indexed_sent[:-1]\n label_batch.append(label_item)\n enc_input_batch.append(enc_input_item)\n dec_input_batch.append(dec_input_item)\n self.sent_id += 1\n except IndexError:\n # Finish batch prematurely if current bucket has been exhausted, i.e. no mixed-bucket batches\n self.sent_id = 0\n self.bucket_id += 1\n break\n # Check if bucket is empty, to prevent empty batches from being generated\n try:\n if self.sent_id == len(self.data[self.bucket_id]):\n self.bucket_id += 1\n except IndexError:\n pass\n else:\n raise IndexError\n return label_batch, enc_input_batch, dec_input_batch" ]
[ "0.6270515", "0.5756146", "0.57460064", "0.5700081", "0.5598213", "0.5595119", "0.54683524", "0.5448528", "0.5440111", "0.5430576", "0.54280764", "0.5404559", "0.53941274", "0.5314276", "0.5312027", "0.5303626", "0.5300449", "0.529997", "0.5289015", "0.52655345", "0.5265391", "0.5247271", "0.5242453", "0.52284527", "0.52284527", "0.52257305", "0.52235156", "0.52104235", "0.5200602", "0.519407" ]
0.62710935
0
Reads data from a binary file of cell image data. Creates an object with information about the sequence and batch that will be filled with data obtained from the queue by the FixedLengthRecordReader.
def _read_from_file(queue, config, class_label):

    class SequenceRecord(object):
        pass

    result = SequenceRecord()

    # Dimensions of the images and the bytes they each take
    # up in the binary file
    result.height = config.image_size
    result.width = config.image_size
    result.depth = config.image_depth
    result.sequence_length = config.num_steps
    result.image_bytes = (result.height * result.width * result.depth)
    result.patient_ID_bytes = 5  # uint8
    initial_image_name_bytes = 92  # uint8
    result.num_features = config.num_features
    result.one_feature_bytes = 8
    result.feature_bytes = config.num_features * result.one_feature_bytes  # float64
    result.coord_bytes = config.num_steps * 2 * 6  # x and y coords, uint32
    record_bytes = result.image_bytes * result.sequence_length + result.coord_bytes + result.patient_ID_bytes + initial_image_name_bytes + result.feature_bytes

    # The amount of padding on the image_name must be adjusted based on the number of features,
    # because the overall number of bytes must be a multiple of 8 for float64 processing of the raw output.
    increment = 8 - (record_bytes % 8)
    result.image_name_bytes = initial_image_name_bytes + increment
    record_bytes += increment

    # Create a reader with the fixed record length and read off one record.
    reader = tf.FixedLengthRecordReader(record_bytes=record_bytes)
    result.key, value = reader.read(queue)

    # Convert from a string to a vector of uint8 that is record_bytes long.
    record_data = tf.decode_raw(value, tf.uint8, name='decode_raw_uint8')
    feature_data = tf.decode_raw(value, tf.float64, name='decode_raw_float64')

    index = 0
    next_index = result.patient_ID_bytes
    result.subject_id, index = process_slice(index, result.patient_ID_bytes, record_data)
    result.image_name, index = process_slice(index, result.image_name_bytes, record_data)
    result.patch_coords, index = process_slice(index, result.coord_bytes, record_data)

    # Features are taken from the float64 stream as a single block of data.
    feature_index = index // result.one_feature_bytes
    result.features, feature_index = process_removal_slice(feature_index, result.num_features, feature_data, config.remove_feature)
    _, index = process_slice(index, result.feature_bytes, record_data)

    sequence_data = tf.strided_slice(record_data, [index], [record_bytes])

    # Treat the sequence as an image of dimensions [(steps * patch height), width, depth],
    # normalize per image, then reshape back to a single sequence.
    with tf.device("/cpu:0"):
        normalized_sequence = tf.reshape(sequence_data, [result.sequence_length * result.height, result.width, result.depth])
        normalized_sequence = tf.image.per_image_standardization(normalized_sequence)
        result.sequence = tf.reshape(normalized_sequence, [result.sequence_length, result.height * result.width * result.depth])
        result.sequence = tf.cast(result.sequence, tf.float32)

    result.label = tf.constant(class_label, shape=[1])

    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _read_data(self, fh, byteorder='>'):\r\n fh.seek(len(self.header))\r\n data = fh.read()\r\n dtype = 'u1' if self.maxval < 256 else byteorder + 'u2'\r\n depth = 1 if self.magicnum == b\"P7 332\" else self.depth\r\n shape = [-1, self.height, self.width, depth]\r\n size = numpy.prod(shape[1:])\r\n if self.magicnum in b\"P1P2P3\":\r\n data = numpy.array(data.split(None, size)[:size], dtype)\r\n data = data.reshape(shape)\r\n elif self.maxval == 1:\r\n shape[2] = int(math.ceil(self.width / 8))\r\n data = numpy.frombuffer(data, dtype).reshape(shape)\r\n data = numpy.unpackbits(data, axis=-2)[:, :, :self.width, :]\r\n else:\r\n data = numpy.frombuffer(data, dtype)\r\n data = data[:size * (data.size // size)].reshape(shape)\r\n if data.shape[0] < 2:\r\n data = data.reshape(data.shape[1:])\r\n if data.shape[-1] < 2:\r\n data = data.reshape(data.shape[:-1])\r\n if self.magicnum == b\"P7 332\":\r\n rgb332 = numpy.array(list(numpy.ndindex(8, 8, 4)), numpy.uint8)\r\n rgb332 *= [36, 36, 85]\r\n data = numpy.take(rgb332, data, axis=0)\r\n return data", "def _read_data(self):\n with self._open(self.filename, 'rb') as f:\n try:\n f.seek(self._offset_data, self._offset_whence)\n except IOError:\n print('Error: hedp.io.HamamatsuFile seeking outside of file limits.')\n print(' Failed to parse file.')\n print(\" Either the 'offset' or 'dtype' input arguments must be wrong!\")\n raise\n except:\n raise\n\n data_len = np.prod(self.shape)*np.dtype(self._dtype).itemsize\n data_str = f.read(data_len)\n if data_len != len(data_str):\n print(data_len, len(data_str))\n raise ValueError('File ended before all data was read. Probably wrong offset or dtype!')\n\n\n self.data = np.fromstring(data_str, dtype=self._dtype).reshape(self.shape[::-1])\n self.data = np.ndarray.astype(self.data, 'float32')\n\n #self.data = np.fromfile(f, dtype=self._dtype,\n # count=np.prod(self.shape)).reshape(self.shape[::-1])", "def _record_reader(stream):\n while True:\n header = stream.read(4)\n if len(header) < 4:\n return\n size, rec_type = struct.unpack(\">HH\", header)\n data_type = rec_type & 0x00FF\n rec_type = rec_type // 256\n data = None\n if size > 4:\n if data_type == 0x01:\n data = numpy.array(\n struct.unpack(\n \">{0}H\".format((size - 4) // 2), stream.read(size - 4)\n ),\n dtype=\"uint\",\n )\n elif data_type == 0x02:\n data = numpy.array(\n struct.unpack(\n \">{0}h\".format((size - 4) // 2), stream.read(size - 4)\n ),\n dtype=\"int\",\n )\n elif data_type == 0x03:\n data = numpy.array(\n struct.unpack(\n \">{0}l\".format((size - 4) // 4), stream.read(size - 4)\n ),\n dtype=\"int\",\n )\n elif data_type == 0x05:\n data = numpy.array(\n [\n _eight_byte_real_to_float(stream.read(8))\n for _ in range((size - 4) // 8)\n ]\n )\n else:\n data = stream.read(size - 4)\n if str is not bytes:\n if data[-1] == 0:\n data = data[:-1].decode(\"ascii\")\n else:\n data = data.decode(\"ascii\")\n elif data[-1] == \"\\0\":\n data = data[:-1]\n yield [rec_type, data]", "def _read(self):\n # initializng data dictionary\n self.data={}\n\n f = FortranFile(self.filename)\n # Default omnivor binary header\n self.data['MK'] = f.readInts('i')\n self.data['itime'] = f.readInts('i')\n self.data['version'] = f.readString()\n self.data['file_id'] = f.readInts('i')\n self.data['sversion'] = f.readString()\n # Velocity field\n self.data['stype'] = f.readString()\n self.data['is_grid'] = f.readInts('i')\n nCPs = f.readInts('i')\n self.data['nCPs'] = nCPs\n if self.data['MK'] == 8:\n real_char='d'\n else:\n real_char='f'\n if self.data['is_grid']:\n 
#print('File is a velocity grid file')\n n1 = f.readInts('i')\n n2 = f.readInts('i')\n n3 = f.readInts('i')\n self.data['n1'] = n1\n self.data['n2'] = n2\n self.data['n3'] = n3\n self.data['is_straight'] = f.readInts('i')\n self.data['v1'] = f.readReals(real_char)\n self.data['v2'] = f.readReals(real_char)\n self.data['v3'] = f.readReals(real_char)\n\n CPs_raw = f.readReals(real_char)\n Utot_raw = f.readReals(real_char)\n CPs = np.reshape(CPs_raw,(3,nCPs),order = 'F')\n Utot = np.reshape(Utot_raw,(3,nCPs),order = 'F')\n\n acc=-1\n CPsTab = np.zeros((3, n1,n2,n3))\n UtotTab = np.zeros((3, n1,n2,n3))\n # Reshaping the nasty way (this is natural order). \n for i in range(0,n1):\n for j in range(0,n2):\n for k in range(0,n3):\n acc=acc+1\n CPsTab[0:3,i,j,k] = CPs[0:3,acc]\n UtotTab[0:3,i,j,k] = Utot[0:3,acc]\n\n self.data['CPs'] = CPs\n self.data['CPsTab'] = CPsTab\n self.data['Utot'] = Utot\n self.data['UtotTab'] = UtotTab", "def deserialize_numpy(self, str, numpy):\n try:\n if self.base is None:\n self.base = rwrc12_msgs.msg.CellBase()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.base.header.seq, _x.base.header.stamp.secs, _x.base.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.base.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.base.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 20\n (_x.base.cell_width, _x.base.cell_height, _x.base.position.x, _x.base.position.y, _x.base.position.z,) = _struct_5f.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.base.points = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Point32()\n _x = val1\n start = end\n end += 12\n (_x.x, _x.y, _x.z,) = _struct_3f.unpack(str[start:end])\n self.base.points.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n end += struct.calcsize(pattern)\n self.base.intensity = numpy.frombuffer(str[start:end], dtype=numpy.float32, count=length)\n start = end\n end += 1\n (self.base.cost,) = _struct_b.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.base.label = str[start:end].decode('utf-8')\n else:\n self.base.label = str[start:end]\n _x = self\n start = end\n end += 8\n (_x.mean_height, _x.mean_intensity,) = _struct_2f.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def deserialize(self, str):\n try:\n if self.base is None:\n self.base = rwrc12_msgs.msg.CellBase()\n end = 0\n _x = self\n start = end\n end += 12\n (_x.base.header.seq, _x.base.header.stamp.secs, _x.base.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.base.header.frame_id = str[start:end].decode('utf-8')\n else:\n self.base.header.frame_id = str[start:end]\n _x = self\n start = end\n end += 20\n (_x.base.cell_width, _x.base.cell_height, _x.base.position.x, _x.base.position.y, _x.base.position.z,) = _struct_5f.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n self.base.points = []\n for i in range(0, length):\n val1 = geometry_msgs.msg.Point32()\n _x = val1\n start = end\n end += 12\n (_x.x, _x.y, 
_x.z,) = _struct_3f.unpack(str[start:end])\n self.base.points.append(val1)\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n pattern = '<%sf'%length\n start = end\n end += struct.calcsize(pattern)\n self.base.intensity = struct.unpack(pattern, str[start:end])\n start = end\n end += 1\n (self.base.cost,) = _struct_b.unpack(str[start:end])\n start = end\n end += 4\n (length,) = _struct_I.unpack(str[start:end])\n start = end\n end += length\n if python3:\n self.base.label = str[start:end].decode('utf-8')\n else:\n self.base.label = str[start:end]\n _x = self\n start = end\n end += 8\n (_x.mean_height, _x.mean_intensity,) = _struct_2f.unpack(str[start:end])\n return self\n except struct.error as e:\n raise genpy.DeserializationError(e) #most likely buffer underfill", "def _read_input(filename_queue):\n label_bytes = 1\n height = 32\n depth = 3\n image_bytes = height * height * depth\n record_bytes = label_bytes + image_bytes\n\n reader = tf.compat.v1.FixedLengthRecordReader(record_bytes=record_bytes)\n _, byte_data = reader.read(filename_queue)\n uint_data = tf.io.decode_raw(byte_data, tf.uint8)\n\n label = tf.cast(tf.strided_slice(uint_data, [0], [label_bytes]), tf.int32)\n label.set_shape([1])\n\n depth_major = tf.reshape(\n tf.strided_slice(uint_data, [label_bytes], [record_bytes]),\n [depth, height, height])\n image = tf.cast(tf.transpose(a=depth_major, perm=[1, 2, 0]), tf.float32)\n\n return image, label", "def binary_read(filenames, gulp_size, gulp_nframe, dtype, *args, **kwargs):\n return BinaryFileReadBlock(filenames, gulp_size, gulp_nframe, dtype, *args, **kwargs)", "def readInstance(self):\n file = open(self.fName, 'r')\n self.genSize = int(file.readline())\n self.data = {}\n for line in file:\n (id, x, y) = line.split()\n self.data[int(id)] = (int(x), int(y))\n file.close()", "def read(self, fname):\r\n self.header = {}\r\n self.resetvals()\r\n infile = self._open(fname, \"rb\")\r\n self._readheader(infile)\r\n # Compute image size\r\n try:\r\n self.dim1 = int(self.header['NumberOfRowsInFrame'])\r\n self.dim2 = int(self.header['NumberOfColsInFrame'])\r\n self.bpp = int(self.header['BitsPerPixel'])\r\n except:\r\n raise Exception(\"GE file\", str(fname) + \\\r\n \"is corrupt, cannot read it\")\r\n\r\n # More than one image can be saved in a GE file\r\n # Will only load the first one\r\n\r\n\r\n # Go to the beginning of the file\r\n infile.seek(0)\r\n infile.seek(self.header['HeaderSizeInBytes'] + self.header['UserHeaderSizeInBytes'])\r\n\r\n ReadBytes = self.dim1 * self.dim2 * (self.bpp / 8)\r\n block = infile.read(ReadBytes)\r\n block = N.fromstring(block, N.uint16)\r\n\r\n infile.close()\r\n\r\n try:\r\n self.data = N.reshape(block, [self.dim2, self.dim1])\r\n except:\r\n print len(block), self.dim2, self.dim1\r\n raise IOError, \\\r\n 'Size spec in GE-header does not match size of image data field'\r\n\r\n self.bytecode = self.data.dtype.type\r\n self.pilimage = None\r\n return self", "def read(self, fname):\n # decoding following the description at:\n # http://www.matthewflickinger.com/lab/whatsinagif/bits_and_bytes.asp\n\n self.file_name = fname\n\n self._get_file_content()\n\n self._decode_header()\n\n # create output image\n self.output_image = [[[0 for c in range(3)] \\\n for y in range(self.canvas_height)] \\\n for x in range(self.canvas_width)]\n\n # the global color table will take up 3*2^(N+1) bytes in the stream.\n self.data_idx = 13+3*self.glob_col_table_sz\n\n color_table_bytes = self.file_content[13:self.data_idx]\n 
self._read_color_table(color_table_bytes)\n\n self._handle_extensions_blocks()\n\n self._handle_image_descriptors()\n\n if self.file_content[self.data_idx] != 0x3b:\n raise Exception('Decoding of the GIF failed')", "def read_and_decode(filename_queue, shape=None):\n label_bytes = 1\n width = shape[0]\n height = shape[1]\n depth = shape[2]\n record_byte_length = label_bytes + width * height\n\n with tf.name_scope(\"read_and_decode\"):\n # Length of record bytes in the dataset\n # Defined in utils module\n reader = tf.TFRecordReader()\n key, record_string = reader.read(filename_queue)\n\n feature_map = {\n \"image/encoded\": tf.FixedLenFeature(\n shape=[], dtype=tf.string)\n }\n parsed = tf.parse_single_example(record_string, feature_map)\n record_bytes = tf.decode_raw(parsed[\"image/encoded\"], tf.int8)\n\n # first byte is the label\n label = tf.cast(tf.strided_slice(record_bytes,\n begin=[0],\n end=[label_bytes]), tf.int32)\n # label = tf.reshape(label, [1])\n # print(label)\n\n # remaining bytes is the example\n example = tf.reshape(tf.strided_slice(record_bytes,\n begin=[label_bytes],\n end=[record_byte_length]), [width, height, depth])\n example = tf.cast(example, tf.float32)\n example.set_shape([width, height, depth])\n label.set_shape(1)\n label = tf.squeeze(label)\n # print(label)\n # label = tf.reshape(label, [0])\n\n return example, label", "def read_data(fields, filehandle, filename=None):\n data = np.zeros(0)\n # Determine the data type from the fields\n dtype = _determine_dtype(fields)\n # determine byte skip, line skip, and data file (there are two ways to write them)\n lineskip = fields.get('lineskip', fields.get('line skip', 0))\n byteskip = fields.get('byteskip', fields.get('byte skip', 0))\n datafile = fields.get('datafile', fields.get('data file', None))\n datafilehandle = filehandle\n if datafile is not None:\n # If the datafile path is absolute, don't muck with it. Otherwise\n # treat the path as relative to the directory in which the detached\n # header is in\n if os.path.isabs(datafile):\n datafilename = datafile\n else:\n datafilename = os.path.join(os.path.dirname(filename), datafile)\n datafilehandle = open(datafilename, 'rb')\n\n num_pixels = np.array(fields['sizes']).prod()\n # Seek to start of data based on lineskip/byteskip. 
byteskip == -1 is\n # only valid for raw encoding and overrides any lineskip\n if fields['encoding'] == 'raw' and byteskip == -1:\n datafilehandle.seek(-dtype.itemsize * num_pixels, 2)\n else:\n for _ in range(lineskip):\n datafilehandle.readline()\n\n if fields['encoding'] == 'raw':\n datafilehandle.seek(byteskip, os.SEEK_CUR)\n data = np.fromfile(datafilehandle, dtype)\n elif fields['encoding'] in ['ascii', 'text', 'txt']:\n datafilehandle.seek(byteskip, os.SEEK_CUR)\n data = np.fromfile(datafilehandle, dtype, sep=' ')\n else:\n # Probably the data is compressed then\n if fields['encoding'] == 'gzip' or \\\n fields['encoding'] == 'gz':\n decompobj = zlib.decompressobj(zlib.MAX_WBITS | 16)\n elif fields['encoding'] == 'bzip2' or \\\n fields['encoding'] == 'bz2':\n decompobj = bz2.BZ2Decompressor()\n else:\n raise NrrdError('Unsupported encoding: \"%s\"' % fields['encoding'])\n\n decompressed_data = b''\n while True:\n chunk = datafilehandle.read(_READ_CHUNKSIZE)\n if not chunk:\n break\n decompressed_data += decompobj.decompress(chunk)\n # byteskip applies to the _decompressed_ byte stream\n data = np.frombuffer(decompressed_data[byteskip:], dtype)\n\n if datafilehandle:\n datafilehandle.close()\n\n if num_pixels != data.size:\n raise NrrdError('ERROR: {0}-{1}={2}'.format(num_pixels, data.size, num_pixels - data.size))\n\n # dkh : eliminated need to reverse order of dimensions. nrrd's\n # data layout is same as what numpy calls 'Fortran' order,\n shape_tmp = list(fields['sizes'])\n data = np.reshape(data, tuple(shape_tmp), order='F')\n return data", "def _read_record(self, stream):\n header = stream.read(4)\n if len(header) < 4:\n return None\n size, rec_type = struct.unpack('>HH', header)\n data_type = (rec_type & 0x00ff)\n rec_type = rec_type // 256\n data = None\n if size > 4:\n if data_type == 0x01:\n data = numpy.array(\n struct.unpack('>{0}H'.format((size - 4) // 2),\n stream.read(size - 4)),\n dtype='uint')\n elif data_type == 0x02:\n data = numpy.array(\n struct.unpack('>{0}h'.format((size - 4) // 2),\n stream.read(size - 4)),\n dtype='int')\n elif data_type == 0x03:\n data = numpy.array(\n struct.unpack('>{0}l'.format((size - 4) // 4),\n stream.read(size - 4)),\n dtype='int')\n elif data_type == 0x05:\n data = numpy.array([\n _eight_byte_real_to_float(stream.read(8))\n for _ in range((size - 4) // 8)\n ])\n else:\n data = stream.read(size - 4)\n if str is not bytes:\n if data[-1] == 0:\n data = data[:-1].decode('ascii')\n else:\n data = data.decode('ascii')\n elif data[-1] == '\\0':\n data = data[:-1]\n return [rec_type, data]", "def _image_data(self):\n if self.header['Image']['bytes per pixel'] == 2:\n # 16-bit unsigned integers, short\n dt = np.dtype(self._bo + 'H')\n elif self.header['Image']['bytes per pixel'] == 4:\n # 32-bit unsigned integers, int\n dt = np.dtype(self._bo + 'I')\n\n shape = [self.header['Image']['planes'],\n self.header['Image']['masses'],\n self.header['Image']['height'],\n self.header['Image']['width']]\n\n self.fh.seek(self.header['header size'])\n\n compressedfiles = (\n gzip.GzipFile,\n bz2.BZ2File,\n tarfile.ExFileObject,\n lzma.LZMAFile,\n io.BytesIO\n )\n\n # fromfile is about 2x faster than frombuffer(fh.read())\n if isinstance(self.fh, compressedfiles):\n data = np.frombuffer(self.fh.read(), dtype=dt).reshape(shape)\n else:\n data = np.fromfile(self.fh, dtype=dt).reshape(shape)\n\n # We want to have a cube of contiguous data (stacked images) for each\n # mass. Swap axes 0 and 1. 
Returns a view, so make full copy to make\n # data access faster.\n data = data.swapaxes(0, 1).copy()\n\n self.data = xarray.DataArray(data,\n dims=('species', 'frame', 'y', 'x'),\n coords={'species': ('species', list(self.header['label list']))},\n attrs={'unit': 'counts'})", "def _load(self):\n # Extract the ASCII header (5 first lines)\n with open(self._xst_bin, 'rb') as f:\n header = list(islice(f, 0, 5))\n assert header[0] == b'HeaderStart\\n',\\\n 'Wrong header start'\n assert header[-1] == b'HeaderStop\\n',\\\n 'Wrong header stop'\n header = [s.decode('utf-8') for s in header]\n hd_size = sum([len(s) for s in header])\n\n # Parse informations into a metadata dictionnary\n keys = ['freq', 'ma', 'accu']\n search = ['Freq.List', 'Mr.List', 'accumulation']\n types = ['float64', 'int', 'int']\n for key, word, typ in zip(keys, search, types):\n for h in header:\n if word in h:\n self.meta[key] = np.array(\n h.split('=')[1].split(','),\n dtype=typ\n )\n\n # Deduce the dtype for decoding\n n_ma = self.meta['ma'].size\n n_sb = self.meta['freq'].size\n dtype = np.dtype(\n [('jd', 'float64'),\n ('data', 'complex64', (n_sb, n_ma*n_ma*2 + n_ma))]\n )\n\n # Decoding the binary file\n tmp = np.memmap(\n filename=self._xst_bin,\n dtype='int8',\n mode='r',\n offset=hd_size\n )\n decoded = tmp.view(dtype)\n\n self.data = decoded['data'] / self.meta['accu']\n self.time = Time(decoded['jd'], format='jd', precision=0)\n\n return", "def readBMdata(self, filename=None):\n if filename == None:\n filename = self.ifilename\n\n try:\n ifile = open(filename, 'r')\n except IOError:\n raise IOError('cannot find: %s. Either put the file in the working directory or fix the input file.' % filename)\n\n self.t = []\n self.b = []\n self.h = []\n self.m = []\n\n max_ic = max( self.ic_t, self.ic_b, self.ic_h, self.ic_m )\n i_line = 0\n for line in ifile:\n i_line += 1\n if (i_line < self.start_line):\n continue\n\n data = line.strip().split()\n if len(data) <= max_ic:\n raise IOError('Not enough collumns in %s based on input information.' 
% filename)\n\n if self.ic_t >= 0:\n self.t.append( float( data[self.ic_t] ) * self.t_conversion)\n if self.ic_b >= 0:\n self.b.append( float( data[self.ic_b] ))\n if self.ic_h >= 0:\n self.h.append( float( data[self.ic_h] ))\n if self.ic_m >= 0:\n self.m.append( float( data[self.ic_m] ))\n\n ifile.close()", "def _read(self, in_file):\n in_file.read(18) # pad bytes\n self.numnod = int(in_file.read(12))\n in_file.read(37) # pad bytes\n self.format = int(in_file.read(1))\n in_file.read(1) # eol\n self.nodes = []\n\n for _ in range(self.numnod):\n node = FRDNode()\n self.nodes.append(node)\n if self.format < 2:\n in_file.read(1)\n node.key = int(in_file.read(2))\n node.number = int(in_file.read(5*(self.format+1)))\n node.pos = [float(in_file.read(12)) for j in range(3)]\n in_file.read(1) # eol\n else:\n node.number = struct.unpack('i', in_file.read(4))[0]\n if self.format == 2:\n node.pos = struct.unpack('fff', in_file.read(12))\n else:\n node.pos = struct.unpack('ddd', in_file.read(24))\n\n if self.format < 2:\n in_file.readline() # last record for ascii only", "def _read_file(self):\n\n with open(self.file_name, 'rb') as f:\n new_test = struct.unpack('<l', f.read(8)[4:])[0]\n f.close()\n\n with open(self.file_name, 'rb') as f:\n old_test = struct.unpack('<h', f.read(6)[4:])[0]\n f.close()\n\n with open(self.file_name, 'rb') as f:\n other_test = struct.unpack('<l', f.read(20)[16:])[0]\n f.close()\n\n open_file = open(self.file_name, 'rb')\n\n if (other_test==202):\n raw = open_file.read(1236)[11:]\n self.model = '202'\n elif ((not new_test==102) and old_test==102):\n raw = open_file.read(1133)\n self.model = '102old'\n elif (new_test==102 and old_test==102):\n raw = open_file.read(1224)\n self.model = '102new'\n\n self.header = DpHeader(raw, self.model)\n\n self.data = DpData(open_file, \n self.model, \n self.header.interferogram_size, \n self.header.number_of_coadds, \n 2048*self.header.zero_fill,\n self.header.laser_wavelength_microns, \n self.header.dispersion_constant_xm,\n self.header.dispersion_constant_xb)\n\n open_file.close()", "def read_blob(self,blob_dim,n_blob=0):\n\n n_blobs = self.calc_n_blobs(blob_dim)\n if n_blob > n_blobs or n_blob < 0:\n raise ValueError('Please provide correct n_blob value. 
Given %i, but max values is %i'%(n_blob,n_blobs))\n\n # This prevents issues when the last blob is smaller than the others in time.\n if blob_dim[self.time_axis]*(n_blob+1) > self.selection_shape[self.time_axis]:\n updated_blob_dim = (int(self.selection_shape[self.time_axis] - blob_dim[self.time_axis]*n_blob), 1, int(blob_dim[self.freq_axis]))\n else:\n updated_blob_dim = [int(i) for i in blob_dim]\n\n blob_start = self._find_blob_start()\n blob = np.zeros(updated_blob_dim, dtype=self._d_type)\n\n # EE: For now; also assuming one polarization and one beam.\n\n # Assuming the blob will loop over the whole frequency range.\n if self.f_start == self.f_begin and self.f_stop == self.f_end:\n\n blob_flat_size = np.prod(blob_dim)\n updated_blob_flat_size = np.prod(updated_blob_dim)\n\n # Load binary data\n with open(self.filename, 'rb') as f:\n f.seek(int(self.idx_data + self._n_bytes * (blob_start + n_blob*blob_flat_size)))\n dd = np.fromfile(f, count=updated_blob_flat_size, dtype=self._d_type)\n\n if dd.shape[0] == updated_blob_flat_size:\n blob = dd.reshape(updated_blob_dim)\n else:\n logger.info('DD shape != blob shape.')\n blob = dd.reshape((int(dd.shape[0]/blob_dim[self.freq_axis]),blob_dim[self.beam_axis],blob_dim[self.freq_axis]))\n else:\n\n for blobt in range(updated_blob_dim[self.time_axis]):\n\n #Load binary data\n with open(self.filename, 'rb') as f:\n f.seek(int(self.idx_data + self._n_bytes * (blob_start + n_blob*blob_dim[self.time_axis]*self.n_channels_in_file + blobt*self.n_channels_in_file)))\n dd = np.fromfile(f, count=blob_dim[self.freq_axis], dtype=self._d_type)\n\n blob[blobt] = dd\n\n# if self.header['foff'] < 0:\n# blob = blob[:,:,::-1]\n\n return blob", "def readDataFromFile():\n image_size = 28 # each image is 28x28\n\n num_images = 60000 # there are 60k images\n with gzip.open(r'train-images-idx3-ubyte.gz', 'r') as f: # 60k train & valid\n f.read(16) # reading by 16-byte double\n buffer_Train_Images = f.read(image_size * image_size * num_images)\n f.close()\n data_Train_Images = np.frombuffer(buffer_Train_Images, dtype=np.uint8).astype(\n np.int32) # translating into 0 to 255\n data_Train_Images = data_Train_Images.reshape(num_images,\n image_size * image_size) # Data = 60k x 28 x 28 with 1 value in it\n\n with gzip.open('train-labels-idx1-ubyte.gz', 'r') as f: # 60k train & valid - labels\n f.read(8) # reading by 16-byte double\n buffer_Train_Labels = f.read(num_images)\n data_Train_Labels = np.frombuffer(buffer_Train_Labels, dtype=np.uint8).astype(\n np.int32) # translating into 0 to 255\n\n num_images = 10000 # there are 10k images\n with gzip.open('t10k-images-idx3-ubyte.gz', 'r') as f: # 10k tests\n f.read(16) # reading by 16-byte double\n buffer_Test_Image = f.read(image_size * image_size * num_images)\n data_Test_Image = np.frombuffer(buffer_Test_Image, dtype=np.uint8).astype(\n np.uint8) # translating into 0 to 255\n data_Test_Image = data_Test_Image.reshape(num_images, image_size * image_size) # Data = 60k x 28 x 28 with\n\n with gzip.open('t10k-labels-idx1-ubyte.gz', 'r') as f: # 10k tests - lbles\n f.read(8) # reading by 16-byte double\n buffer_Test_Label = f.read(num_images)\n data_Test_Labels = np.frombuffer(buffer_Test_Label, dtype=np.uint8).astype(\n np.int32) # translating into 0 to 255\n\n return data_Train_Images, data_Train_Labels, data_Test_Image, data_Test_Labels", "def _get_data_protobuf(self, filename):\n filename_queue = tf.train.string_input_producer([str(filename)],\n num_epochs=None)\n reader = tf.TFRecordReader()\n _, serialized_example = 
reader.read(filename_queue)\n features = self._get_features(serialized_example)\n\n # image\n with tf.name_scope(\"deserialise_image\"):\n image, image_height, image_width = self._image_from_features(features)\n\n # ground truth landmarks\n with tf.name_scope(\"deserialise_landmarks\"):\n gt_heatmaps, gt_lms, n_landmarks, visible, marked = self._heatmaps_from_features(features)\n\n # information\n with tf.name_scope(\"deserialise_info\"):\n scale = self._info_from_features(features)\n\n # augmentation\n with tf.name_scope(\"image_augmentation\"):\n if self.augmentation:\n gt_heatmaps, gt_lms, image, image_height, image_width = project.input.augmentation.augmentation(\n gt_heatmaps, gt_lms, image, image_height, image_width,\n max_scale=1.25, min_scale=0.75,\n max_rotate=30., min_rotate=-30.,\n flip_probability=0.5, flip_fn=self.flip_fn)\n\n with tf.name_scope(\"crop\"):\n # crop to 256 * 256\n gt_heatmaps, gt_lms, image = self._crop(gt_heatmaps, gt_lms, image, image_height, image_width)\n\n self._set_shape(image, gt_heatmaps, gt_lms)\n\n return image, gt_heatmaps, gt_lms, scale, marked", "def readData(self):\n f = open(self.filename)\n self.time = []\n self.data = []\n for line in f:\n if line.find('BAD FLAG') > 0:\n self.badValue = float(line.split(':')[1].strip())\n if line.find('LONGITUDE') > 0:\n self.lon = line.split(':')[1].strip()\n if line.find('LATITUDE') > 0:\n self.lat = line.split(':')[1].strip()\n if len(line) > 6 and line[2] == '-' and line[6] == '-':\n parts = line.rsplit(None, 1)\n # data line\n timeStamp = datetime.datetime.strptime(parts[0], '%d-%b-%Y %H')\n t = timeArray.datetimeToEpochTime(timeStamp)\n self.time.append(t)\n val = float(parts[1])\n self.data.append(val)\n\n self.time = np.array(self.time)\n self.data = np.array(self.data)\n # remove bad values\n if self.badValue:\n goodIx = self.data != self.badValue\n self.time = self.time[goodIx]\n self.data = self.data[goodIx]\n self.fileIsRead = True", "def parseContinuousRecord(self, f, r):\n '''\n junk, junk, r['TimeStamp'], r['Probe'], junk, junk, NumSamples =\n unpack('<hiqhhii', f.read(26))\n '''\n #junk, r['TimeStamp'], r['Probe'], junk, junk, NumSamples = unpack('qqhhii', f.read(28))\n junk, r['TimeStamp'], r['Probe'], junk, junk, NumSamples = unpackctsrec(f.read(28))\n r['NumSamples'] = NumSamples\n r['dataoffset'] = f.tell()\n # skip the waveform data for now\n f.seek(NumSamples*2, 1) # each sample is 2 bytes long", "def read_and_decode_file(filename_queue):\n\n reader = tf.TFRecordReader()\n _, serialized_example = reader.read(filename_queue)\n features = tf.parse_single_example(\n serialized_example, features={\n 'image_raw': tf.FixedLenFeature([], tf.string),\n 'rating_average': tf.FixedLenFeature([], tf.string),\n 'image_name': tf.FixedLenFeature([], tf.string),\n 'image_links': tf.FixedLenFeature([], tf.string),\n })\n\n image = tf.image.decode_jpeg(features['image_raw'], channels=3)\n\n # image_resize = tf.image.resize_images(image, (299, 299))\n label = tf.string_to_number(features['rating_average'], tf.float32)\n # label = features['rating_average']\n\n image_name = features['image_name']\n links = features['image_links']\n\n return image, label, image_name, links", "def parse(self, recursive=True):\n # Read and process the file header\n self.fh.seek(0x0, 0)\n header = struct.unpack(\"=10I\", self.fh.read(0x28))\n\n data_start_block = header[0x1]\n\n idx_table_entry_count = header[0x4]\n\n off_table_offset = header[0x5]\n off_table_addr = self.IDX_TABLE_ADDR + off_table_offset\n\n 
self.fh.seek(self.IDX_TABLE_ADDR, 0)\n\n # Read and populate the index table\n idx_table = []\n for idx in range(idx_table_entry_count):\n typ, num1, num2, num3, num4 = Image.parse_idx_entry(\n self.fh.read(self.IDX_TABLE_ENTRY_SIZE)\n )\n idx_table.append([typ, num1, num2, num3, num4])\n\n self.fh.seek(off_table_addr, 0)\n\n # FIXME: Not sure what 'unk' is.\n off_table_entry_count, unk = struct.unpack(\"=4x2I\", self.fh.read(0xC))\n\n # Read and populate the offset table\n off_table = {}\n for i in range(off_table_entry_count):\n idx, blk_num = Image.parse_off_entry(\n self.fh.read(self.OFF_TABLE_ENTRY_SIZE)\n )\n off_table[idx] = blk_num\n\n # Populate entries\n self.entries = []\n for idx, idx_entry in enumerate(idx_table):\n if idx not in off_table:\n self.entries.append(None)\n continue\n\n blk_num = off_table[idx]\n off = self.fh.tell()\n\n # We need to grab the actual (compressed) size from the PACKage header\n res = None\n addr = self.BLOCK_NUM_ADDR(blk_num)\n if idx_entry[0x0] == b\"PAK \":\n self.fh.seek(addr)\n (\n typ,\n cnt,\n ptr_off,\n str_table_off,\n dec_data_off,\n dec_len,\n cmp_len,\n pad_len,\n ) = Package.parse_header(self.fh.read(Package.ENTRY_SIZE))\n res = Package(FileWindow(self.filename, addr, cmp_len), idx_entry[0x3])\n else:\n res = Resource(\n idx_entry[0x0], FileWindow(self.filename, addr, idx_entry[0x1])\n )\n\n if recursive:\n res.parse(recursive)\n self.entries.append(res)\n self.fh.seek(off, 0)", "def read_and_decode(\n filename_queue,\n model_input_image_size,\n tf_dict,\n tf_reader_settings,\n data_augmentations,\n number_of_files,\n aux=None,\n resize_output=None):\n reader = tf.TFRecordReader()\n\n # Switch between single/multi-file reading\n if number_of_files == 1:\n _, serialized_example = reader.read(filename_queue)\n features = tf.parse_single_example(\n serialized_example,\n features=tf_dict)\n else:\n _, serialized_examples = reader.read_up_to(\n filename_queue,\n num_records=number_of_files)\n features = tf.parse_example(\n serialized_examples,\n features=tf_dict)\n\n # Handle decoding of each element\n image = decode_data(\n features=features['image'],\n reader_settings=tf_reader_settings['image']['dtype'])\n label = decode_data(\n features=features['label'],\n reader_settings=tf_reader_settings['label']['dtype'])\n\n # Reshape each element\n if 'height' in tf_dict.keys():\n # Assume variable height width. Make a crop. 
Only for per-pixel atm.\n height = decode_data(\n features=features['height'],\n reader_settings=tf_reader_settings['height']['dtype'])\n width = decode_data(\n features=features['width'],\n reader_settings=tf_reader_settings['width']['dtype'])\n if 'coco_preproc' in data_augmentations:\n print('Warning: Interchanging height/width for COCO.')\n oheight = height\n height = width\n width = oheight\n image = tf.reshape(\n image, [height, width, tf_reader_settings['image']['reshape'][-1]])\n label = tf.cast(label, image.dtype)\n label = tf.reshape(\n label, [height, width, tf_reader_settings['label']['reshape'][-1]])\n if 'coco_preproc' in data_augmentations:\n image, label = crop_image_label(\n image=image,\n label=label,\n size=np.copy(tf_reader_settings['image']['reshape']).tolist(),\n crop='center')\n else:\n image = tf.reshape(image, tf_reader_settings['image']['reshape'])\n if tf_reader_settings['label']['reshape'] is not None:\n label = tf.reshape(label, tf_reader_settings['label']['reshape'])\n\n if image.dtype == tf.float64:\n print 'Forcing float64 image to float32.'\n image = tf.cast(image, tf.float32)\n if label.dtype == tf.float64:\n print 'Forcing float64 label to float32.'\n label = tf.cast(label, tf.float32)\n\n if aux is not None:\n aux_data = decode_data(\n features=features[aux.keys()[0]],\n reader_settings=tf_reader_settings[aux.keys()[0]]['dtype'])\n aux = tf.reshape(\n aux_data, tf_reader_settings[aux.keys()[0]]['reshape'])\n else:\n aux = tf.constant(0)\n\n # Preprocess images and heatmaps\n if len(model_input_image_size) == 3:\n # 2D image augmentations\n image, label = image_augmentations(\n image=image,\n label=label,\n model_input_image_size=model_input_image_size,\n data_augmentations=data_augmentations)\n if resize_output is not None:\n # Resize labels after augmentations\n if isinstance(resize_output, dict):\n if resize_output.keys()[0] == 'resize':\n label = resize_image_label(\n im=label,\n model_input_image_size=resize_output,\n f='nearest')\n elif resize_output.keys()[0] == 'pool':\n label = tf.expand_dims(label, axis=0)\n label = tf.nn.max_pool(\n value=label,\n ksize=resize_output['pool']['kernel'],\n strides=resize_output['pool']['stride'],\n padding='SAME')\n label = tf.squeeze(label, axis=0)\n else:\n raise NotImplementedError(resize_output.keys()[0])\n else:\n label = resize_image_label(\n im=label,\n model_input_image_size=resize_output,\n f='nearest')\n elif len(model_input_image_size) == 4:\n # 3D image augmentations.\n # TODO: optimize 3D augmentations with c++. 
This is slow.\n split_images = tf.split(\n image,\n model_input_image_size[0],\n axis=0)\n split_images = [tf.squeeze(im, axis=0) for im in split_images]\n images, labels = [], []\n if np.any(['label' in x for x in data_augmentations if x is not None]):\n split_labels = tf.split(\n label,\n model_input_image_size[0],\n axis=0)\n split_labels = [tf.squeeze(lab, axis=0) for lab in split_labels]\n for im, lab in zip(split_images, split_labels):\n it_im, it_lab = image_augmentations(\n image=im,\n label=lab,\n model_input_image_size=model_input_image_size[1:],\n data_augmentations=data_augmentations)\n if resize_output is not None:\n # Resize labels after augmentations\n it_lab = resize_image_label(\n im=it_lab,\n model_input_image_size=resize_output,\n f='area')\n images += [it_im]\n labels += [it_lab]\n label = tf.stack(\n labels,\n axis=0)\n image = tf.stack(\n images,\n axis=0)\n else:\n if None not in data_augmentations:\n for im in split_images:\n it_im = image_augmentations(\n image=im,\n model_input_image_size=model_input_image_size[1:],\n data_augmentations=data_augmentations)\n images += [it_im]\n image = tf.stack(\n images,\n axis=0)\n # if image.dtype != tf.float32:\n # image = tf.cast(image, tf.float32)\n return image, label, aux", "def __init__(self, fileName):\n in_ = JInputStream.getInputStream(fileName)\n b = int()\n len = int()\n cs = int()\n addr = int()\n buf = [None]*255\n eof = False\n line = 0\n i = 0\n while len(ihxData):\n self.ihxData[i] = -1\n i += 1\n try:\n while not eof:\n while True:\n b = in_.read()\n if b < 0:\n raise IhxParseException(\"Inexpected end of file\")\n if not ((b != int(':'))):\n break\n line += 1\n len = self.readHexByte(in_)\n # length field \n cs = len\n b = self.readHexByte(in_)\n # address field \n cs += b\n addr = b << 8\n b = self.readHexByte(in_)\n cs += b\n addr |= b\n b = self.readHexByte(in_)\n # record type field\n cs += b\n while i < len:\n # data\n buf[i] = int(self.readHexByte(in_))\n cs += buf[i]\n i += 1\n cs += self.readHexByte(in_)\n # checksum\n if (cs & 0xff) != 0:\n raise IhxParseException(\"Checksum error\")\n if b == 0:\n # data record\n while i < len:\n if self.ihxData[addr + i] >= 0:\n System.err.println(\"Warning: Memory at position \" + Integer.toHexString(i) + \" overwritten\")\n self.ihxData[addr + i] = int((buf[i] & 255))\n i += 1\n elif b == 1:\n # eof record\n eof = True\n else:\n raise IhxParseException(\"Invalid record type: \" + b)\n except IhxParseException as e:\n raise IhxFileDamagedException(fileName, line, e.getLocalizedMessage())\n try:\n in_.close()\n except Exception as e:\n System.err.println(\"Warning: Error closing file \" + fileName + \": \" + e.getLocalizedMessage())", "def readData(self):\n self._readHeader()\n self._readSize()\n self._readComments()\n self._readAllROI()\n self._readDate()\n self._readArray()", "def _read_datafile(self,path):\n \tlabels, images = [], []\n \twith gzip.GzipFile(path) as f:\n \t for line in f:\n \t vals = line.strip().split()\n \t labels.append(float(vals[0]))\n \t images.append([float(val) for val in vals[1:]])\n \tlabels = np.array(labels, dtype=np.int32)\n \tlabels[labels == 10] = 0 # fix weird 0 labels\n \timages = np.array(images, dtype=np.float32).reshape(-1, 16, 16, 1)\n \timages = (images + 1) / 2\n \treturn images, labels" ]
[ "0.64353794", "0.634234", "0.6255051", "0.60868925", "0.6069626", "0.6022551", "0.60060596", "0.5988088", "0.5954001", "0.59471613", "0.5936006", "0.59276175", "0.5907464", "0.58940315", "0.5716337", "0.571063", "0.56878084", "0.5677389", "0.5655111", "0.56530195", "0.5634797", "0.55943286", "0.5587032", "0.55833375", "0.5571079", "0.55654734", "0.5557327", "0.5543334", "0.5542223", "0.55349535" ]
0.694582
0
Returns the product a*b*c for a Pythagorean triple (a^2 + b^2 = c^2) whose sum a+b+c equals the given number. Uses Euclid's formula to enumerate candidate triples.
def find_pythagorean_triple_product(num):
    m = 2
    n = 1
    while True:
        a = m ** 2 - n ** 2
        b = 2 * m * n
        c = m ** 2 + n ** 2
        if a ** 2 + b ** 2 == c ** 2 and a + b + c == num:
            return a * b * c
        else:
            m += 1
            if m >= num:
                m = 1
                n += 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def product_pythagorean_triplet(N):\n\tfor a in range(0,N):\n\t\tfor b in range(a+1,N):\n\t\t\tfor c in range(b+1,N):\n\t\t\t\tif a+b+c == 1000 and is_pythagorean_triplet(a,b,c):\n\t\t\t\t\treturn a*b*c\n\treturn \"N too small\"", "def product1(a, b, c) :\n return a * b * c", "def nine():\r\n \r\n a = 3\r\n b = 4\r\n c = pythagorean(a, b)\r\n \r\n while a + b + c < 1001:\r\n while a + b + c < 1001:\r\n if a + b + c == 1000:\r\n return a * b * c\r\n b += 1\r\n c = pythagorean(a, b)\r\n a += 1\r\n b = a + 1\r\n c = pythagorean(a, b)", "def main():\n # Since a < b < c, we have a + b + c = 1000 > a + a + a, making a < 334.\n for a in range(1, 334):\n # Then b + c = 1000 - a > b + b, making b < 500 - a/2.\n for b in range(a + 1, 501 - a // 2):\n c = 1000 - a - b\n\n # check for Pythagorean triplet\n if a * a + b * b == c * c:\n return a * b * c", "def product(value1, value2, value3):\n prod = value1 * value2\n prod = prod * value3\n return prod", "def product(value1, value2, value3):\n prod = value1 * value2\n prod = prod * value3\n return prod", "def product(num_a, num_b):\r\n return num_a*num_b", "def two_of_three(a, b, c):\n \"*** YOUR CODE HERE ***\"\n return pow(a, 2) + pow(b,2) + pow(c,2) - pow(min(a,b,c),2)", "def pythagorean_triplet():\n triplet_list = []\n\n for c in range(1, 1000):\n for b in range(1, 1000):\n for a in range(1, 1000):\n if (a + b + c) == 1000 and ((a**2) + (b**2)) == (c**2) and a < b < c:\n triplet_list.append(a * b * c)\n\n try:\n if triplet_list[0] is not None:\n break\n except IndexError:\n continue\n\n return triplet_list[0]", "def pythagorean_triplet(target_sum):\n #a can be at most sum//3 because of a+b+c==sum and a<b\n for a in range(target_sum//3, 1, -1):\n #b can be at most sum//2 because of a+b+c==sum and b<c\n for b in range(target_sum//2, a, -1):\n #calculate c for the current a and b\n c = math.sqrt(a**2 + b**2)\n if c_fulfills_conditions(a, b, c, target_sum):\n return a * b * math.floor(c)", "def solve2(a, b, c):\n import gmpy2\n from gmpy2 import mpz\n\n x = (-b + gmpy2.isqrt(mpz(b**2 - 4*a*c))) / (2*a) \n y = (-b - gmpy2.isqrt(mpz(b**2 - 4*a*c))) / (2*a) \n return (x, y)", "def prod(n):\n product = S.One\n for i in n:\n product = product * i\n return product", "def is_pythagorean_triplet(a,b,c):\n\treturn a**2 + b**2 == c**2", "def pythagorean_triplet(p):\n\n # loop through values for a\n for a in range(1, p):\n\n # loop through values for b\n for b in range(1, p):\n\n # faster way to evaluate c\n c = p - a - b\n\n # test if a valid triple is found\n if (a ** 2 + b ** 2) == c ** 2:\n return a, b, c\n\n return 0, 0, 0", "def quadratic(x, a, b, c):\n y = a*(x**2)+b*x+c\n return y", "def product(numbers):\n p = 1\n for x in numbers:\n p *= x\n return p", "def product(numbers):\n p = 1\n for x in numbers:\n p *= x\n return p", "def two_of_three(a, b, c):\n return a ** 2 + b ** 2 + c ** 2 - (min(a, b, c) ** 2)", "def mult_numbers(numbers):\n product = 1\n for number in numbers:\n product = product * number\n\n return product", "def product(a, b):\n return a * b", "def pythagorean_triples(n):\n pass", "def special_pythagorean_triplet(s):\n\tfor a in xrange(1, s / 3):\n\t\tfor b in xrange(a + 1, s - a):\n\t\t\tc = s - a - b;\n\t\t\tif a ** 2 + b ** 2 == c ** 2:\n\t\t\t\treturn (a, b, c)", "def pythagorean_triples(n):\n l = []\n # loop over all a < b < c <= n\n for c in range(1, n + 1):\n for b in range(1, c):\n for a in range(1, b):\n if a*a + b*b == c*c:\n l.append((a, b, c))\n return l", "def special_pythagorean_triplet():\n return [a * b * (1000 - a - 
b) for a in range(1000 // 3) for b in\n range(a, 1000 // 2) if\n a * a + b * b == (1000 - a - b) * (1000 - a - b)][0]", "def find_square_tric(a, b, c):\n p = (a+b+c)/2\n s = math.sqrt(p * (p-a)*(p-b)*(p-c))\n return s", "def enc_mul_const(pub, m, c):\n mul_result = powmod(m, c, pub.n_sq)\n return mul_result", "def multiply(numbers):\n prod = 1\n for i in numbers:\n prod = prod*i\n return prod", "def main(): \n for a in range(0,1000):\n for b in range(a,1000):\n for c in range(b,1000):\n if (c*c == (a*a + b*b)) and (a+b+c == 1000):\n print a, b ,c", "def find_square_pr(a,b):\n return a * b", "def parabola(x, a, b):\n return a + b * x ** 2" ]
[ "0.6650262", "0.6598621", "0.6506206", "0.642716", "0.633817", "0.633817", "0.62214804", "0.62109685", "0.60733694", "0.6063541", "0.60557306", "0.60468155", "0.6007859", "0.6002839", "0.5995274", "0.5953455", "0.5953455", "0.5928577", "0.5911845", "0.58982366", "0.58903277", "0.5884619", "0.5880087", "0.58385134", "0.5824208", "0.58112025", "0.5805751", "0.5780869", "0.5761568", "0.576044" ]
0.7518107
0
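As an illustrative sketch (assuming the find_pythagorean_triple_product definition from the document field above is in scope), the classic input 1000 should yield 31875000, since 200 + 375 + 425 = 1000 and 200^2 + 375^2 = 425^2.

# Illustrative sanity check; assumes find_pythagorean_triple_product() above is importable.
product = find_pythagorean_triple_product(1000)
assert product == 200 * 375 * 425 == 31875000  # triple (200, 375, 425) sums to 1000
print(product)  # 31875000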
Creates a list of device type strings used to test the source code. CUDA devices will be tested only if the current hardware supports them.
def get_test_devices():
    devices = ["cpu"]
    if torch.cuda.is_available():
        devices.append("cuda")
    return devices
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_test_devices():\n\n # Assumption: CPU is always available\n devices = ['cpu']\n\n if torch.cuda.is_available():\n devices.append('cuda')\n\n return devices", "def _get_device_list(self):\n if self.app.config.cloud_type == 'ec2':\n # c5/m5 on AWS mounts EBS volumes as NVMe:\n # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nvme-ebs-volumes.html\n # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/device_naming.html\n for itype in ['c5', 'm5']:\n if itype in self.app.cloud_interface.get_type():\n return frozenset(glob('/dev/nvme[0-26]n1'))\n return frozenset(glob('/dev/*d[a-z]'))", "def get_available_devices():\n executable_path = os.path.join(os.path.dirname(__file__), 'build')\n try:\n num_devices = int(subprocess.check_output(\n [\"{}/query_devices\".format(executable_path)]))\n except subprocess.CalledProcessError as e:\n return [0]\n\n FNULL = open(os.devnull, 'w')\n\n available_devices = []\n for i in range(num_devices):\n try:\n if b\"NVIDIA\" in subprocess.check_output(\n [\"{}/test_device\".format(executable_path),\n str(i)], stderr=FNULL):\n available_devices.append(i)\n logging.info('Device {} is available for rendering'.format(i))\n except subprocess.CalledProcessError as e:\n logging.info(e)\n logging.info('Device {} is not available for rendering'.format(i))\n FNULL.close()\n\n return available_devices", "def _display_cuda_devices():\n\n cuda_query_output = subprocess.run(\"nvidia-smi --query-gpu=gpu_uuid,gpu_name,compute_mode --format=csv\", shell=True, capture_output=True, text=True)\n # Check if command worked\n if cuda_query_output.returncode == 0:\n # Split by line jump and comma\n cuda_devices_list = [entry for entry in cuda_query_output.stdout.splitlines()]\n logger.debug(f\"CUDA devices available: {*cuda_devices_list,}\")\n # We only support \"Default\" and not \"Exclusive_Process\" for the compute mode\n if \"Default\" not in cuda_query_output.stdout:\n logger.warning(f\"GPU in 'Exclusive_Process' mode (or Prohibited), one context is allowed per device. This may prevent some openmmtools features from working. 
GPU must be in 'Default' compute mode\")\n # Handel the case where the command had some error\n else:\n logger.debug(f\"nvidia-smi command failed: {cuda_query_output.stderr}, this is expected if there is no GPU available\")", "def test_get_devices(self):\n pass", "def test_get_devices(self):\n pass", "def test_get_pci_device_list(self):\n pass", "def test_get_device_templates(self):\n pass", "def devices() -> typing.List[str]:\n devices = sounddevice.query_devices()\n return [device['name'] for device in devices if device['max_output_channels'] > 0]", "def testBuildDeviceList(self):\n\n self.inv._devices = {\n 'first': self.Device(),\n 'second': self.Device(),\n 'third': self.Device()\n }\n self.inv._CmdFilter('targets', ['^f.*,second,^t.ird'])\n self.inv._CmdFilter('xtargets', [''])\n self.inv._device_list = None\n self.assertEqual(set(['first', 'second', 'third']),\n set(self.inv.device_list))\n\n self.inv._CmdFilter('targets', ['^f.*'])\n self.inv._device_list = None\n self.assertEqual(['first'], self.inv.device_list)", "def cuda_info() -> str:\n\n def _cuda_devices_formatting(\n info_function: typing.Callable,\n formatting_function: typing.Callable = None,\n mapping_function: typing.Callable = None,\n ):\n def _setup_default(function):\n return (lambda arg: arg) if function is None else function\n\n formatting_function = _setup_default(formatting_function)\n mapping_function = _setup_default(mapping_function)\n\n return \" | \".join(\n mapping_function(\n [\n formatting_function(info_function(i))\n for i in range(torch.cuda.device_count())\n ]\n )\n )\n\n def _device_properties(attribute):\n return _cuda_devices_formatting(\n lambda i: getattr(torch.cuda.get_device_properties(i), attribute),\n mapping_function=lambda in_bytes: map(str, in_bytes),\n )\n\n cuda_cap = _cuda_devices_formatting(\n torch.cuda.get_device_capability,\n formatting_function=lambda capabilities: \".\".join(map(str, capabilities)),\n )\n return \"\\n\".join(\n [\n f\"Available CUDA devices count: {torch.cuda.device_count()}\",\n f\"CUDA devices names: {_cuda_devices_formatting(torch.cuda.get_device_name)}\",\n f\"Major.Minor CUDA capabilities of devices: {cuda_cap}\",\n f\"Device total memory (bytes): {_device_properties('total_memory')}\",\n f\"Device multiprocessor count: {_device_properties('multi_processor_count')}\",\n ]\n )", "def devices(self):\n return list(self._device_types)", "def get_device_types():\n netAdminToolDB = app.config['DATABASE']\n\n device_types = netAdminToolDB.get_device_type()\n list = []\n for device_type in device_types:\n uri = url_for('get_device_type', device_type_id=device_type.id, _external=True)\n list.append({\n 'id': device_type.id,\n 'uri': uri,\n 'make': device_type.make,\n 'model': device_type.model,\n 'code': device_type.code\n })\n if list == []:\n return jsonify({'error': 'No device types found'}), 404\n\n return jsonify({'device_types': list})", "def _get_gpu_names() -> Sequence[str]:\n result = []\n for device in device_lib.list_local_devices():\n if device.device_type != \"GPU\":\n continue\n desc = device.physical_device_desc\n\n fields = desc.split(\",\")\n for field in fields:\n name, value = field.split(\":\", maxsplit=1)\n name = name.strip()\n value = value.strip()\n if name == \"name\":\n result.append(value)\n return result", "def vendor_list():\n return ['nxos', 'eos', 'cumulus']", "def list_devices():\n return _lib.SeaTeaseAPI().list_devices()", "def device() -> str:\n import torch\n\n if torch.cuda.is_available() and torch.cuda.device_count() > 0:\n if 
hasattr(Config().trainer,\n 'parallelized') and Config().trainer.parallelized:\n device = 'cuda'\n else:\n device = 'cuda:' + str(\n random.randint(0,\n torch.cuda.device_count() - 1))\n else:\n device = 'cpu'\n\n return device", "def list_devices():\r\n DeviceManagerCLI.BuildDeviceList()\r\n return DeviceManagerCLI.GetDeviceList()", "def list_local_devices():\n def _convert(pb_str):\n m = device_attributes_pb2.DeviceAttributes()\n m.ParseFromString(pb_str)\n return m\n return [_convert(s) for s in pywrap_tensorflow.DeviceFactory_AddDevices()]", "def test_get_devices1(self):\n pass", "def get_cl_devices():\n\n _devices = {'CPU':[], 'GPU':[]}\n\n platforms = cl.get_platforms()\n for platform in platforms:\n devices = platform.get_devices()\n for device in devices:\n if device.type == cl.device_type.CPU:\n _devices['CPU'].append(device)\n elif device.type == cl.device_type.GPU:\n _devices['GPU'].append(device)\n \n \n return _devices", "def get_platforms(one_class):\n platforms = []\n\n platform = one_class.split(' ')[-1]\n if platform == 'win':\n platforms.append('Windows')\n if platform == 'mac':\n platforms.append('Mac os')\n if platform == 'linux':\n platforms.append('Linux')\n if platform == 'vr_supported':\n platforms.append('VR Supported')\n\n return platforms", "def get_devices():\n try:\n with open(DEVICES, 'r') as f:\n data = json.load(f)['devices']\n except (IOError, ValueError) as err:\n raise SwiftlmCheckFailure('Failure opening %s: %s' % (DEVICES, err))\n\n devices = []\n for d in data:\n l = d.get('label', LABEL_CHECK_DISABLED)\n devices.append(Device(\n device=d['name'],\n mount=MOUNT_PATH+d['swift_drive_name'],\n label=l\n ))\n\n return devices", "def load_devices():", "def FindAllAvailableDevices(options):\n use_ssh = options.cros_remote and cros_interface.HasSSH()\n if not use_ssh and not IsRunningOnCrOS():\n logging.debug('No --remote specified, and not running on ChromeOs.')\n return []\n\n return [CrOSDevice(options.cros_remote, options.cros_remote_ssh_port,\n options.cros_ssh_identity, not use_ssh)]", "def device_type(devices):\n num_of_types = len({type(device) for device in devices})\n if num_of_types == 1:\n return devices[0].list_type.replace(\"_\", \" \").title()\n elif num_of_types == 0:\n return None\n else:\n raise ValueError", "def try_all_gpus(): #@save\n num_gpus = len(tf.config.experimental.list_physical_devices('GPU'))\n devices = [tf.device(f'/GPU:{i}') for i in range(num_gpus)]\n return devices if devices else [tf.device('/CPU:0')]", "def prepare_device(n_gpu_use):\n n_gpu = torch.cuda.device_count()\n if n_gpu_use > 0 and n_gpu == 0:\n print(\"Warning: There\\'s no GPU available on this machine,\"\n \"training will be performed on CPU.\")\n n_gpu_use = 0\n if n_gpu_use > n_gpu:\n print(f\"Warning: The number of GPU\\'s configured to use is {n_gpu_use}, but only {n_gpu} are \"\n \"available on this machine.\")\n n_gpu_use = n_gpu\n device = torch.device('cuda:0' if n_gpu_use > 0 else 'cpu')\n list_ids = list(range(n_gpu_use))\n return device, list_ids", "def prepare_device(n_gpu_use):\n n_gpu = torch.cuda.device_count()\n if n_gpu_use > 0 and n_gpu == 0:\n print(\"Warning: There\\'s no GPU available on this machine,\"\n \"training will be performed on CPU.\")\n n_gpu_use = 0\n if n_gpu_use > n_gpu:\n print(f\"Warning: The number of GPU\\'s configured to use is {n_gpu_use}, but only {n_gpu} are \"\n \"available on this machine.\")\n n_gpu_use = n_gpu\n device = torch.device('cuda:0' if n_gpu_use > 0 else 'cpu')\n list_ids = list(range(n_gpu_use))\n return 
device, list_ids", "def get_devices(needs: int = None):\n\n num_gpus = torch.cuda.device_count()\n\n if num_gpus == 0:\n devices = [torch.device(\"cpu\")]\n if needs is None:\n return devices\n return devices * needs\n\n devices = [torch.device(f\"cuda:{index:d}\") for index in range(num_gpus)]\n if needs is None:\n return devices\n return [device for _, device in zip(range(needs), itertools.cycle(devices))]" ]
[ "0.7328727", "0.6380837", "0.63609934", "0.610601", "0.6104033", "0.6104033", "0.60989046", "0.6091093", "0.6062906", "0.6032046", "0.60173154", "0.6013383", "0.5992261", "0.5990151", "0.5924036", "0.5923588", "0.58618355", "0.58598804", "0.58459705", "0.5845135", "0.5832014", "0.579728", "0.5789386", "0.57692945", "0.5744335", "0.57344854", "0.572225", "0.57162726", "0.57162726", "0.5711953" ]
0.72319925
1
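An illustrative way to consume the device list from the document function above, assuming get_test_devices() is in scope and torch is installed:

# Illustrative usage; assumes get_test_devices() above is in scope.
import torch

for device_name in get_test_devices():
    device = torch.device(device_name)
    x = torch.ones(2, 2, device=device)
    assert x.device.type == device_name  # "cpu", plus "cuda" when available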
Search for existing Service Catalog Provisioned Products. If none is found, it will search for any in-progress deployments, since Control Tower deploys accounts serially.
def search_provisioned_products(search_pp_name, client: boto3.client) -> dict:
    logger.info(f"Searching for {search_pp_name}")
    response = client.search_provisioned_products(
        AccessLevelFilter={
            'Key': 'Account',
            'Value': 'self'
        },
        Filters={
            'SearchQuery': [f"name:{search_pp_name}"]
        }
    )
    if len(response['ProvisionedProducts']) > 0:
        provisioned_product = response['ProvisionedProducts'][0]
        logger.info(f"Found {provisioned_product}")
        # Remove CreatedTime since it doesn't serialize to JSON well.
        del provisioned_product['CreatedTime']
        return provisioned_product
    else:
        # If the product has not been provisioned yet: since Control Tower deploys accounts
        # serially, check whether there is an existing In-Progress deployment and return
        # its provisioned product name / status.
        logger.info(f"Did not find {search_pp_name}. Searching for any In-Progress Control Tower Deployments")
        return scan_provisioned_products(search_pp_name, client)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scan_provisioned_products(search_pp_name, client: boto3.client) -> dict:\n logger.info('Making sure Control Tower is not already executing')\n paginator = client.get_paginator(\"scan_provisioned_products\")\n for page in paginator.paginate(\n AccessLevelFilter={\n 'Key': 'Account',\n 'Value': 'self'\n }\n ):\n for x in page['ProvisionedProducts']:\n if x['Type'] == 'CONTROL_TOWER_ACCOUNT':\n\n # Since Control Tower has a serial method of deploying account this statement will check to see if\n # there's and existing In-Progress deployment and will return provision the product name / status\n if x['Status'] == 'UNDER_CHANGE' and x['Name'] != search_pp_name:\n logger.info(f\"Found In-Progress Control Tower Deployment ({x['Name']})\")\n return {\"ProvisionedProductName\": x['Name'], \"Status\": x['Status']}\n\n # If existing provision product found return\n elif x['Name'] == search_pp_name:\n logger.info(f\"Found {x}\")\n\n # Removing Create time since it doesn't serializable JSON well\n del x['CreatedTime']\n return x", "def scan_provisioned_products_single_page(self, **kwargs):\n return slurp(\n 'scan_provisioned_products',\n self.scan_provisioned_products,\n 'ProvisionedProducts',\n **kwargs\n )", "def search_provisioned_products_single_page(self, **kwargs):\n return slurp(\n 'search_provisioned_products',\n self.search_provisioned_products,\n 'ProvisionedProducts',\n **kwargs\n )", "def retrieve(self, where=\"\", parameters={}, target_path=os.path.curdir, use_symlinks=False):\n products = self.search(where=where, parameters=parameters)\n for product in products:\n if not product.core.active or 'archive_path' not in product.core:\n raise Error(\"product '%s' (%s) not available\" % (product.core.product_name, product.core.uuid))\n\n self._retrieve(product, target_path, use_symlinks)\n\n return len(products)", "def show_available_products(): # {{{\n products_available = {}\n try:\n with MONGO:\n product_collection = MONGO.connection.assignment_07[\"product\"].find(\n )\n\n for product in product_collection:\n if int(product[\"quantity_available\"]) > 0:\n products_available[product[\"product_id\"]] = {\n \"description\": product[\"description\"],\n \"product_type\": product[\"product_type\"],\n \"quantity_available\": product[\"quantity_available\"],\n }\n except TypeError as excep:\n LOGGER.warning(\"Error looking up available products\")\n LOGGER.warning(excep)\n else:\n if not products_available:\n LOGGER.info('No products found')\n else:\n LOGGER.info(\"Available products retrieved successfully.\")\n return products_available # }}}", "def check_products(self, adi):\r\n results = []\r\n products = self.get_products(adi)\r\n for product in products[\"data\"][\"products\"]:\r\n print(\"Checking product '{}'... 
\".format(product[\"name\"]), end='')\r\n detail = self.get_product_detail(adi, product_id=product[\"productId\"], product_name=product[\"name\"])\r\n if self.rf.valid_product_detail(detail):\r\n print(\"Valid.\")\r\n result = \"Available\"\r\n else:\r\n print(\"INVALID.\")\r\n result = \"Not available\"\r\n results.append([product[\"name\"], result])\r\n return results", "def get_vendors_and_products_seen(cls, cb):\n url = \"/device_control/v3/orgs/{0}/products\".format(cb.credentials.org_key)\n resp = cb.get_object(url)\n return resp.get(\"results\", [])", "def show_available_products(*args):\n logger.info(f\"Preparing dict of available prodcuts...\")\n available_products = {}\n\n with MONGO:\n mdb = eval(Settings.connect_string)\n products = mdb[\"product\"]\n for doc in products.find():\n del doc[\"_id\"]\n if int(doc[\"quantity_available\"]) > 0:\n product_id = doc[\"product_id\"]\n del doc[\"product_id\"]\n available_products[product_id] = doc\n\n return available_products", "def get_all_products(self):\n\t\tpass", "def ListProducts(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def _product_available(self, cr, uid, ids, field_names=None, arg=False, context=None):\n if not field_names:\n field_names = []\n if context is None:\n context = {}\n res = {}\n \n for product_loc in self.browse(cr, uid, ids):\n c = context.copy()\n c.update({ 'states': ('done',), 'what': ('in', 'out'), 'location': product_loc.location_id.id})\n stock = self.pool.get('product.product').get_product_available(cr, uid, [product_loc.product_id.id], context=c)\n res[product_loc.id] = stock.get(product_loc.product_id.id, 0.0)\n return res", "def call(self):\r\n clean_products = []\r\n\r\n for category in CATEGORIES:\r\n print(f\"Chargement des produits de type {category}\")\r\n api_url = SEARCH_API_URL + \\\r\n (f\"?search_terms={category}\"\r\n \"&search_tag=category&sort_by=unique_scans_n\"\r\n \"&page_size=1000&json=1\")\r\n json_response = requests.get(api_url).json()\r\n products = json_response[\"products\"]\r\n\r\n for product in products:\r\n clean_product = {\r\n k: v for k, v in product.items()\r\n if k in FIELD_NEEDED and v != ''}\r\n clean_products.append(clean_product)\r\n\r\n return clean_products", "def prepare_product_for_export(self):\n _logger.info(\"Starting product exporting via %s method...\" % self.export_method)\n\n active_template_ids = self._context.get(\"active_ids\", [])\n templates = self.env[\"product.template\"].browse(active_template_ids)\n product_templates = templates.filtered(lambda template: template.type == \"product\")\n if not product_templates:\n raise Warning(\"It seems like selected products are not Storable products.\")\n\n if self.export_method == \"direct\":\n return self.export_direct_in_shopify(product_templates)\n elif self.export_method == \"csv\":\n return self.export_csv_file(product_templates)", "def supports_catalog_search(self):\n return False", "def check_services(self):\n for service in self.services:\n try:\n self.cloud.search_services(service)[0]\n except Exception: # pylint: disable=broad-except\n self.is_skipped = True\n break", "def test_dir_search_doesnt_get_any_product(client, jwt, session, keycloak_mock): # pylint:disable=unused-argument\n headers = factory_auth_header(jwt=jwt, claims=TestJwtClaims.staff_admin_role)\n client.post('/api/v1/users', headers=headers, content_type='application/json')\n rv = 
client.post('/api/v1/orgs', data=json.dumps(TestOrgInfo.org_anonymous),\n headers=headers, content_type='application/json')\n assert rv.status_code == http_status.HTTP_201_CREATED\n dictionary = json.loads(rv.data)\n assert dictionary['accessType'] == 'ANONYMOUS'\n assert schema_utils.validate(rv.json, 'org_response')[0]\n\n rv_products = client.get(f\"/api/v1/orgs/{dictionary.get('id')}/products\", headers=headers,\n content_type='application/json')\n\n list_products = json.loads(rv_products.data)\n assert len([x for x in list_products if x.get('subscriptionStatus') != 'NOT_SUBSCRIBED']) == 0", "def search_product(self):\n cat = []\n product = open_products()\n radio = self.radiobutton_check()\n search = self.lineEdit_search.text()\n _translate = QtCore.QCoreApplication.translate\n __sortingEnabled = self.tableWidget.isSortingEnabled()\n self.tableWidget.setSortingEnabled(False)\n o=0\n if len(self.lineEdit_search.text()) == 0:\n self.show_product()\n else:\n for r in range(0, len(product)):\n if search.upper() in str(product[r][radio]).upper():\n cat.append(product[r])\n for i in range(0, len(cat)):\n for c in range(0, 5):\n item = self.tableWidget.item(i, c)\n item.setText(_translate(\"MainWindow\", str(cat[i][c])))\n o+=1\n else:\n for c in range(0, 5):\n item = self.tableWidget.item(r, c)\n item.setText(_translate(\"MainWindow\", \"\"))\n if o == 0:\n self.frame_3.show()\n self.label_16.setText('PRODUCT NOT FOUND!')", "def catalog_exists(self, args):\n catalog = self.server.connect_ermrest(self.id)\n pp(catalog.exists())", "def _check_product(self):\n\n self.importable = False\n abcde = string.ascii_uppercase[:5]\n product_infos = self.retrieve_product_infos()\n\n if product_infos['product_code'] is not None:\n try:\n Products.objects.get(\n code=product_infos['product_code']\n )\n except Products.DoesNotExist:\n if (\n product_infos['product_name'] is not None\n and product_infos['product_code'] not in ProductImportation.codes\n and product_infos['product_code'] is not None\n and product_infos['product_url'] is not None\n and product_infos['image_url'] is not None\n and product_infos['quantity'] is not None\n and product_infos['ingredients'] is not None\n and product_infos['brands'] != []\n and product_infos['stores'] != []\n and product_infos['countries'] is not None\n and product_infos['compare_to'] is not None\n and product_infos['categories_hierarchy'] is not None\n and product_infos['nutriscore'] in abcde\n and all([product_infos[nutriment] >= 0 for nutriment in self.list_nutriments])\n and Categories.objects.filter(name=product_infos['compare_to']).count() > 0\n ):\n self.name = product_infos['product_name']\n self.product_infos = product_infos\n self.code = product_infos['product_code']\n ProductImportation.codes.append(self.code)\n self.importable = True\n\n return self.importable", "def products_list(driver, login_action, open_products_page, products_page, logger):\n try:\n return products_page.all_products_list()\n except logger.on_exception(exception, driver):\n print(exception)", "def get_exists(self):\n self.exist_products = {}\n limit = 100\n with sa.create_engine(dsn).connect() as dbcon:\n count = [x for x in dbcon.execute(Product.count())][0][0]\n for i in range(count//limit+1):\n sql = sa.select([Product.c.id, Product.c.title]).limit(limit).offset(limit*i)\n part = {hash(x[1]): x[0] for x in dbcon.execute(sql)}\n self.exist_products.update(part)", "def run(self, search_term):\n response = self.API_request(search_term, \"product\")\n products = 
self.prod_parser(response)\n response = self.API_request(products[0][\"categories\"][0], \"substitute\")\n substitutes = self.prod_parser(response)\n search_prod = products[0]\n substitutes.append(search_prod)\n data = {\"substitutes\": substitutes}\n self.insert_data(data)", "def show_available_products():\n LOGGER.debug('Listing all available products.')\n available_products = {}\n with MongoDBConnection() as mongo:\n database = mongo.connection.hp_norton\n for product in database.products.find(\n {'quantity_available': {'$gt': 0}}):\n available_products[product['product_id']] = {\n 'description': product['description'],\n 'product_type': product['product_type'],\n 'quantity_available': product['quantity_available']}\n return available_products", "def product_search(obj, query):\n client = get_client(obj)\n\n pgs = client.product_list(q=query)\n\n print(json.dumps(pgs, indent=4))", "def can_search_catalogs(self):\n # Implemented from kitosid template for -\n # osid.resource.BinQuerySession.can_search_bins_template\n return self._get_provider_session('catalog_query_session').can_search_catalogs()", "def test_get_deployments(self):\n pass", "def test_get_deployments(self):\n pass", "def get_products(self):\n con = dbcon()\n cur = con.cursor()\n cur.execute(\"SELECT * FROM products;\")\n res = cur.fetchall()\n if res:\n prdcts=[]\n for prodct_item in res:\n picked_prdct = {\n 'product_id':prodct_item[0],\n 'product_name':prodct_item[1],\n 'price':prodct_item[2],\n 'quantity':prodct_item[3]\n }\n prdcts.append(picked_prdct)\n return jsonify({\"Products\": prdcts}), 200\n return jsonify({\"message\":\"No products in store\"})", "def test_product_is_installed(self):\n pid = 'collective.favorites'\n installed = [p['id'] for p in self.qi_tool.listInstalledProducts()]\n self.assertTrue(pid in installed,\n 'package appears not to have been installed')", "def show_available_products():\n available_product = {}\n\n if not collection_exist(DATABASE, PRODUCT_COLLECTION):\n return available_product\n\n with MongoDBConnection() as mongo:\n database = mongo.connection[DATABASE]\n\n available_product__count = 0\n for product in database[PRODUCT_COLLECTION].find({\"quantity_available\": {\"$ne\": '0'}}):\n available_product[product['product_id']] = \\\n {'description': product['description'],\n 'product_type': product['product_type'],\n 'quantity_available': product['quantity_available']}\n available_product__count += 1\n\n return available_product__count" ]
[ "0.7160304", "0.5986171", "0.5977867", "0.5391701", "0.53883964", "0.53183687", "0.5263458", "0.52195495", "0.5137301", "0.5083074", "0.504988", "0.503884", "0.5022518", "0.50006783", "0.48638386", "0.48609245", "0.48583177", "0.48581347", "0.4857908", "0.48564598", "0.48457754", "0.482968", "0.4827839", "0.4827129", "0.48131114", "0.4798492", "0.4798492", "0.4794938", "0.47945568", "0.47813064" ]
0.7295413
0
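An illustrative call to the document function above; the provisioned product name is a hypothetical placeholder, and it assumes search_provisioned_products(), scan_provisioned_products(), a configured logger, and valid AWS credentials are available.

# Illustrative call; the provisioned product name below is a made-up placeholder.
import boto3

sc_client = boto3.client("servicecatalog")
result = search_provisioned_products("Example-Account-Provisioned-Product", sc_client)
print(result.get("Status"))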
Retrieve the Default Service Catalog Provisioning Artifact Id from the Service Catalog Product specified in the definition call.
def get_provisioning_artifact_id(product_name: str, client: boto3.client) -> str:
    product_info = client.describe_product(
        Name=product_name
    )
    logger.info(product_info)
    for _product_info in product_info['ProvisioningArtifacts']:
        if _product_info['Guidance'] == 'DEFAULT':
            logger.info(f"Found ProvisioningArtifactId:{_product_info['Id']}")
            return _product_info['Id']
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def default_resource_discovery_association_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"default_resource_discovery_association_id\")", "def get_product_id(self, field_name='PRODUCT_ID'):\n return self.get_default(field_name)", "def default_resource_discovery_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"default_resource_discovery_id\")", "def get_default_product():\n return Product.objects.get_or_create(name='Unknown', category=get_default_category())", "def product_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"product_id\")", "def product_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"product_id\")", "def _get_product(self):\n try:\n return self.activities[industry.MANUFACTURING].products[0].typeID\n except (KeyError, IndexError):\n return None", "def _get_product_id(device_dict):\n return device_dict['product_id'].split('x')[-1]", "def DeploymentId(self) -> _n_0_t_0:", "def product_id(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"product_id\")", "def get_product_code(self, field_name='PRODUCT_ID'):\n return self.get_default(field_name)", "def getId(self):\n return _libsbml.GeneProductAssociation_getId(self)", "def artifact_id(self):\n return self._artifact_id", "def identity(self) -> Optional['outputs.DataCollectionEndpointResourceResponseIdentity']:\n return pulumi.get(self, \"identity\")", "def Ientifier(self, default=None):\n return self.data.get('identifier', default)", "def ORCID(self, default=None):\n return self.data.get('orcid', default)", "def product_id(self):\n return self._product_id", "def product_id(self):\n return self._product_id", "def product_id(self):\n return self._product_id", "def product_id(self):\n return self._product_id", "def apple_id(self):\n if \"appleId\" in self._prop_dict:\n return self._prop_dict[\"appleId\"]\n else:\n return None", "def default_storage_account_id(self) -> str:\n return pulumi.get(self, \"default_storage_account_id\")", "def bundle_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"bundle_id\")", "async def get_cred_def_id(controller, credential_def):\n\n # TODO Determine what is funky here?!\n cred_def_id = credential_def[\"credential_definition_id\"]\n if not cred_def_id:\n raise HTTPException(\n status_code=404,\n detail=\"Something went wrong. Could not find credential definition id from the provided credential definition\",\n )\n return cred_def_id", "def VersionID(self, default=None):\n return self.data.get('version_id', default)", "def _get_project_id():\n\n extras = BaseHook.get_connection('google_cloud_default').extra_dejson\n key = 'extra__google_cloud_platform__project'\n if key in extras:\n project_id = extras[key]\n else:\n raise ('Must configure project_id in google_cloud_default '\n 'connection from Airflow Console')\n return project_id", "def bundle_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"bundle_id\")", "def bundle_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"bundle_id\")", "def bundle_id(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"bundle_id\")", "def identity(self) -> Optional[pulumi.Input['ServiceIdentityArgs']]:\n return pulumi.get(self, \"identity\")" ]
[ "0.62677944", "0.6110953", "0.5900704", "0.56286764", "0.55779773", "0.55494344", "0.5543497", "0.5379514", "0.53641295", "0.5290732", "0.5174435", "0.51636326", "0.51466084", "0.5116483", "0.5071252", "0.5062847", "0.50372", "0.50372", "0.50372", "0.50372", "0.50332355", "0.5016951", "0.5014863", "0.49879563", "0.49775347", "0.49624065", "0.49495882", "0.49495882", "0.49495882", "0.49368235" ]
0.7186167
0
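An illustrative call for the document function above; "AWS Control Tower Account Factory" is assumed here to be the Service Catalog product name that Control Tower registers, and boto3 credentials plus a logger are assumed to be configured.

# Illustrative call; the product name is an assumption about the Control Tower-managed product.
import boto3

sc_client = boto3.client("servicecatalog")
artifact_id = get_provisioning_artifact_id("AWS Control Tower Account Factory", sc_client)
print(artifact_id)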
Build an op used as a target for return values at given quantiles.
def _build_target_quantile_values_op(self):
    batch_size = tf.shape(self._replay.rewards)[0]
    ###### Munchausen-specific
    replay_action_one_hot = tf.one_hot(
        self._replay.actions, self.num_actions, 1., 0., name='action_one_hot')
    # tau * ln pi_k+1 (s')
    replay_next_log_policy = utils.stable_scaled_log_softmax(
        self._replay_next_target_q_values, self.tau, axis=1)
    # tau * ln pi_k+1(s)
    replay_log_policy = utils.stable_scaled_log_softmax(
        self._replay_target_q_values, self.tau, axis=1)
    replay_next_policy = utils.stable_softmax(  # pi_k+1(s')
        self._replay_next_target_q_values, self.tau, axis=1)

    tau_log_pi_a = tf.reduce_sum(  # ln pi_k+1(a|s)
        replay_log_policy * replay_action_one_hot, axis=1)
    tau_log_pi_a = tf.clip_by_value(
        tau_log_pi_a, clip_value_min=self.clip_value_min, clip_value_max=0)
    munchuasen_term = self.alpha * tau_log_pi_a
    #########

    # Shape of rewards: (num_tau_prime_samples x batch_size) x 1.
    rewards = self._replay.rewards[:, None] + munchuasen_term[Ellipsis, None]
    rewards = tf.tile(rewards, [self.num_tau_prime_samples, 1])

    is_terminal_multiplier = 1. - tf.cast(self._replay.terminals, tf.float32)
    # Incorporate terminal state to discount factor.
    # size of gamma_with_terminal: (num_tau_prime_samples x batch_size) x 1.
    gamma_with_terminal = self.cumulative_gamma * is_terminal_multiplier
    gamma_with_terminal = tf.tile(gamma_with_terminal[:, None],
                                  [self.num_tau_prime_samples, 1])

    # shape: (batch_size * num_tau_prime_samples) x num_actions
    replay_next_policy_ = tf.tile(replay_next_policy,
                                  [self.num_tau_prime_samples, 1])
    replay_next_log_policy_ = tf.tile(replay_next_log_policy,
                                      [self.num_tau_prime_samples, 1])

    # shape: (batch_size * num_tau_prime_samples) x 1
    replay_quantile_values = tf.reshape(
        self._replay_net_target_quantile_values,
        [batch_size * self.num_tau_prime_samples, self.num_actions])

    # shape: (batch_size * num_tau_prime_samples) x num_actions
    weighted_logits = (
        replay_next_policy_ * (replay_quantile_values -
                               replay_next_log_policy_))

    # shape: (batch_size * num_tau_prime_samples) x 1
    target_quantile_values = tf.reduce_sum(weighted_logits,
                                           axis=1,
                                           keepdims=True)

    return rewards + gamma_with_terminal * target_quantile_values
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _build_train_op(self):\n batch_size = tf.shape(self._replay.rewards)[0]\n\n target_quantile_values = tf.stop_gradient(\n self._build_target_quantile_values_op())\n # Reshape to self.num_tau_prime_samples x batch_size x 1 since this is\n # the manner in which the target_quantile_values are tiled.\n target_quantile_values = tf.reshape(target_quantile_values,\n [self.num_tau_prime_samples,\n batch_size, 1])\n # Transpose dimensions so that the dimensionality is batch_size x\n # self.num_tau_prime_samples x 1 to prepare for computation of\n # Bellman errors.\n # Final shape of target_quantile_values:\n # batch_size x num_tau_prime_samples x 1.\n target_quantile_values = tf.transpose(target_quantile_values, [1, 0, 2])\n\n # Shape of indices: (num_tau_samples x batch_size) x 1.\n # Expand dimension by one so that it can be used to index into all the\n # quantiles when using the tf.gather_nd function (see below).\n indices = tf.range(self.num_tau_samples * batch_size)[:, None]\n\n # Expand the dimension by one so that it can be used to index into all the\n # quantiles when using the tf.gather_nd function (see below).\n reshaped_actions = self._replay.actions[:, None]\n reshaped_actions = tf.tile(reshaped_actions, [self.num_tau_samples, 1])\n # Shape of reshaped_actions: (num_tau_samples x batch_size) x 2.\n reshaped_actions = tf.concat([indices, reshaped_actions], axis=1)\n\n chosen_action_quantile_values = tf.gather_nd(\n self._replay_net_quantile_values, reshaped_actions)\n # Reshape to self.num_tau_samples x batch_size x 1 since this is the manner\n # in which the quantile values are tiled.\n chosen_action_quantile_values = tf.reshape(chosen_action_quantile_values,\n [self.num_tau_samples,\n batch_size, 1])\n # Transpose dimensions so that the dimensionality is batch_size x\n # self.num_tau_samples x 1 to prepare for computation of\n # Bellman errors.\n # Final shape of chosen_action_quantile_values:\n # batch_size x num_tau_samples x 1.\n chosen_action_quantile_values = tf.transpose(\n chosen_action_quantile_values, [1, 0, 2])\n\n # Shape of bellman_erors and huber_loss:\n # batch_size x num_tau_prime_samples x num_tau_samples x 1.\n bellman_errors = target_quantile_values[\n :, :, None, :] - chosen_action_quantile_values[:, None, :, :]\n # The huber loss (see Section 2.3 of the paper) is defined via two cases:\n # case_one: |bellman_errors| <= kappa\n # case_two: |bellman_errors| > kappa\n huber_loss_case_one = (\n tf.cast(tf.abs(bellman_errors) <= self.kappa, tf.float32) *\n 0.5 * bellman_errors ** 2)\n huber_loss_case_two = (\n tf.cast(tf.abs(bellman_errors) > self.kappa, tf.float32) *\n self.kappa * (tf.abs(bellman_errors) - 0.5 * self.kappa))\n huber_loss = huber_loss_case_one + huber_loss_case_two\n\n # Reshape replay_quantiles to batch_size x num_tau_samples x 1\n replay_quantiles = tf.reshape(\n self._replay_net_quantiles, [self.num_tau_samples, batch_size, 1])\n replay_quantiles = tf.transpose(replay_quantiles, [1, 0, 2])\n\n # Tile by num_tau_prime_samples along a new dimension. 
Shape is now\n # batch_size x num_tau_prime_samples x num_tau_samples x 1.\n # These quantiles will be used for computation of the quantile huber loss\n # below (see section 2.3 of the paper).\n replay_quantiles = tf.cast(\n tf.tile(replay_quantiles[:, None, :, :],\n [1, self.num_tau_prime_samples, 1, 1]), tf.float32)\n # Shape: batch_size x num_tau_prime_samples x num_tau_samples x 1.\n quantile_huber_loss = (tf.abs(replay_quantiles - tf.stop_gradient(\n tf.cast(bellman_errors < 0, tf.float32))) * huber_loss) / self.kappa\n # Sum over current quantile value (num_tau_samples) dimension,\n # average over target quantile value (num_tau_prime_samples) dimension.\n # Shape: batch_size x num_tau_prime_samples x 1.\n loss = tf.reduce_sum(quantile_huber_loss, axis=2)\n # Shape: batch_size x 1.\n loss = tf.reduce_mean(loss, axis=1)\n\n update_priorities_op = tf.no_op()\n with tf.control_dependencies([update_priorities_op]):\n if self.summary_writer is not None:\n with tf.variable_scope('Losses'):\n tf.summary.scalar('QuantileLoss', tf.reduce_mean(loss))\n return self.optimizer.minimize(tf.reduce_mean(loss)), tf.reduce_mean(loss)", "def _make_executor(self, expr=None):\n raise NotImplementedError()", "def get_quantiles(data, quantiles):\n variables = list(data.data_vars)\n proxy_quant_df = pd.DataFrame(columns=[str(i) for i in quantiles], index=variables)\n for var in variables:\n for q in quantiles:\n proxy_quant_df.loc[var, str(q)] = data[var].quantile(q).values\n\n return proxy_quant_df", "def quantile_transform(X, *, axis=..., n_quantiles=..., output_distribution=..., ignore_implicit_zeros=..., subsample=..., random_state=..., copy=...):\n ...", "def generate_operand(uri):\n pass", "def quantile_func(q):\n def f(x):\n return np.quantile(x, q)\n\n return f", "def performance_quantiles(data, performance_measure):\n quantiles = pd.qcut(x=data[performance_measure], q=4, labels=['q1', 'q2', 'q3', 'q4'])\n bins = quantiles.to_frame(name=performance_measure + '_quantiles')\n data_quantiles = pd.merge(data, bins, right_index=True, left_index=True)\n data_quantiles.dropna(inplace=True)\n data_quantiles.sort_values(performance_measure + '_quantiles', inplace=True)\n return data_quantiles", "def _quantile(data, quantile):\r\n index = quantile * (len(data) - 1)\r\n bottom_index = int(floor(index))\r\n top_index = int(ceil(index))\r\n\r\n difference = index - bottom_index\r\n output = (1 - difference) * \\\r\n data[bottom_index] + difference * data[top_index]\r\n\r\n return output", "def createDecile(resp:pd.Series,score:pd.Series,buckets:int=10) -> pd.DataFrame: \n \n input_df=pd.DataFrame({'target_1':resp,'score':score}) \n input_df['target_0'] = 1-input_df['target_1'] \n input_df['decile'] = pd.qcut(input_df['score'],buckets,duplicates='drop')\n binned_df = input_df.groupby('decile', as_index = False)\n \n aggregated_df = pd.DataFrame()\n aggregated_df['min_score'] = binned_df.min().score.apply('{0:.3f}'.format)\n aggregated_df['max_score'] = binned_df.max().score.apply('{0:.3f}'.format)\n aggregated_df['target_1'] = binned_df.sum().target_1\n aggregated_df['target_0'] = binned_df.sum().target_0\n aggregated_df['total'] = (aggregated_df['target_1'] + aggregated_df['target_0'])\n aggregated_df['target_1_ratio'] = (aggregated_df['target_1'] / aggregated_df['total']).apply('{0:.1%}'.format)\n aggregated_df['mean_score'] = binned_df.mean().score.apply('{0:.3f}'.format) \n \n sorted_df = (aggregated_df.sort_values(by = 'max_score', ascending = False)).reset_index(drop = True)\n sorted_df['gain'] = 
(sorted_df['target_1'].cumsum()/sorted_df['target_1'].sum()).apply('{0:.1%}'.format)\n sorted_df['lift'] = ((sorted_df['target_1']/sorted_df.total)/(sorted_df['target_1'].sum()/sorted_df.total.sum())).apply('{0:.2f}'.format)\n sorted_df['KS'] = np.round(((sorted_df['target_1'] / sorted_df['target_1'].sum()).cumsum() - (sorted_df['target_0'] / sorted_df['target_0'].sum()).cumsum()), 4) * 100\n \n mark = lambda x: '◄─ ' if x == sorted_df.KS.max() else ''\n sorted_df['max_KS'] = sorted_df.KS.apply(mark)\n sorted_df.index +=1\n \n return sorted_df", "def make_quantiles(self, X, n_quantiles=9):\n if n_quantiles < 1:\n raise FeatureImpactError(\"n_quantiles must be at least one.\")\n X = pandas.DataFrame(X)\n probs = numpy.linspace(0.0, 1.0, n_quantiles + 2)[1:-1]\n self._quantiles = pandas.DataFrame(dtype=float)\n for col in X:\n feature = X[col].dropna().values\n values = []\n for quantile in mquantiles(feature, probs):\n closest = numpy.abs(feature - quantile).argmin()\n values.append(feature[closest])\n self._quantiles[col] = values", "def construct_target(trainSamples):\n feature_names = [\"click_bool\", \"booking_bool\",\"position\"]\n samples = trainSamples[feature_names].values\n def f(vec):\n x, y, z = vec\n return y + 0.2 * x\n return [f(vec) for vec in samples]", "def fitForQuantiles(self, quantiles):\n results = []\n for tau in quantiles:\n res = self.fit(tau)\n results.append(res.results)\n convergence_failure = False\n if res.regression.iterations >= 1000:\n convergence_failure = True\n self.convergence_failures.append({'name':self.endog_original.name, **res.results, \n 'convergence_failure':convergence_failure})\n df = pd.DataFrame(results)\n\n # Adding the QKS statistic\n df['QKS'] = max(map(abs, df['tₙ(τ)']))\n df['name'] = self.endog.name\n df.set_index('quantile', inplace=True)\n return df", "def default_quantile():\n return np.logspace(-5, 0, 100)", "def _build_summary_op(self, results=None, features=None, labels=None):\n summary_op = []\n for summary in self.summaries:\n if summary == summarizer.SummaryOptions.ACTIVATIONS:\n activations = get_tracked(tf.GraphKeys.ACTIVATIONS)\n summary_op += summarizer.add_activations_summary(activations)\n elif summary == summarizer.SummaryOptions.VARIABLES:\n variables = tf.trainable_variables()\n summary_op += summarizer.add_trainable_vars_summary(variables)\n elif summary == summarizer.SummaryOptions.GRADIENTS and self._clip_gradients > 0.0:\n summary_op += summarizer.add_gradients_summary(self._grads_and_vars)\n elif summary == summarizer.SummaryOptions.LOSS:\n summary_op += summarizer.add_loss_summaries(self._total_loss, self._loss)\n elif summary == summarizer.SummaryOptions.LEARNING_RATE:\n summary_op += summarizer.add_learning_rate_summaries()\n elif summary == summarizer.SummaryOptions.IMAGE_INPUT:\n summary_op += summarizer.add_image_summary(features, op_name='inputs')\n elif summary == summarizer.SummaryOptions.IMAGE_RESULT:\n summary_op += summarizer.add_image_summary(results, op_name='results')\n\n # no need to tf.summary.merge(summary_op), for now we merge all at hook level\n return summary_op", "def gen_jobs(lower_idx, upper_idx, target=\"llvm\"):\n return [LorienTestWorkload(target, idx).to_job() for idx in range(lower_idx, upper_idx)]", "def find_quant(trainy, train_tree_node_ID, pred_tree_node_ID, qntl):\n npred = pred_tree_node_ID.shape[0]\n out = np.zeros((npred, qntl.size))*np.nan\n for i in prange(pred_tree_node_ID.shape[0]):\n idxs = np.where(train_tree_node_ID == pred_tree_node_ID[i, :])[0]\n sample = trainy[idxs]\n 
out[i, :] = np.quantile(sample, qntl)\n return out", "def createQuantile(data, column_name, cut_of_point):\r\n data[data[column_name] > data[column_name].quantile(cut_of_point)] = 0\r\n return data", "def to_quantiles(self, y_pred: torch.Tensor) -> torch.Tensor:\n return self.metrics[0].to_quantiles(y_pred)", "def predict_quantiles(self, X, quantiles=(2.5, 97.5), kern=None, **kwargs):\n mu, var = self._raw_predict(X, full_cov=False, kern=kern)\n quantiles = [stats.t.ppf(q / 100., self.nu + 2 + self.num_data) * np.sqrt(var) + mu for q in quantiles]\n\n if self.normalizer is not None:\n quantiles = [self.normalizer.inverse_mean(q) for q in quantiles]\n\n return quantiles", "def quantile_loss(pred, true, quantiles, mask=None, weights=None):\n \n assert(len(quantiles)==pred.shape[2])\n \n Q = torch.cat([q*torch.ones_like(true) for q in quantiles], dim=2)\n pinball = Q*F.relu(true - pred) + (1.-Q)*F.relu(pred - true)\n mean_pinball = pinball.mean(axis=[0,1])\n return mean_pinball", "def _build_target_q_op(self):\n targets = []\n for gamma, target_q in zip(self.gammas,\n self._replay_next_target_net_outputs.q_values):\n # Get the maximum Q-value across the actions dimension.\n replay_next_qt_max = tf.reduce_max(target_q, 1)\n\n # Calculate the Bellman target value.\n # Q_t = R_t + \\gamma^N * Q'_t+1\n # where,\n # Q'_t+1 = \\argmax_a Q(S_t+1, a)\n # (or) 0 if S_t is a terminal state,\n # and\n # N is the update horizon (by default, N=1).\n cumulative_gamma = math.pow(gamma, self.update_horizon)\n n_step_reward = self._build_discounted_n_step_rewards(gamma)\n targets.append(n_step_reward + cumulative_gamma * replay_next_qt_max *\n (1. - tf.cast(self._replay.terminals, tf.float32)))\n return targets", "def compute_quantile(risk, T_max: int, scenario_numbers, quantile):\r\n\r\n print(\"\\tComputing Quantile...\")\r\n # Init quantile\r\n q = np.zeros(T_max)\r\n for t in range(T_max):\r\n risk[t].sort()\r\n q[t] = risk[t][int(np.ceil(scenario_numbers[t] * quantile)) - 1]\r\n print(\"\\tDone\")\r\n\r\n return q", "def wrapper(*args, **kwargs):\n assert not kwargs, \"Do not support kwargs in template function call\"\n task_env = TaskExtractEnv.current\n if task_env is not None and task_env.tracing:\n task_env.add_task(task_name, args)\n workload = args_to_workload(args, task_name)\n tgt = Target.current()\n cfg = DispatchContext.current.query(tgt, workload)\n node = topi_compute(cfg, *args)\n\n # attach workload to return op\n op = node.op\n attrs = {}\n for k, v in node.op.attrs.items():\n attrs[k] = v\n attrs[\"workload\"] = workload\n if isinstance(op, tensor.ComputeOp):\n op = tvm.te._ffi_api.ComputeOp(op.name, op.tag, attrs, op.axis, op.body)\n elif isinstance(op, tensor.ExternOp):\n op = tvm.te._ffi_api.ExternOp(\n op.name,\n op.tag,\n attrs,\n op.inputs,\n op.input_placeholders,\n op.output_placeholders,\n op.body,\n )\n else:\n raise RuntimeError(\"Unsupported op type: \" + str(type(op)))\n\n if isinstance(node, tensor.Tensor):\n return op.output(0)\n return [op.output(i) for i in range(len(node))]", "def build_metric_func(dataset_split_name, add_summary=True):\n\n def metric_func(labels, logits):\n \"\"\"Evaluation metric function that runs on CPU.\"\"\"\n accuracy_metric_name = 'Eval/Accuracy/%s' % dataset_split_name\n metric_map = {\n accuracy_metric_name: tf.metrics.accuracy(labels, tf.argmax(logits, 1)),\n }\n if add_summary:\n for name, value in metric_map.items():\n tf.summary.scalar(name, value)\n return metric_map\n\n return metric_func", "def make_arg(arg):\n return arg.result if 
isinstance(arg, (Op, Block)) else arg", "def _process_quantiles(x, dim):\r\n x = np.asarray(x, dtype=float)\r\n\r\n if x.ndim == 0:\r\n x = x[np.newaxis]\r\n elif x.ndim == 1:\r\n if dim == 1:\r\n x = x[:, np.newaxis]\r\n else:\r\n x = x[np.newaxis, :]\r\n\r\n return x", "def predict(self, X, quantile=0.5):\n qntl = np.asanyarray(quantile)\n ntrees = self.forest.n_estimators\n ntrain = self.trainy.shape[0]\n npred = X.shape[0]\n pred_tree_node_ID = np.zeros([npred, ntrees])\n\n for i in range(ntrees):\n pred_tree_node_ID[:, i] = self.forest.estimators_[i].apply(X)\n\n return find_quant(self.trainy, self.train_tree_node_ID, pred_tree_node_ID.astype('h'), qntl)", "def quantile(self, quantile: float, accuracy: int = 10000) -> FrameLike:\n return super().quantile(quantile, accuracy)", "def quantile(self, quantile: float, accuracy: int = 10000) -> FrameLike:\n return super().quantile(quantile, accuracy)", "def quantile(self, quantile: float, accuracy: int = 10000) -> FrameLike:\n return super().quantile(quantile, accuracy)" ]
[ "0.5792982", "0.5327944", "0.51979405", "0.51626", "0.5160351", "0.5001683", "0.4997703", "0.49878305", "0.49823052", "0.49398604", "0.48812854", "0.48698434", "0.4853174", "0.4833269", "0.48265964", "0.47628295", "0.47479182", "0.47349328", "0.47272837", "0.4723074", "0.47207436", "0.47106826", "0.47088286", "0.47077182", "0.47015458", "0.46683112", "0.46673626", "0.46393627", "0.46393627", "0.46393627" ]
0.6132188
0
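For readability, the target built by the _build_target_quantile_values_op document in the row above appears to follow the Munchausen-RL form, which per quantile sample reads roughly:

\[
\hat{z}(s,a) \;\approx\; r \;+\; \alpha\,\big[\tau \ln \pi(a \mid s)\big]_{l_0}^{0}
\;+\; \gamma \sum_{a'} \pi(a' \mid s')\,\big(z(s',a') - \tau \ln \pi(a' \mid s')\big)
\]

where \(\pi\) is the softmax of the target Q-values at temperature \(\tau\), the bracket denotes clipping to [clip_value_min, 0], and \(\gamma\) already carries the cumulative discount and the terminal mask. This reading is an interpretation of the code, not a statement from the dataset itself.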
Returns a new fox instance. Expects a gender flag (True = male) and a location.
def create_fox(a_male, a_location):
    fox = None
    if a_male:
        fox = Fox()
    else:
        fox = Vixen()
    fox.location = a_location
    return fox
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def person_object_factory():\n person = {\n 'lastName': rl_fake().last_name(),\n 'gender': random.choice(('M', 'F'))\n }\n\n # Make the person's name match their gender.\n person['firstName'] = rl_fake().first_name_male() if person['gender'] == 'M' else rl_fake().first_name_female()\n\n # These are all optional in the DB. Over time, we'll try all possibilities.\n if flip():\n person['birthday'] = rl_fake().date_of_birth(minimum_age=18).strftime('%Y-%m-%d')\n if flip():\n person['phone'] = rl_fake().phone_number()\n if flip():\n person['email'] = rl_fake().email()\n return person", "def gender():\n return random.choice((GENDER_FEMALE, GENDER_MALE))", "def faked_location(self):\n if self.location:\n return self.location\n else:\n location = UserLocation()\n location.lat = settings.FAKED_LAT\n location.lng = settings.FAKED_LNG\n location.updated_at = datetime.now() - timedelta(minutes=random.randint(0, 60 * 24 * 30))\n self.location = location\n\n return location", "def __init__(self, first_name, last_name, age, gender):\n self.first_name = first_name\n self.last_name = last_name\n self.age = age\n self.gender = gender", "def create(**kwargs):\n\n generator = AbilityGeneratorFactory()\n abilities = generator.create(method=kwargs.get('generator',\n BEST_OF_THREE), profession=kwargs.get('profession'))\n return Dwarf(abilities, kwargs['st'])", "def gender():\r\n\r\n return _random.choice(['Male', 'Female'])", "def __init__(self, gender=None, top_attire_color=None, bottom_attire_color=None, top_type=None, bottom_type=None, is_long_sleeve=None, is_headdress_present=None, is_bag_present=None): # noqa: E501 # noqa: E501\n self._gender = None\n self._top_attire_color = None\n self._bottom_attire_color = None\n self._top_type = None\n self._bottom_type = None\n self._is_long_sleeve = None\n self._is_headdress_present = None\n self._is_bag_present = None\n self.discriminator = None\n if gender is not None:\n self.gender = gender\n if top_attire_color is not None:\n self.top_attire_color = top_attire_color\n if bottom_attire_color is not None:\n self.bottom_attire_color = bottom_attire_color\n if top_type is not None:\n self.top_type = top_type\n if bottom_type is not None:\n self.bottom_type = bottom_type\n if is_long_sleeve is not None:\n self.is_long_sleeve = is_long_sleeve\n if is_headdress_present is not None:\n self.is_headdress_present = is_headdress_present\n if is_bag_present is not None:\n self.is_bag_present = is_bag_present", "def __init__(self, f_name, l_name, age, gender, m_number):\n self.f_name = f_name\n self.l_name = l_name\n self.age = age\n self.gender = gender\n self.m_number = m_number", "def person(languages=None, genders=None):\n languages = languages or ['en']\n genders = genders or (GENDER_FEMALE, GENDER_MALE)\n\n\n lang = random.choice(languages)\n g = random.choice(genders)\n t = title([lang], [g])\n return first_name([lang], [g]), last_name([lang]), t, g", "def __init__(self, name, age, gender):\n\n self._name = name\n self._age = age\n self._gender = gender\n self._friend = None", "def post(self):\n return self.get_request_handler(request.headers).create_new_gender(request)", "def __init__(self, name: str=None, gender: str=None): # noqa: E501\n self.swagger_types = {\n 'name': str,\n 'gender': str\n }\n\n self.attribute_map = {\n 'name': 'name',\n 'gender': 'gender'\n }\n self._name = name\n self._gender = gender", "def first_name_and_gender():\n fn=''\n mn=''\n g = 'M' if random.randint(0,1) == 0 else 'F'\n if g=='M':\n fn = fake.first_name_male()\n mn = 
fake.first_name_male()\n else:\n fn = fake.first_name_female()\n mn = fake.first_name_female()\n\n return fn,mn", "def UpdateablePersonFactory(FullPersonFactory):\n def create_updateable_person(address_book, **kw):\n kw.setdefault('keywords', [KEYWORD])\n kw.setdefault('last_name', u'Tester')\n return FullPersonFactory(address_book, **kw)\n return create_updateable_person", "def define_gender(name_input):\n if not os.path.isfile('train_set.txt') and not os.path.isfile('test_set'):\n \"\"\"\n We take a sample of male and female names and mix\n them in order to create a training set and testing set\n \"\"\"\n labeled_names = ([(name, 'male') for name in names.words('male.txt')] +\n [(name, 'female') for name in names.words(\n 'female.txt')])\n random.shuffle(labeled_names)\n\n \"\"\"\n We train the classifier and return the gender of the name\n \"\"\"\n featuresets = [(gender_features(n), gender) for (n, gender)\n in labeled_names]\n train_set, test_set = featuresets[-500:], featuresets[:500]\n classifier = nltk.NaiveBayesClassifier.train(train_set)\n with open('train_set.txt', 'wb') as handle:\n pickle.dump(train_set, handle)\n with open('test_set.txt', 'wb') as handle:\n pickle.dump(test_set, handle)\n with open('classifier.txt', 'wb') as handle:\n pickle.dump(classifier, handle)\n\n with open('train_set.txt', 'rb') as handle:\n train_set = pickle.load(handle)\n with open('test_set.txt', 'rb') as handle:\n test_set = pickle.load(handle)\n with open('classifier.txt', 'rb') as handle:\n classifier = pickle.load(handle)\n\n classifier = nltk.NaiveBayesClassifier.train(train_set)\n# accuracy = nltk.classify.accuracy(classifier, test_set)\n# classifier.show_most_informative_features(10)\n# print accuracy\n\n \"\"\"\n Accuracy: .804\n Most Informative Features\n last_letter = u'a' female : male = 44.0 : 1.0\n last_letter = u'd' male : female = 23.7 : 1.0\n last_two_letters = u'on' male : female = 11.0 : 1.0\n first_two_letters = u'ha' male : female = 7.8 : 1.0\n last_two_letters = u'ta' female : male = 7.0 : 1.0\n last_letter = u't' male : female = 6.7 : 1.0\n last_letter = u'o' male : female = 6.0 : 1.0\n last_two_letters = u'll' male : female = 4.7 : 1.0\n first_two_letters = u'te' male : female = 4.7 : 1.0\n last_two_letters = u'an' male : female = 4.1 : 1.0\n \"\"\"\n\n return classifier.classify(gender_features(name_input))", "def create(cls, *args, **kwargs):\n family = cls(*args, **kwargs)\n family.full_clean()\n family.save()\n return family", "def test_world(self):\n f = AvatarFactory('world')\n self.assertEqual(f.world, 'world')", "def __init__(self, location, state = State.E):\n self.location = location\n self.state = state", "def _create_user(self, email, username, password, gender=2, **extra_fields):\n if not email:\n raise ValueError('The given email must be set')\n\n email = self.normalize_email(email)\n username = self.model.normalize_username(username)\n user = self.model(email=email, username=username, gender=gender, **extra_fields)\n user.set_password(password)\n user.save(using=self._db)\n\n return user", "def locationFactory(geomType):\n options = {\n geometry.CARTESIAN: CartesianLocation,\n geometry.RZT: ThetaRZLocation,\n geometry.HEX: HexLocation,\n geometry.DODECAGON: HexLocation, # yes, it's same as hex. 
That's what we want.\n geometry.RZ: ThetaRZLocation,\n # database names...\n geometry.REC_PRISM: CartesianLocation,\n geometry.HEX_PRISM: HexLocation,\n geometry.ANNULUS_SECTOR_PRISM: ThetaRZLocation,\n }\n\n locClass = options.get(geomType)\n if not locClass:\n raise ValueError('Unsupported geometry option: \"{}\"'.format(geomType))\n\n return locClass", "def test_patient_one_gender(self):\r\n self.assertEqual(self.test_patient.gender, 'Male')", "def __init__(self, age=None, civilstatus=None, earnings=None, gender=None, habit=None, hobby=None, kind_of_bussiness=None, kind_of_occupation=None, location=None, moving=None, occupation=None, position=None): # noqa: E501 # noqa: E501\n self._age = None\n self._civilstatus = None\n self._earnings = None\n self._gender = None\n self._habit = None\n self._hobby = None\n self._kind_of_bussiness = None\n self._kind_of_occupation = None\n self._location = None\n self._moving = None\n self._occupation = None\n self._position = None\n self.discriminator = None\n if age is not None:\n self.age = age\n if civilstatus is not None:\n self.civilstatus = civilstatus\n if earnings is not None:\n self.earnings = earnings\n if gender is not None:\n self.gender = gender\n if habit is not None:\n self.habit = habit\n if hobby is not None:\n self.hobby = hobby\n if kind_of_bussiness is not None:\n self.kind_of_bussiness = kind_of_bussiness\n if kind_of_occupation is not None:\n self.kind_of_occupation = kind_of_occupation\n if location is not None:\n self.location = location\n if moving is not None:\n self.moving = moving\n if occupation is not None:\n self.occupation = occupation\n if position is not None:\n self.position = position", "def get_random_first_name (gender = None):\n if not gender:\n gender = random.choice(('f', 'm'))\n\n if gender == 'f':\n return get_random_female_name()\n else:\n return get_random_male_name()", "def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> BookingStaffMember:\n if not parse_node:\n raise TypeError(\"parse_node cannot be null.\")\n return BookingStaffMember()", "def set_gender(self, g, line_number=0):\n self.gender = g\n self._gender_line = line_number", "def new_fountain():\n fountain = Composite()\n set_dungeon_feature_components(fountain)\n fountain.set_child(Description(\"Fountain\",\n (\"A Fountain full of clean water\",\n \"surely you will become more\",\n \"healthy by drinking this.\")))\n fountain.set_child(GraphicChar(None, colors.CYAN, icon.FOUNTAIN_FULL))\n fountain.set_child(DrinkFromFountainAction())\n return fountain", "def gender(self, value: str) -> None:\n gender_mapper = {\n 'Muž': 'm',\n 'm': 'm',\n 'Žena': 'z',\n 'z': 'z'\n }\n self._gender = gender_mapper[value]", "def new_family(request, **kwargs):\n varz = default_family_form_vars()\n varz.update(kwargs)\n c = RequestContext(request, varz)\n t = loader.get_template('family_info/add_edit_family.html')\n return HttpResponse(t.render(c))", "def faker() -> Faker:\n\n return Faker()", "def test_components_profile_gender(self):\r\n\t\tself.assertEqual(self.u1.profile.gender, 'female')" ]
[ "0.5613212", "0.5395922", "0.5308169", "0.5271084", "0.5211801", "0.5208754", "0.5162016", "0.51154745", "0.5076069", "0.5022176", "0.50201225", "0.50070053", "0.5006258", "0.5003537", "0.49931666", "0.49746263", "0.49349275", "0.4879405", "0.4857509", "0.48508057", "0.4823837", "0.4819669", "0.48189038", "0.48067102", "0.47797886", "0.4765897", "0.4756127", "0.47546333", "0.475058", "0.47454494" ]
0.77177227
0
Splits a daterange in even buckets
def _split_date_range(start, end, intv): previous = start diff = (end - start) / intv for i in range(1, intv): current = start + diff * i yield (previous, current) previous = current yield (previous, end)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_split_ranges(self):\n start = datetime.utcnow() - pd.Timedelta(\"5H\")\n end = datetime.utcnow() + pd.Timedelta(\"5min\")\n delta = pd.Timedelta(\"1H\")\n\n ranges = QueryProvider._calc_split_ranges(start, end, delta)\n self.assertEqual(len(ranges), 5)\n self.assertEqual(ranges[0][0], start)\n self.assertEqual(ranges[-1][1], end)\n\n st_times = [start_tm[0] for start_tm in ranges]\n for end_time in (end_tm[1] for end_tm in ranges):\n self.assertNotIn(end_time, st_times)\n\n end = end + pd.Timedelta(\"20min\")\n ranges = QueryProvider._calc_split_ranges(start, end, delta)\n self.assertEqual(len(ranges), 5)\n self.assertEqual(ranges[0][0], start)\n self.assertEqual(ranges[-1][1], end)", "def split_range(r, n):\n \n step = int(r / n)\n segments = []\n for i in range(n):\n new_segment = [step * i, step * (i + 1)]\n segments.append(new_segment)\n # correct the gap in the missing index due to the truncated step\n segments[-1][-1] = r\n return segments", "def chunk_periods(start, end):\n\n logging.debug(f'chunking {start} to {end}')\n # convert the strings to datetime objects\n #start = dt.datetime.strptime(''.join(start.rsplit(':', 1)), '%Y-%m-%dT%H:%M:%S-%z')\n start = dt.datetime.strptime(start, '%Y-%m-%dT%H:%M:%S-%z')\n logging.debug(f'start: {start}')\n periods = []\n\n # if the year and month of the period are the same, just return the dates as we got them\n\n\n\n return periods", "def split_half(slice):\n slice = set_date(slice)\n slice['Date'] = pd.to_datetime(slice['Date'])\n col_names = ['Date', 'Label', 'Text Extract', 'Processed']\n slice = slice[col_names]\n max_date = slice['Date'].max()\n min_date = slice['Date'].min()\n split_day = min_date + (max_date-min_date) // 2\n slice_b = slice[slice['Date'] > split_day]\n slice = slice[slice['Date'] <= split_day]\n return slice, slice_b", "def split_range(valsize, step, start, end):\n \n shift = 0\n while True:\n diff = 1 << (shift + step)\n mask = ((1 << step) - 1) << shift\n setbits = lambda x: x | ((1 << shift) - 1)\n \n haslower = (start & mask) != 0\n hasupper = (end & mask) != mask\n \n not_mask = ~mask & ((1 << valsize + 1) - 1)\n nextstart = (start + diff if haslower else start) & not_mask\n nextend = (end - diff if hasupper else end) & not_mask\n \n if shift + step >= valsize or nextstart > nextend:\n yield (start, setbits(end), shift)\n break\n \n if haslower:\n yield (start, setbits(start | mask), shift)\n if hasupper:\n yield (end & not_mask, setbits(end), shift)\n \n start = nextstart\n end = nextend\n shift += step", "def merge_ranges():", "def split_iter(self, delta):\n interval_start = self.start_date\n while interval_start < self.end_date:\n interval_end = interval_start + delta\n if interval_end > self.end_date:\n interval_end = self.end_date\n yield DateRange(interval_start, interval_end)\n interval_start = interval_end + relativedelta(days=1)", "def _split_chunk_bounds(\n start: int, stop: int, multiple: int,\n) -> List[Tuple[int, int]]:\n # pylint: disable=g-doc-args\n # pylint: disable=g-doc-return-or-yield\n if multiple == -1:\n return [(start, stop)]\n assert start >= 0 and stop > start and multiple > 0, (start, stop, multiple)\n first_multiple = (start // multiple + 1) * multiple\n breaks = list(range(first_multiple, stop, multiple))\n return list(zip([start] + breaks, breaks + [stop]))", "def find_ranges(iterable):\n for group in mit.consecutive_groups(iterable):\n group = list(group)\n if len(group) == 1:\n yield [group[0], group[0]]\n else:\n yield [group[0], group[-1]]\n return group", "def 
interval_split(a,b,split_ps):\n ps = [a] + [s for s in sorted(split_ps) if a < s < b] + [b]\n return [(p1,p2) for p1,p2 in zip(ps,ps[1:])]", "def ex_crange(data):\n center = minv = maxv = spread = 0\n step = 1\n try:\n center = int(data[0])\n spread = int(data[1])\n if len(data) > 2:\n step = int(data[2])\n minv = center - spread/2\n maxv = center + spread/2\n except ValueError:\n pass\n if step == 0:\n step = 1\n if minv > maxv:\n minv, maxv = maxv, minv\n rv = [center]\n v = center - step\n while minv <= v <= maxv:\n rv.insert(0, v)\n v -= step\n v = center + step\n while minv <= v <= maxv:\n rv.append(v)\n v += step\n return rv", "def calculate_ranges(a, b):\n try:\n ranges = list(range(0, a, a//b))\n if ranges[-1] != a:\n ranges.append(a)\n return ranges\n except ValueError:\n return [0, a]", "def _get_sharded_ranges(\n begin,\n end,\n max_length,\n):\n if max_length <= 0:\n raise ValueError(\"max_length <= 0.\")\n length = end - begin\n if length <= max_length:\n return [(begin, end)]\n pivot = begin + length // 2\n return (_get_sharded_ranges(begin, pivot, max_length) +\n _get_sharded_ranges(pivot, end, max_length))", "def r_style_interval(from_tuple, end_tuple, frequency):\n from_year, from_seg = from_tuple\n end_year, end_seg = end_tuple\n n = (end_year - from_year + 1) * frequency\n full_range = np.linspace(from_year, end_year + 1, num=n, endpoint=False)\n real_range = full_range[(from_seg - 1):n - (frequency - end_seg)]\n return real_range", "def Split(self, k):\n n = len(self)\n start = range(0, n, ceil(n / k))\n end = list(start[1:]) + [n]\n return [range(first, last) for first, last in zip(start, end)]", "def ex_range(data):\n a, b, step = _cleanse_range_args(data)\n return list(range(a, b+sign(step), step))", "def _build_intervals(self) -> List[Tuple[datetime.datetime, datetime.datetime]]:\n if self.granularity == 'HOUR':\n days = max(min((self.bounds[1] - self.bounds[0]).days,\n self.GRANULARITIES['HOUR'][1]),\n self.GRANULARITIES['HOUR'][0])\n interval_length = datetime.timedelta(days=days)\n offset = datetime.timedelta(hours=1)\n elif self.granularity == 'MONTH':\n # no need to split requests for monthly data\n days = max((self.bounds[1] - self.bounds[0]).days,\n self.GRANULARITIES['MONTH'][0])\n interval_length = datetime.timedelta(days=days)\n offset = datetime.timedelta(days=1)\n else:\n days = max(min((self.bounds[1] - self.bounds[0]).days,\n self.GRANULARITIES['DAY'][1]),\n self.GRANULARITIES['DAY'][0])\n interval_length = datetime.timedelta(days=days)\n offset = datetime.timedelta(days=1)\n\n time_pointer = self.bounds[1]\n intervals = []\n while time_pointer > self.bounds[0]:\n upper = time_pointer\n time_pointer -= interval_length\n intervals.append((time_pointer, upper))\n time_pointer -= offset\n return intervals", "def chunk_date_range(self, start_datetime, end_datetime, chunk_size):\n self.log.info(f'Chunking period {start_datetime} to {end_datetime} into chunks of {chunk_size} days.')\n for n in range(int ((end_datetime - start_datetime).days) + 1):\n if n/chunk_size == int(n/chunk_size):\n start = start_datetime + datetime.timedelta(n)\n end = start_datetime + datetime.timedelta(n+chunk_size)\n \n # if we reach the end_datetime, return that instead of end\n if end < end_datetime:\n yield (start, end)\n else:\n yield (start, end_datetime)", "def get_date_range(num_days):\n\n date1 = datetime.datetime.utcnow()\n dateranges = []\n \n if num_days > 90:\n chunks = math.ceil(num_days/90)\n print('Breaking dates into into', chunks,'90 day chunks.')\n\n for chunk in 
range(1,chunks+1):\n date2 = date1 - datetime.timedelta(days=90)\n\n start = add_milliseconds(date1)\n end = add_milliseconds(date2)\n\n print('Chunk', chunk, ': ', date1, 'to', date2)\n dateranges.append((start,end))\n date1 = date2 - datetime.timedelta(days=1)\n \n else: \n date1 = datetime.datetime.utcnow()\n date2 = date1 - datetime.timedelta(days=num_days)\n \n start = add_milliseconds(date1)\n end = add_milliseconds(date2)\n \n dateranges.append((start,end))\n \n return(dateranges)", "def expand_ranges(ranges):\n for low, high in low_high_pairs:\n for j in range(low, high+1):\n yield j", "def get_splits(ntot, nper):\n beglist = numpy.arange(0,ntot,nper)\n endlist = numpy.arange(0,ntot,nper) + nper - 1\n\n if (ntot % nper) != 0:\n endlist[-1] = ntot-1\n return beglist, endlist", "def split_kbucket(self):\n cur_range_size = self.range_max - self.range_min\n half_point = self.range_min + cur_range_size // 2\n\n # Ensure no empty range is created.\n assert self.range_min < half_point < self.range_max\n\n # Make the instantiation dependent on the actual class,\n # for easy inheritance.\n new_kbucket = self.__class__(half_point, self.range_max)\n\n # Halve the ID space of the split KBucket.\n self.range_max = half_point\n\n # Split the contact list into two, according to the new ranges.\n self._contacts, new_kbucket._contacts = util.partition(\n self._contacts,\n self.contact_in_range\n )\n\n return new_kbucket", "def findRanges(data_grouped):\n ranges = []\n for i in data_grouped.columns:\n theRange = (data_grouped[i].min(), data_grouped[i].max())\n ranges.append(theRange)\n return ranges", "def split_segments(old_seg_ends, B):\n new_segment_ends = []\n for q in range(0, B.size):\n new_ends = list(np.linspace(old_seg_ends[q], old_seg_ends[q + 1], B[q] + 1))\n new_segment_ends.extend(new_ends[:-1])\n new_segment_ends.extend([1])\n new_segment_ends = np.asarray(new_segment_ends)\n return new_segment_ends", "def _bucket_boundaries(max_length, min_length=8, length_bucket_step=1.1):\n assert length_bucket_step > 1.0\n x = min_length\n boundaries = []\n while x < max_length:\n boundaries.append(x)\n x = max(x + 1, int(x * length_bucket_step))\n return boundaries", "def new_ranges(rs):\n return tuple(chain(*[new_range(r) for r in rs]))", "def buildIntervalSegs(array, interval: int):\n interSegs = []\n for i in range(interval):\n interSegs.append(array[i::interval])\n return np.array(interSegs)", "def datelst_get_month_aligned_bounds(dates_):\n dfirst = dates_[0]\n dlast = dates_[-1]\n\n bound_lo = dt.datetime(dfirst.year, dfirst.month, 1)\n bound_hi = (dt.datetime(dates_[-1].year, dates_[-1].month, 1)+dt.timedelta(days=32))\n bound_hi.replace(day=1)\n bound_hi = bound_hi.replace(day=1) - dt.timedelta(seconds=1)\n\n return (bound_lo, bound_hi)", "def daterange(start_date, end_date):\n for n in range(int ((end_date - start_date).days)+1):\n yield start_date + timedelta(n)", "def separate_periods(races, season_start, period_length):\n period_delta = datetime.timedelta(weeks=period_length)\n period_lower = season_start\n period_upper = season_start + period_delta\n period_buf = []\n bucket = []\n for race in races:\n # the date part of the race tuples is a python datetime object\n # so isocalendar()[1] is the week in the year (1-52) that the race\n # occurred.\n\n # this algorithm (hard coded for 2 week periods) will iteratively\n # add races into a \"bucket\" then once it reaches a race (in the\n # collection of tuples that we sorted earlier) in the next period,\n # the bucket has all of a period's 
races. it adds that full bucket\n # to period_buf, puts the race (which occurred in the next period)\n # to a new bucket, and starts over\n if period_lower <= race[1] < period_upper:\n bucket.append(race[0])\n else:\n period_buf.append(bucket)\n period_lower += period_delta\n period_upper += period_delta\n bucket = []\n bucket.append(race[0])\n\n # add the last bucket\n period_buf.append(bucket)\n\n return period_buf" ]
[ "0.70844066", "0.6660324", "0.65962434", "0.63845986", "0.63150877", "0.624241", "0.6117686", "0.61104435", "0.6062371", "0.603898", "0.59842044", "0.5892017", "0.58731925", "0.58605164", "0.5858732", "0.5845585", "0.5840416", "0.58369154", "0.5805926", "0.5704877", "0.5680807", "0.5636124", "0.5630568", "0.5623248", "0.56139463", "0.56092864", "0.560352", "0.5603169", "0.5602754", "0.5589887" ]
0.7247785
0
RPC request to subscribe to specific type of transactions
def subscribe(self, topic_type, tx_filter=None): request = protos.RequestSubscribe(type=topic_type, filter=tx_filter) res = self.stub.subscribe(request) for r in res: yield r
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def subscribe(self, namespace, sub_strings=None):\n req = JSONRPCRequest('subscribe', [namespace, sub_strings])\n result = yield self._send(req)\n self._cache_jsonrpc_request(req)\n raise tornado.gen.Return(result)", "def subscribe(self, req: SubscribeRequest) -> None:\n if self.login_status:\n exchange = EXCHANGE_VT2TORA[req.exchange]\n self.api.SubscribeMarketData([str.encode(req.symbol)], exchange)", "def subscribe(self, req: SubscribeRequest) -> None:\n if self.login_status:\n exchange = EXCHANGE_VT2TORA[req.exchange]\n self.api.SubscribeMarketData([str.encode(req.symbol)], exchange)", "def subscribe(receiver):", "def subscribe(receiver):", "def subscribe(receiver):", "def _handle_subscribe_request(state, client, request):\n args = request[\"args\"]\n for arg in args:\n topic, symbol = arg.split(\":\")\n symbol = symbol\n client.add_market_data_subscription(topic, symbol)\n if topic == 'orderBookL2':\n # Order book snapshot is always sent at the beginning of new market data subscription. The snapshot\n # enables the market data subscribers to recover the current state of the order book.\n _send_order_book_snapshot(state, client, symbol)\n elif topic == 'trade':\n # TODO: send trade snapshot\n pass\n else:\n # TODO: send not valid topic\n pass", "def subscribe(self, _type, symbol):\n self._assert_types_are_correct([_type])\n self.types.add(_type)\n self.symbols.add(symbol)\n\n if _type == 'tickers':\n self.wss.subscribe_to_ticker(symbol)\n elif _type == 'trades':\n self.wss.subscribe_to_trades(symbol)\n elif isinstance(_type, tuple):\n self.wss.subscribe_to_candles(symbol, timeframe=_type[1])", "def _subscribe(self, sub_type: str, sub_version: str, condition: dict, callback) -> str:\n self.__logger.debug(f'subscribe to {sub_type} version {sub_version} with condition {condition}')\n data = {\n 'type': sub_type,\n 'version': sub_version,\n 'condition': condition,\n 'transport': {\n 'method': 'webhook',\n 'callback': f'{self.callback_url}/callback',\n 'secret': self.secret\n }\n }\n r_data = self.__api_post_request(TWITCH_API_BASE_URL + 'eventsub/subscriptions', data=data)\n result = r_data.json()\n error = result.get('error')\n if r_data.status_code == 500:\n raise TwitchBackendException(error)\n if error is not None:\n if error.lower() == 'conflict':\n raise EventSubSubscriptionConflict(result.get('message', ''))\n raise EventSubSubscriptionError(result.get('message'))\n sub_id = result['data'][0]['id']\n self.__add_callback(sub_id, callback)\n if self.wait_for_subscription_confirm:\n timeout = datetime.datetime.utcnow() + datetime.timedelta(\n seconds=self.wait_for_subscription_confirm_timeout)\n while timeout >= datetime.datetime.utcnow():\n if self.__callbacks[sub_id]['active']:\n return sub_id\n asyncio.get_event_loop().run_until_complete(asyncio.sleep(0.01))\n self.__callbacks.pop(sub_id, None)\n raise EventSubSubscriptionTimeout()\n return sub_id", "def subscribe_topic(self):\n req = {\n \"op\": \"subscribe\",\n \"args\": [\n \"instrument\",\n \"trade\",\n \"orderBook10\",\n \"execution\",\n \"order\",\n \"position\",\n \"margin\",\n ],\n }\n self.send_packet(req)", "def subscribe(self, name, params, callback=None):\n cur_id = self._next_id()\n if callback:\n self._callbacks[cur_id] = callback\n self.send({'msg': 'sub', 'id': cur_id, 'name': name, 'params': params})\n return cur_id", "def subscribe(self, name, params, callback=None):\n cur_id = self._next_id()\n if callback:\n self._callbacks[cur_id] = callback\n self.send({'msg': 'sub', 'id': cur_id, 'name': name, 'params': 
params})\n return cur_id", "def PublishTransaction(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def subscribe_balances(self, update_handler):\n pass", "def cn_pack_subscribe_req(cls, cookie, envtype, orderid_list, order_deal_push, push_at_once):\n str_id = u''\n for orderid in orderid_list:\n if len(str_id) > 0:\n str_id += u','\n str_id += str(orderid)\n\n req = {\"Protocol\": \"5100\",\n \"Version\": \"1\",\n \"ReqParam\": {\"Cookie\": cookie,\n \"EnvType\": envtype,\n \"OrderID\": str_id,\n \"SubOrder\": order_deal_push,\n \"SubDeal\": order_deal_push,\n \"FirstPush\": push_at_once,\n }\n }\n req_str = json.dumps(req) + '\\r\\n'\n return RET_OK, \"\", req_str", "def subscribe_trades(self, symbol, update_handler=None):\n pass", "def subscribe(self, instrument_ids, exchange_id=b''):\n pass", "def subscribe(self, subscription_type, callback):\n if subscription_type in self._subscriptions.keys():\n self._subscriptions[subscription_type].append(callback)", "def subscribe_wql_istype(self, node, type,\n msg_handler):\n if isinstance(node, Literal) or isinstance(type, Literal):\n return None # No literals allowed here\n self.msg_handler = msg_handler\n self.tr_id = get_tr_id()\n xml_msg = self._create_wql_istype_msg(self.tr_id, node,\n type)\n self.conn.connect()\n self.conn.send(xml_msg)\n cnf = self.conn.receive()\n self._check_error(cnf)\n\n self.sub_id = cnf[\"subscription_id\"]\n # self.msg_handler.handle(initial_result)\n sub_h = WQLBooleanSubscribeHandler(self.node_id, self.tr_id,\n self.conn, msg_handler)\n sub_h.start()\n if cnf[\"results\"] == \"TRUE\":\n return True\n else:\n return False", "def subscribe_to_query_api(self, version, subscription_request, secure=False):\n resource_type = self._get_resource_type(subscription_request[\"resource_path\"])\n\n resource_types = ['node', 'device', 'source', 'flow', 'sender', 'receiver']\n\n if resource_type not in resource_types:\n raise SubscriptionException(\"Unknown resource type:\" + resource_type\n + \" from resource path:\" + subscription_request[\"resource_path\"])\n\n try:\n # Guard against concurrent subscription creation\n self.subscription_lock.acquire()\n\n subscription = next(iter([subscription for id, subscription in self.get_resources()['subscription'].items()\n if self._get_resource_type(subscription['resource_path']) == resource_type\n and subscription['max_update_rate_ms'] == subscription_request['max_update_rate_ms']\n and subscription['persist'] == subscription_request['persist']\n and subscription['secure'] == subscription_request['secure']]), None)\n\n if subscription:\n return subscription, False\n\n websocket_port = WEBSOCKET_PORT_BASE + len(self.subscription_websockets)\n websocket_server = SubscriptionWebsocketWorker('0.0.0.0', websocket_port, resource_type, secure)\n websocket_server.set_queue_sync_data_grain_callback(self.queue_sync_data_grain)\n websocket_server.start()\n\n subscription_id = str(uuid.uuid4())\n\n protocol = 'wss' if secure else 'ws'\n\n host = get_mocks_hostname() if secure else get_default_ip()\n\n subscription = {'id': subscription_id,\n 'max_update_rate_ms': subscription_request['max_update_rate_ms'],\n 'params': subscription_request['params'],\n 'persist': subscription_request['persist'],\n 'resource_path': subscription_request['resource_path'],\n 'secure': secure,\n 'ws_href': protocol + '://' + host + ':' + str(websocket_port)\n + '/x-nmos/query/' + 
version + '/subscriptions/' + subscription_id,\n 'version': IS04Utils.get_TAI_time()}\n\n self.subscription_websockets[subscription_id] = {'server': websocket_server, 'api_version': version}\n\n self.get_resources()['subscription'][subscription_id] = subscription\n finally:\n self.subscription_lock.release()\n\n return subscription, True # subscription_created=True", "def subscribe(receiver, catchup):", "def post_transaction():\n tx_dict = encode_transaction(\"gautham=awesome\") \n print(tx_dict)\n\n tendermint_host = 'localhost'\n tendermint_port = 26657\n endpoint = 'http://{}:{}/'.format(tendermint_host, tendermint_port)\n\n payload = {\n 'method': 'broadcast_tx_commit',\n 'jsonrpc': '2.0',\n #'params': [encode_transaction(tx_dict)],\n 'params': [tx_dict],\n 'id': str(uuid4())\n }\n # TODO: handle connection errors!\n print(payload)\n return requests.post(endpoint, json=payload)", "def subscribe_order_book_request_packet(self, pair_id):\n pass", "def subscribe(self, transport, data):\r\n\r\n self.add(transport, address=data.get('hx_subscribe'))\r\n\r\n self.send(\r\n data.get('hx_subscribe'),\r\n {'message': \"%r is listening\" % transport}\r\n )", "def subscribe(self, meta_type, callback, can_nack=False):\n if not can_nack:\n subscriber_list = self.subscribers.setdefault(meta_type, [])\n else:\n subscriber_list = self.nackables.setdefault(meta_type, [])\n subscriber_list.append(callback)", "def subscribe(self, req: SubscribeRequest):\n tick = TickData(\n symbol=req.symbol,\n exchange=req.exchange,\n name=req.symbol,\n datetime=datetime.now(),\n gateway_name=self.gateway_name,\n )\n self.ticks[req.symbol] = tick", "def _resend_subscriptions(self):\n for req in self._ws_jsonrpc_cache:\n if req.method == 'subscribe':\n self._logger.info('Resending JSONRPCRequest %s', req)\n result = yield self._send(req)\n self._logger.info(\n 'Resent JSONRPCRequest, with result: %s', result)", "def subscribe_user_trades(self, update_handler):\n pass", "def subscribe(self, **subscription_request):\n return self.subscribe_impl(mode='subscribe', **subscription_request)", "def subscribe(self, feed, **args):\n args.update(feed=feed)\n return self.fetch(\"/subscribe\", post_args=args)" ]
[ "0.64912724", "0.63196254", "0.63196254", "0.6257161", "0.6257161", "0.6257161", "0.6179839", "0.61468124", "0.5952488", "0.5937249", "0.57590926", "0.57590926", "0.57562625", "0.57485455", "0.5745518", "0.5726276", "0.572594", "0.5714911", "0.57020134", "0.563073", "0.56236506", "0.56213623", "0.5620803", "0.55827385", "0.5581728", "0.5566682", "0.55290955", "0.5516536", "0.5514664", "0.5510486" ]
0.6478053
1
close the file handler
def close(self) -> None: if self.file_handler: self.file_handler.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _close(self):\n self.fh.close()", "def close_file_handle(self):\n if self.file_handle and self.output_file:\n self.file_handle.close()", "def close_file(self):\r\n self.file.close()", "def close(self):\r\n self._fp.close()", "def close(self):\n self.fileobj.close()", "def close(self):\n self.fileobj.close()", "def close(self):\n self.f.close()", "def close(self):\n self.f.close()", "def close_file(self):\n self.file.close()", "def close_file(self):\n self.file.close()", "def close(self) -> None:\n self.f.close()", "def close(self):\n\t\tself.filep.close()", "def close(self):\r\n if self._filename and self._fh:\r\n self._fh.close()\r\n self._fh = None", "def close(self):\n self.__file.close()", "def close(self):\n self.__file_object.close()", "def close(self):\n self._fp.close()", "def close(self):\n if not self.__closed:\n self.counters = { \"error\": 0, \"warning\": 0, \"success\": 0, \"failure\": 0 }\n\n try:\n self.__flush_count = 0\n for handler in self.__filehandlers:\n handler.flush()\n self.__logger.removeHandler(handler)\n handler.close()\n except:\n # do nothing\n pass\n self.__closed = True", "def close(self):\n self.file.close()", "def close(self):\n self.file.close()", "def close(self):\r\n self._fileobjclosed = True\r\n self._sockobj.close()", "def close():\n # self.consumer.close()\n # close file handler\n LOGGER.info(\"consumer is closed!!\")", "def close(self):\r\n self.rfile.close()\r\n self.sock_chan.close()", "def _close_file_logger(self):\n if self._file_log_handler is not None:\n self._file_log_handler.flush()\n self._file_log_handler.close()\n self.logger.removeHandler(self._file_log_handler)\n self._file_log_handler = None\n self.logger.propagate = True", "def close(self):\n self.handle.close()", "def close(self):\n if self.current_file_number is not None:\n self.fh_raw.close()\n self.current_file_number = None", "def close(self):\n if not self.file.closed:\n self.file.close()", "def close(self):\r\n self._fd.close()", "def close(self):\n self.file.close()\n self.file = None", "def close(self):\n self._file.close()", "def close(self):\n self._file.close()" ]
[ "0.80638564", "0.7899277", "0.76776", "0.7598503", "0.759643", "0.759643", "0.7555477", "0.7555477", "0.75353223", "0.75353223", "0.7534309", "0.74882436", "0.746249", "0.74507326", "0.74417937", "0.7408353", "0.74020976", "0.7385599", "0.7385599", "0.7360604", "0.73488915", "0.7340167", "0.733761", "0.73363006", "0.73190004", "0.727363", "0.72656757", "0.7243973", "0.72436404", "0.72436404" ]
0.8375519
0
return the client name given a subversion string
def subverParseClient(s): return s[1:].split(":")[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def svn_client_ctx_t_client_name_get(svn_client_ctx_t_self): # real signature unknown; restored from __doc__\n return \"\"", "def get_client_name(self, obj):\n\t\treturn obj.client.name", "def get_client_name(self, client):\n info = self.clientmap[client]\n host, name = info[0][0], info[1]\n return '@'.join((name, host))", "def name(self):\n return \"{} {}\".format(self._clientname, self._name)", "def name(self):\n return f\"{self._client.friendly_name} {CLIENT_SUFFIX}\"", "def svn_client_ctx_t_client_name_set(svn_client_ctx_t_self, char_client_name): # real signature unknown; restored from __doc__\n pass", "def fhir_version_name(fhir_version):\n major_version = int(fhir_version.split('.')[0])\n\n if major_version < 3:\n return 'dstu2'\n elif (major_version >= 3) and (major_version < 4):\n return 'stu3'\n elif (major_version >= 4) and (major_version < 5):\n return 'r4'\n else:\n raise Exception(\n f'Invalid fhir version supplied: {fhir_version}! No name exists '\n 'for the supplied fhir version.'\n )", "def _branch_name(cls, version: Version) -> str:\n suffix = version.public[len(version.base_version) :]\n components = version.base_version.split(\".\") + [suffix]\n if suffix != \"\" and not (\n suffix.startswith(\"rc\")\n or suffix.startswith(\"a\")\n or suffix.startswith(\"b\")\n or suffix.startswith(\".dev\")\n ):\n raise ValueError(f\"Unparseable pants version number: {version}\")\n return \"{}.{}.x\".format(*components[:2])", "def get_sub_name(self):\n return self.sub_name", "def svn_client_info(char_path_or_url, svn_opt_revision_t_peg_revision, svn_opt_revision_t_revision, svn_info_receiver_t_receiver, svn_boolean_t_recurse, svn_client_ctx_t_ctx, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def preferred_client_name(self):\n return self.secondary_client_name() or self.client_name()", "def name(self):\n return f\"{self.client_name} {self._name}\"", "def name(self):\n return f\"{self.client_name} {self._name}\"", "def slicename_to_hostname(vs_name):\n fields = vs_name.split('_')\n if len(fields) == 1:\n prefix = vs_name\n else:\n # The vs_name prefix is the PlanetLab site name.\n # The rest is user-chosen. 
Place the site name after user-chosen name.\n prefix = '.'.join(fields[1:] + [fields[0]])\n return '%s.%s' % (prefix, _root_hostname)", "def svn_branch():\n return svn_url().split('/')[-1]", "def version_name(self) -> str:\n return pulumi.get(self, \"version_name\")", "def subscription_name_from_path(path, project):\n return _name_from_project_path(path, project, _SUBSCRIPTION_TEMPLATE)", "def test_name(self):\n result = self.test_client.name\n\n assert result == \"Evgenii Kryuchkov\"", "def _get_client_info():\n client = get_distribution('reportportal-client')\n return client.project_name, client.version", "def get_version_string():\n vl = TopicTreeExtractCVS.get_version_number()\n\n return '''TopicTreeExtractCVS {0}.{1}.{2}\nNew BSD License.\nCopyright (C) 2017 Hitoshi Yamauchi\n'''.format(vl[0], vl[1], vl[2])", "def name(self):\n return '{} {}'.format(self.client_name, self.variable)", "def client(self) -> str:\n return pulumi.get(self, \"client\")", "def svn_client_uuid_from_url(char_uuid, char_url, svn_client_ctx_t_ctx, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def name(self):\n return f\"{self.client_name} {self.variable}\"", "def get_svn_version():\n return crds.__version__", "def get_raw_server_name():\n from google.appengine.api import app_identity\n return '%s.%s.appspot.com' % (os.environ[\n 'CURRENT_VERSION_ID'].split('.')[0], app_identity.get_application_id())", "def as_namespace_name(name, version):\n return name + ':' + version", "def svn_client_get_username_provider(svn_auth_provider_object_t_provider, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def subnetwork_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"subnetwork_name\")", "def svn_client_url_from_path(char_url, char_path_or_url, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass" ]
[ "0.7523611", "0.666468", "0.61638457", "0.598154", "0.5943123", "0.58753544", "0.5852146", "0.57407033", "0.57030475", "0.56988454", "0.56832063", "0.5635375", "0.5635375", "0.5609268", "0.552585", "0.5525318", "0.54745305", "0.54636943", "0.5447733", "0.54427", "0.54276645", "0.5413397", "0.538821", "0.53741485", "0.5364464", "0.5361984", "0.53537875", "0.53318846", "0.53160006", "0.53129864" ]
0.7026395
1
Verify that every passed node is interconnected with all the other clients
def verifyInterconnect(nodes, clientTypes=clientSubvers): for n in nodes: connectedTo = set() myclient = subverParseClient(n.getnetworkinfo()["subversion"]) pi = n.getpeerinfo() for p in pi: connectedTo.add(subverParseClient(p["subver"])) notConnectedTo = clientTypes - connectedTo notConnectedTo.discard(myclient) if notConnectedTo: print("Client %s is not connected to %s" % (myclient, str(notConnectedTo))) assert(len(notConnectedTo) == 0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_connection_is_established(self):\n assert self.connection_node_1.is_connected is True\n assert self.connection_node_2.is_connected is True\n assert self.connection_client_1.is_connected is True\n assert self.connection_client_2.is_connected is True", "def checkonly(self):\n OTHER_WSREP.append(socket.gethostbyname(socket.gethostname()))\n for hostitem in ALL_NODES:\n checkhost(hostitem)\n if OTHER_WSREP:\n for wsrepitem in OTHER_WSREP:\n REMAINING_NODES.append(wsrepitem)\n if REMAINING_NODES:\n for wsrephost in OTHER_WSREP:\n checkwsrep(wsrephost)\n print ''", "def test_connection_is_established(self):\n assert self.connection_client_1.is_connected is True\n assert self.connection_client_2.is_connected is True", "def testEveryNodeRepliesWithNoFaultyNodes(looper, client1, replied1):\n\n def chk():\n receivedReplies = getRepliesFromClientInbox(client1.inBox,\n replied1.reqId)\n print(receivedReplies)\n assert len(receivedReplies) == nodeCount\n\n looper.run(eventually(chk))", "def are_connected(self, name1, name2):", "def check_connected(self, update=True):\n # update if needed\n if update:\n\n self.update_neighbors()\n\n # go through each node checking that each degree id greater than 0\n for node in self.nodes:\n\n # only one node needs to be disconnected to fail\n if len(self.nodes[node].neighbors) < 1:\n return False\n\n return True", "def testReplyWhenRepliesFromAllNodesAreSame(looper, client1, wallet1):\n request = sendRandomRequest(wallet1, client1)\n looper.run(\n eventually(checkResponseRecvdFromNodes, client1,\n nodeCount, request.reqId,\n retryWait=1, timeout=20))\n checkResponseCorrectnessFromNodes(client1.inBox, request.reqId, F)", "def test_connection_is_established(self):\n for conn in self.connections:\n assert conn.is_connected is True", "def test_check_workers(self):\r\n\r\n workers, client_sockets = self._setup_server_and_clients()\r\n\r\n self.assertTrue(check_workers(workers, client_sockets))\r\n\r\n # Now close and terminate a client, wait and check again\r\n client_sockets[0].close()\r\n self.server_socket.close()\r\n sleep(1)\r\n self.assertFalse(check_workers(workers, client_sockets))", "async def test_node_many(self):\n node_list = [Node() for i in range(100)]\n\n # check uids are unique\n uid_list = [n.uid() for n in node_list]\n self.assertEqual(len(uid_list), len(set(uid_list)))\n\n running_list = []\n try:\n for n in node_list:\n # create node and join upon random node in network\n await n.run_node()\n if len(running_list) > 0:\n ct_n, _ = random.choice(running_list)\n await n.join_network(ct_n.nid())\n\n msg = Network_Message()\n n.attach_broadcast_callback(msg.msg_callback)\n n.attach_direct_message_callback(msg.msg_callback)\n running_list.append((n, msg))\n\n for i in range(0, 3):\n await asyncio.sleep(constants.SHU_TIME + 5)\n\n while len(running_list) > 1:\n # broadcast and check all nodes received message\n await running_list[0][0].send_broadcast('hello world')\n for node, msg in running_list[1:]:\n msg_suc = await msg.wait_msg(15)\n self.assertTrue(msg_suc)\n self.assertEqual(msg.nid, running_list[0][0].nid())\n self.assertEqual(msg.msg, 'hello world')\n\n # kill half of the nodes\n kill_list = running_list[:len(running_list) // 2]\n for n, _ in kill_list:\n n.exit_node()\n await asyncio.gather(*[n.wait_stopped() for n, _ in kill_list])\n\n new_list = running_list[len(running_list) // 2:]\n for _, msg in new_list:\n msg.reset()\n\n running_list = new_list\n await asyncio.sleep(constants.SHU_TIME + 5)\n finally:\n for n in node_list:\n 
n.exit_node()\n await asyncio.gather(*[n.wait_stopped() for n in node_list])", "def is_connected(self) -> bool:", "def TODO_testSharedServerConns(self):\n return \"TODO: getting nondetermistic behavior here due to retry feature\"\n\n self.assertEqual(len(self.clients), 0)\n self.assertEqual(len(self.mock_server().sessions), 0)\n\n large_num_clients = 30\n for i in range(0, large_num_clients):\n self.client_connect(i)\n self.client_send('get fromClient' + str(i) + '\\r\\n', i)\n\n self.assertEqual(len(self.clients), large_num_clients)\n\n self.wait(10)\n\n self.assertTrue(len(self.mock_server().sessions) < large_num_clients)\n self.assertTrue(len(self.mock_server().sessions) > 0)", "def isConnected():", "def test_connected_ids():\n opts = {\n \"publish_port\": 4505,\n \"detect_remote_minions\": False,\n \"minion_data_cache\": True,\n }\n minion = \"minion\"\n ips = {\"203.0.113.1\", \"203.0.113.2\", \"127.0.0.1\"}\n mdata = {\"grains\": {\"ipv4\": ips, \"ipv6\": []}}\n patch_net = patch(\"salt.utils.network.local_port_tcp\", return_value=ips)\n patch_list = patch(\"salt.cache.Cache.list\", return_value=[minion])\n patch_fetch = patch(\"salt.cache.Cache.fetch\", return_value=mdata)\n ckminions = salt.utils.minions.CkMinions(opts)\n with patch_net, patch_list, patch_fetch:\n ret = ckminions.connected_ids()\n assert ret == {minion}", "def validate_connection(self):\n for hostInfo in self.client.transport.hosts:\n host = hostInfo.get('host')\n port = hostInfo.get('port')\n self.validate_server_connection(host, port)", "def test_same_node_is_reachable(self):\n # G is an arbitrary tournament on ten nodes.\n G = DiGraph(sorted(p) for p in combinations(range(10), 2))\n assert_true(all(is_reachable(G, v, v) for v in G))", "def Connected(self) -> bool:", "def Connected(self) -> bool:", "def test_add_incoming_connection():\n center = Coordinates(4, 4)\n radius = 10\n\n i = Intersection(center, radius, 15)\n\n empty_connections = i.get_connections()\n\n assert not empty_connections\n\n start1 = Coordinates(1,2)\n end1 = Coordinates(9, 10)\n len1 = 10\n in_ln1 = 3\n out_ln1 = 4\n ang1 = math.pi/2\n\n start2 = Coordinates(5, 6)\n end2 = Coordinates(12, 14)\n len2 = 15\n in_ln2 = 5\n out_ln2 = 1\n ang2 = math.pi/4\n\n start3 = Coordinates(7, 8)\n end3 = Coordinates(10, 12)\n len3 = 20\n in_ln3 = 25\n out_ln3 = 27\n ang3 = 3 * math.pi / 2\n\n r1 = Road(start1, end1, len1, out_ln1, in_ln1, ang1, 20, 'Test')\n r2 = Road(start2, end2, len2, out_ln2, in_ln2, ang2, 25, 'Test')\n r3 = Road(start3, end3, len3, out_ln3, in_ln3, ang3, 30, 'Test')\n\n add_incoming_connection(i, r1)\n\n non_empty = i.get_connections()\n\n assert non_empty\n\n assert non_empty[0].get_length() == 10\n assert non_empty[0].get_angle() == ang1\n assert non_empty[0].get_in_lanes() == in_ln1\n assert non_empty[0].get_out_lanes() == out_ln1\n\n add_incoming_connection(i, r3)\n\n assert len(i.get_connections()) == 2\n\n assert non_empty[0].get_length() == 10\n assert non_empty[0].get_angle() == ang1\n assert non_empty[0].get_in_lanes() == in_ln1\n assert non_empty[0].get_out_lanes() == out_ln1\n assert non_empty[1].get_length() == len3\n assert non_empty[1].get_angle() == ang3\n assert non_empty[1].get_in_lanes() == in_ln3\n assert non_empty[1].get_out_lanes() == out_ln3\n\n add_connection(i, math.pi, 20, 21, 22, 40, 'Test')\n\n assert len(i.get_connections()) == 3\n assert non_empty[0].get_length() == 10\n assert non_empty[0].get_angle() == ang1\n assert non_empty[0].get_in_lanes() == in_ln1\n assert non_empty[0].get_out_lanes() == out_ln1\n 
assert non_empty[1].get_length() == len3\n assert non_empty[1].get_angle() == ang3\n assert non_empty[1].get_in_lanes() == in_ln3\n assert non_empty[1].get_out_lanes() == out_ln3\n assert non_empty[2].get_length() == 20\n assert non_empty[2].get_angle() == math.pi\n assert non_empty[2].get_in_lanes() == 21\n assert non_empty[2].get_out_lanes() == 22\n\n add_incoming_connection(i, r2)\n\n assert len(i.get_connections()) == 4\n assert non_empty[0].get_length() == 10\n assert non_empty[0].get_angle() == ang1\n assert non_empty[0].get_in_lanes() == in_ln1\n assert non_empty[0].get_out_lanes() == out_ln1\n assert non_empty[1].get_length() == len3\n assert non_empty[1].get_angle() == ang3\n assert non_empty[1].get_in_lanes() == in_ln3\n assert non_empty[1].get_out_lanes() == out_ln3\n assert non_empty[2].get_length() == 20\n assert non_empty[2].get_angle() == math.pi\n assert non_empty[2].get_in_lanes() == 21\n assert non_empty[2].get_out_lanes() == 22\n assert non_empty[3].get_length() == 15\n assert non_empty[3].get_angle() == ang2\n assert non_empty[3].get_in_lanes() == in_ln2\n assert non_empty[3].get_out_lanes() == out_ln2", "def verify_intervlan_routing(self):\n for src in self.host_information:\n for dst in self.host_information:\n if dst > src:\n self.check_host_connectivity_by_id(src, dst)", "def _check_all_replicas_connected(num_replicas, gateway_port, protocol):\n exec_ids = set()\n exec_id_list = []\n for i in range(num_replicas + 1):\n id_ = _send_request(gateway_port, protocol, request_size=2)[0].text\n exec_ids.add(id_)\n exec_id_list.append(id_)\n print(exec_id_list)\n assert len(exec_ids) == num_replicas", "def isconnected(self) -> bool:", "def callback(self):\n server_addresses = self._address_book.list_by_key(key)\n for address in server_addresses:\n if self._client_logic.connection_error.is_set():\n try:\n connection = socket.create_connection((address[0], 9665))\n self.sident_verify(connection, v_event)\n except socket.error:\n continue\n else:\n return True\n neighbor_addresses = self._client_list.list()\n for address in neighbor_addresses:\n if self._client_logic.connection_error.is_set():\n try:\n connection = socket.create_connection((address[0], address[1]))\n \n\n def sident_verify(self, connection):\n \"\"\"Request the server send a signed verification of its identity with \n IP address, port and timestamp.\n\n sident stands for 'Server Identity'\n\n An sident_verify message is of the following form:\n\n {'type':'sident_verify'\n 'timestamp':<UNIX TIMESTAMP>}\n\n The server should reply with an sident_response message which is of\n the following form:\n\n {'type':'sident_response',\n 'ip_addr':<IP ADDRESS AS A STRING>,\n 'port':<PORT NUMBER AS AN INTEGER>,\n 'timestamp':<UNIX TIMESTAMP>,\n 'signature':<SIGNED DIGEST OF THE THREE PREVIOUS VALUES AS A UTF-8 STRING \n CONCATENATED TOGETHER WITH COMMA SEPERATORS>}\"\"\"\n sident_verify_msg = {'type':'sident_verify',\n 'timestamp':calendar.timegm(time.gmtime())}\n self._send_queue.put((sident_verify_msg, connection))\n return True\n\n def request_server_address(self, connection):\n \"\"\"Request the best guess at the current server address from a client\n peer. \n\n P2P nodes use the same JSON messaging style as the normal client and\n server. 
address_request messages are of the form:\n\n {'type':'address_request'\n 'timestamp':<UNIX TIMESTAMP>}\n\n And a server_address message is of the form:\n\n {'type':'server_address',\n 'key':<CRYPTOGRAPHIC KEY THAT UNIQUELY IDENTIFIES SERVER>,\n 'address':<SERVER ADDRESS>,\n 'port':<WHAT PORT THE SERVER LISTENS ON>,\n 'address_timestamp':<UNIX TIMESTAMP OF WHEN PEER RECEIVED ADDRESS>,\n 'signature':<VERIFICATION THAT INFORMATION CAME FROM SERVER ORIGINALLY>,\n 'timestamp':<UNIX TIMESTAMP OF WHEN MESSAGE WAS SENT>}\"\"\"\n address_request = {'type':'sident_verify',\n 'timestamp':calendar.timegm(time.gmtime())}\n self._send_queue.put((address_request, connection))\n return True\n \n\n def send_loop(self):\n \"\"\"Send loop that is meant to be started from a seperate thread of \n execution. The send loop pulls 'raw' python object messages from this \n objects send_queue attribute and converts them to json strings before \n encoding them as utf-8 to send across the wire. Sent along with the \n message is the connection to send it on.\n\n Responses are handled and received by the receive_loop method of this class\n which is ran in a seperate thread of execution.\"\"\"\n while not self._shutdown.is_set():\n message_tuple = self._send_queue.get()\n message = message_tuple[0]\n message_length = self._calculate_recursive_length(message)\n wrapped_message = [message_length, message]\n wire_message = (json.dumps(wrapped_message) + \"\\r\\n\\r\\n\").encode('utf-8')\n message_tuple[1].sendall(wire_message)\n return True\n\n def receive_loop(self):\n \"\"\"Receive loop that is meant to be started from a seperate thread of\n execution. The receive loop takes in 'raw' utf-8 json messages from the\n wire and decodes them, then interprets them to produce native python \n objects. The resulting objects are then handled by a method of this class\n of the form handle_<message_type>. 
For example if a message with the \n 'type' key 'test' came in like so:\n\n {'type':'test'}\n\n The method self.handle_test(message) would be called with the message\n dictionary object passed along.\n \"\"\"\n msg_buffer = bytes() # The message input buffer\n while not self._shutdown.is_set():\n if msg_buffer:\n try:\n msg_length = self.determine_length_of_json_msg(msg_buffer)\n except InvalidLengthHeader:\n msg_length = float(\"inf\")\n if len(msg_buffer) >= msg_length:\n message = self.extract_msg(msg_buffer, msg_length)\n try:\n handler = getattr(self, \"handle_\" + message['type'])\n except AttributeError:\n print(\"Can't handle message of type: \" +\n str(message['type']))\n continue\n handler(message)\n msg_buffer = msg_buffer[msg_length:]\n else:\n try:\n msg_buffer += connection.recv(1024)\n except socket.timeout:\n pass\n else:\n try:\n msg_buffer += connection.recv(1024)\n except socket.timeout:\n pass\n \n def handle_sident_response(message):\n \"\"\"Handle an sident_response type message of the form:\n \n {'type':'sident_response',\n 'ip_addr':<IP ADDRESS AS A STRING>,\n 'port':<PORT NUMBER AS AN INTEGER>,\n 'timestamp':<UNIX TIMESTAMP>,\n 'signature':<SIGNED DIGEST OF THE THREE PREVIOUS VALUES AS A UTF-8 STRING \n CONCATENATED TOGETHER WITH COMMA SEPERATORS>}\n \n The handler verifies that the information given by the server is properly\n signed, then adds the information to address books/etc, and finally \n resolves the issue using provided client logic methods and clears the \n error indicator.\"\"\"\n if self._client_logic.connection_error.is_set():\n try:\n ip_addr = message['ip_addr']\n port = message['port']\n timestamp = message['timestamp']\n signature = message['signature']\n except KeyError:\n return False\n sha_hash = SHA256.new(\n (ip_addr + \",\" + port + \",\" + timestamp).encode('utf-8'))\n if self._key.verify(sha_hash.digest(), signature):\n self._address_book.add_address(self._key, ip_addr, timestamp,\n signature, port=port)\n self._address_book.save()\n if self._client_logic.reconnect(ip_addr, port):\n self._client_logic.connection_error.clear()\n return True\n else:\n return False\n else:\n return False\n\n \n def determine_length_of_json_msg(self, message_bytes):\n \"\"\"Incrementally parse a JSON message to extract the length header.\n\n message_bytes: The bytes that represent the portion of the message \n recieved.\n \"\"\"\n # All messages must be written in utf-8\n message = message_bytes.decode('utf-8')\n # Check that the message we have been given looks like a valid length header\n if \",\" not in message:\n raise InvalidLengthHeader(message)\n length_portion = message.split(\",\")[0]\n left_bracket = length_portion[0] == \"[\"\n number_before_comma = length_portion[-1] in \"1234567890\"\n if left_bracket and number_before_comma:\n for character in enumerate(length_portion):\n if character[1] not in \"[ \\n\\t\\r1234567890,\":\n raise InvalidLengthHeader(length_portion)\n elif character[1] in \"1234567890\":\n length_start = character[0]\n return int(length_portion[length_start:])\n elif left_bracket:\n raise InvalidLengthHeader(length_portion)\n else:\n raise MissingLengthHeader(length_portion)\n return False\n\n def extract_msg(self, msg_buffer, length):\n message = msg_buffer[:length].decode()\n try:\n right_curly_bracket = message[-6] == \"}\" or message[-2] == \"}\"\n except IndexError:\n print(message, msg_buffer, length)\n valid_delimiter = message[-6:] == \"}]\\r\\n\\r\\n\"\n if right_curly_bracket and valid_delimiter:\n return message\n elif 
right_curly_bracket:\n raise InvalidMessageDelimiter(message)\n else:\n raise MissingMessageDelimiter(message)\n\n def _calculate_recursive_length(self, msg_dict):\n \"\"\"Calculate the length of a dictionary represented as JSON once a length\n field has been added as a key.\"\"\"\n delimiter = \"\\r\\n\\r\\n\"\n initial_length = len(\n json.dumps(msg_dict) + delimiter)\n initial_list = [initial_length, msg_dict]\n recursive_length = len(\n json.dumps(initial_list) + delimiter)\n recursive_list = [recursive_length, msg_dict]\n while len(json.dumps(recursive_list) + delimiter) != recursive_list[0]:\n recursive_length = len(\n json.dumps(recursive_list) + delimiter)\n recursive_list = [recursive_length, msg_dict]\n return recursive_list[0]", "def test_already_connected(connection, events, writer, schedule, flush):\n schedule(connection.connect(), connection.connect())\n flush()\n assert not writer.closed\n assert events.triggered(\"CLIENT_CONNECT\")", "def is_peer_connected(mnode, servers):\n if isinstance(servers, str):\n servers = [servers]\n else:\n servers = servers[:]\n\n if mnode in servers:\n servers.remove(mnode)\n\n peer_status_list = get_peer_status(mnode)\n if peer_status_list is None:\n g.log.error(\"Failed to parse the peer status. Hence failed to \"\n \"validate the peer connected state.\")\n return False\n if peer_status_list == []:\n g.log.error(\"No peers present in the pool. Servers are not yet \"\n \"connected.\")\n return False\n\n # Convert all hostnames to ip's\n server_ips = []\n for server in servers:\n server_ips.append(socket.gethostbyname(server))\n\n is_connected = True\n for peer_stat in peer_status_list:\n if socket.gethostbyname(peer_stat['hostname']) in server_ips:\n if (re.match(r'([0-9a-f]{8})(?:-[0-9a-f]{4}){3}-[0-9a-f]{12}',\n peer_stat['uuid'], re.I) is None):\n g.log.error(\"Invalid UUID for the node '%s'\",\n peer_stat['hostname'])\n is_connected = False\n if (peer_stat['stateStr'] != \"Peer in Cluster\" or\n peer_stat['connected'] != '1'):\n g.log.error(\"Peer '%s' not in connected state\",\n peer_stat['hostname'])\n is_connected = False\n\n if not is_connected:\n return False\n\n peer_ips = [socket.gethostbyname(peer_stat['hostname']) for\n peer_stat in peer_status_list]\n if not (set(server_ips).issubset(peer_ips)):\n servers_not_in_pool = list(set(server_ips).difference(peer_ips))\n for index, server in enumerate(servers_not_in_pool):\n if not (server in servers):\n servers_not_in_pool[index] = socket.gethostbyaddr(server)[0]\n g.log.error(\"Servers: '%s' not yet added to the pool.\",\n servers_not_in_pool)\n return False\n\n g.log.info(\"Servers: '%s' are all 'Peer in Cluster' and 'Connected' \"\n \"state.\", servers)\n return True", "def test_all_servers_connection():\n task_data = dict(const.TEST_TASK)\n task_data[\"client_list\"] = list()\n agents = models.Agent.objects.all()\n for agent in agents:\n task_data[\"client_list\"].append({\"id\": agent.id, \"ip_address\": agent.ip_address})\n message_queue.push_task(task_data)\n logger.info(\"create tasks to test all agents' connection status\")", "def update_clients():\n if rabbit.leader_node_is_ready() or rabbit.client_node_is_ready():\n for rid in relation_ids('amqp'):\n for unit in related_units(rid):\n amqp_changed(relation_id=rid, remote_unit=unit)", "def isconnected(self) -> bool:\n ...", "def multiple_clients():\n server = start_server()\n clients = [start_client(port=str(p)) for p in range(11110, 11114)]\n test_strs = [make_random(100) for _ in range(4)]\n\n # Write to all clients. 
Expect the server to receive all their output.\n for i, client in enumerate(clients):\n write_to(client, test_strs[i])\n time.sleep(1)\n if not read_from(server) == test_strs[i]:\n return False\n return True", "def test_neo4j_connected(self):\n\t\tclient = Client()\n\t\tresponse = client.post(self.uri_add_selectable+self.bob_the_socialaware_twin_profile.token+'/',\n\t\t content_type='application/json', data=page_one_objects(self.bob_the_socialaware_twin_auth, 0))\n\t\tstatus = json.loads(response.content.decode('utf-8'))['inter']\n\t\tself.assertNotEqual(status,'e_neo4j_Disconnected')" ]
[ "0.68466765", "0.6563393", "0.6492229", "0.64650893", "0.6449981", "0.6430157", "0.6389443", "0.6318689", "0.63007426", "0.6234157", "0.6223971", "0.61432195", "0.6124322", "0.61007655", "0.6083266", "0.60621226", "0.6053421", "0.6053421", "0.6020111", "0.59973615", "0.59776235", "0.5974072", "0.5952334", "0.5939801", "0.59303313", "0.5897986", "0.5896073", "0.58870095", "0.58754426", "0.5872189" ]
0.77593154
0
Check if a cookie is expired.
def is_cookie_expired(cookie_name): if cookie_name: expires = int timestamp = int(time.time()) for cookie in __request_session.cookies: if cookie.name == cookie_name: expires = cookie.expires else: return None if timestamp > expires: log.debug('cookie[\'%s\'] is expired. time stamp: %s, expires: %s' % (cookie_name, timestamp, expires)) return True log.debug('cookie[\'%s\'] is not expired. time stamp: %s, expires: %s' % (cookie_name, timestamp, expires)) return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_cookie_expired(cookie):\n now = int(round(time() * 1000))\n expire = cookie[u't']\n return now > expire", "def expired(self):\n return int(time.time()) > self.expires_at", "def has_expired(self):\n self.ensure_one()\n return datetime.now() > fields.Datetime.from_string(self.expires)", "def is_expired(self):\n return timeutils.utcnow_ts() > self.expire_ts", "def is_expired(self):\n\n return time.time() * 1000 - self._refreshed_on > self._expire", "def is_expired(self):\n return utcnow() >= self.expires", "def is_expired(self) -> bool:\n return now() > self.expires", "def is_expired(self):\n if not self.is_signed:\n return True\n return int(self._token_claims.get(self.__class__.exp_claim, 0)) < int(\n time.time()\n )", "def _is_expired(self):\n current_time = datetime.now()\n if (current_time > self._expires_at):\n logging.debug('token expired')\n return True\n else:\n return False", "def isExpired(self):\n return True/False", "def is_expired(self):\n return self.expiration_date <= self._now()", "def is_expired(self):\n return int(time.time()) - self.time > self.interval", "def is_expired(self):\n if self.access_token is None:\n logging.debug('Access token not found')\n return True\n else:\n return (self.expiration <= datetime.now())", "def token_is_expired(self):\n # type: () -> bool\n token = self.token\n if not token:\n return False\n\n return token[\"expires_at\"] < time()", "def _verify_session_cookies(self):\n if not self.session.cookies:\n return False\n for cookie_name in LOGIN_COOKIES:\n if cookie_name not in list(self.session.cookies.keys()):\n LOG.error('The cookie \"{}\" do not exist, it is not possible to check the expiration',\n cookie_name)\n return False\n for cookie in self.session.cookies.jar:\n if cookie.name != cookie_name:\n continue\n if cookie.expires <= int(time.time()):\n LOG.info('Login is expired')\n return False\n return True", "def is_expired(self):\n\n if self._lifetime is not None and self._lifetime > 0:\n # 300 seconds waite is the tolerance !\n # The unit of lifetime is millisecond\n if (time.time() - self._create_date) * 1000 > self._lifetime + 300000:\n return True\n\n return False", "def has_expired(self):\n if not self._initialized:\n return True\n\n expires_in = self.expires_in\n if expires_in > 0:\n return False\n else:\n return True", "def expired(self):\n\n return self.getNotAfter() <= rpki.sundial.now()", "def expired(self): # pragma: no cover\n return self._state in (_State.EXPIRING, _State.EXPIRED)", "def expired(self) -> bool:\n if not self.use_wts:\n return False\n\n return datetime.now() > self.expire", "def is_expired(self):\n delta = datetime.datetime.now() - self.created_at\n\n return delta.total_seconds() > 15*60", "def _has_expired(self):\r\n expired = False\r\n if hasattr(self, 'Expiration'):\r\n now = datetime.datetime.utcnow()\r\n expiration = datetime.datetime.strptime(self.Expiration, '%Y-%m-%dT%H:%M:%SZ')\r\n expired = (now >= expiration)\r\n else:\r\n raise ValueError(\"ERROR: Request for expired property, but no Expiration in HIT!\")\r\n return expired", "def is_expired(self):\n return self._bExpired", "def has_expired(self, now):\n if now < self._expires:\n return False\n\n return self._enclave_wait_timer.has_expired()", "def is_expired(self):\n expiration_date = datetime.timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS)\n return (self.date_joined + expiration_date <= datetime.datetime.now())", "def _check_goauth_expiration(self, expiry):\n now = int(time.time())\n time_left = int(expiry) - now\n # 10 days\n min_time_left 
= 60*60*24*10\n if time_left < min_time_left:\n return False\n else:\n return True", "def is_expired (self, now=None):\n if now is None: now = time.time()\n return self.is_idle_timed_out(now) or self.is_hard_timed_out(now)", "def has_expired(self) -> bool:\n raise NotImplementedError() # pragma: nocover", "def isExpired(self):\n return self.sess is not None and not self.sess.isValid()", "def is_expired(self):\n expiration_date = datetime.timedelta(days=settings.ACCOUNT_ACTIVATION_DAYS)\n\n return (self.user.date_joined + expiration_date <= datetime.datetime.now())" ]
[ "0.8836514", "0.77920187", "0.77391326", "0.7730929", "0.7603923", "0.7591796", "0.75569636", "0.75034326", "0.7462581", "0.7440906", "0.7415246", "0.736596", "0.7357847", "0.73257196", "0.7270349", "0.72035724", "0.7195444", "0.7123442", "0.70624334", "0.6997034", "0.69882977", "0.6935853", "0.6915936", "0.6908242", "0.688803", "0.6878907", "0.68782187", "0.68766737", "0.68253213", "0.68159187" ]
0.8001271
1
Check a cookie by name to see if it exists.
def has_cookie(cookie_name): if cookie_name in __request_session.cookies: log.debug('cookie found: %s' % __request_session.cookies[cookie_name]) return __request_session.cookies[cookie_name] log.debug('no cookie named: %s found.' % cookie_name) return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_cookie(self, name, domain):\n for cookie in self._cookiejar:\n if cookie.name == name and cookie.domain == domain:\n if cookie.is_expired():\n break\n return cookie", "def driver_has_cookie(self, cookie_name, timeout=3):\n try:\n wait = WebDriverWait(self.web_driver, timeout)\n wait.until(WaitUntilBrowserHasCookieCondition(cookie_name))\n return True\n except TimeoutException:\n return False", "def read_secure_cookie(self, name):\n\n cookie_val = self.request.cookies.get(name)\n return cookie_val and check_secure_val(cookie_val)", "def read_secure_cookie(self, name):\n cookie_val = self.request.cookies.get(name)\n return cookie_val and check_secure_val(cookie_val)", "def read_secure_cookie(self, name):\n cookie_val = self.request.cookies.get(name)\n return cookie_val and check_secure_val(cookie_val)", "def get_cookie(self, name):\n return self.cookies.get(name)", "def get_cookie( name, default=None ):", "def has_valid_cookie(self):\r\n try:\r\n parsed_url = urlparse(self.url)\r\n host = parsed_url[1]\r\n path = parsed_url[2] or '/'\r\n\r\n # Cookie files don't store port numbers, unfortunately, so\r\n # get rid of the port number if it's present.\r\n host = host.split(':')[0]\r\n\r\n self.debug('Looking for \"%s %s\" cookie in %s' %\r\n (host, path, self.cookie_file))\r\n self.cookie_jar.load(self.cookie_file, ignore_expires=True)\r\n\r\n try:\r\n cookie = self.cookie_jar._cookies[host][path]['rbsessionid']\r\n\r\n if not cookie.is_expired():\r\n self.debug('Loaded valid cookie -- no login required')\r\n return True\r\n\r\n self.debug('Cookie file loaded, but cookie has expired')\r\n except KeyError:\r\n self.debug('Cookie file loaded, but no cookie for this server')\r\n except IOError, error:\r\n self.debug('Couldn\\'t load cookie file: %s' % error)\r\n\r\n return False", "def cookie(self, name, default=None):\r\n return self._get_cookies().get(name, default)", "def is_cookie_expired(cookie_name):\n if cookie_name:\n expires = int\n timestamp = int(time.time())\n for cookie in __request_session.cookies:\n if cookie.name == cookie_name:\n expires = cookie.expires\n else:\n return None\n if timestamp > expires:\n log.debug('cookie[\\'%s\\'] is expired. time stamp: %s, expires: %s' %\n (cookie_name, timestamp, expires))\n return True\n log.debug('cookie[\\'%s\\'] is not expired. time stamp: %s, expires: %s' %\n (cookie_name, timestamp, expires))\n return False", "def exists(self, name):\n return name in self.cache", "def get_cookie(self, name, value=None):\n try:\n return cherrypy.request.cookie[name].value\n except KeyError:\n return value", "def check_cookies(self):\r\n try:\r\n driver = self.driver\r\n my_file = open(\"CookiesFb.pkl\")\r\n return my_file, driver\r\n\r\n except IOError:\r\n print(\"Cookies does not exist. Will create cookies anyway. Wait for a second~\")\r\n #Buka file credential\r\n self.login(accountfacebook.EMAIL, accountfacebook.PWD)\r\n print(\"File cookies created. Next file run will pass login page\")", "def getCookie(key):", "def get_cookies(domname):\n if 'firefox' in udata.srcs:\n cout = get_cookies_firefox(domname)\n elif 'chrome' in udata.srcs:\n cout = get_cookies_chrome(domname)\n else:\n print(\"Error: No cookie source defined. 
Define either `srcs.firefox` or `srcs.chrome`.\")\n cout = None\n return cout", "def get_cookie_value( cookiejar, name ):\n value = None\n for cookie in cookiejar:\n if cookie.name == name:\n value = cookie.value\n break\n return value", "def exists(profile, name):\n result = fetch_by_name(profile, name)\n return len(result) > 0", "def exists(profile, name):\n result = fetch_by_name(profile, name)\n return len(result) > 0", "def is_cookie_expired(cookie):\n now = int(round(time() * 1000))\n expire = cookie[u't']\n return now > expire", "def load_cookies(self):\n try:\n self.session.cookies.load(ignore_discard=True)\n return True\n except FileNotFoundError:\n return False", "def verify_cookies(self, device):\n self.assertTrue(device.cookies is not None)", "def has_key(self, name):\n return self[name] <> None", "def _verify_session_cookies(self):\n if not self.session.cookies:\n return False\n for cookie_name in LOGIN_COOKIES:\n if cookie_name not in list(self.session.cookies.keys()):\n LOG.error('The cookie \"{}\" do not exist, it is not possible to check the expiration',\n cookie_name)\n return False\n for cookie in self.session.cookies.jar:\n if cookie.name != cookie_name:\n continue\n if cookie.expires <= int(time.time()):\n LOG.info('Login is expired')\n return False\n return True", "def get_secure_cookie( name, value=None ):", "def delete_cookie(cookie_name):\n if cookie_name in __request_session.cookies:\n del __request_session.cookies[cookie_name]\n log.debug('deleting cookie: %s session cookies: %s' % (cookie_name, __request_session.cookies))\n return True\n return False", "def name_exists(self, login):\n\t\treturn login in self.users_by_name", "def _load_cookies(self, filename):\n if not os.path.isfile(filename):\n return False\n\n with open(filename) as f:\n _cookies = pickle.load(f)\n if _cookies:\n jar = cookies.RequestsCookieJar()\n jar._cookies = _cookies\n self.session.cookies = jar\n else:\n return False", "def parse_cookie(name, seed, kaka):\n if not kaka:\n return None\n\n cookie_obj = SimpleCookie(kaka)\n morsel = cookie_obj.get(name)\n\n if morsel:\n parts = morsel.value.split(\"|\")\n if len(parts) != 3:\n return None\n # verify the cookie signature\n sig = cookie_signature(seed, parts[0], parts[1])\n if sig != parts[2]:\n raise SAMLError(\"Invalid cookie signature\")\n\n try:\n return parts[0].strip(), parts[1]\n except KeyError:\n return None\n else:\n return None", "def are_logged_in_cookies_set(request):\n if settings.FEATURES.get('DISABLE_SET_JWT_COOKIES_FOR_TESTS', False):\n cookies_that_should_exist = DEPRECATED_LOGGED_IN_COOKIE_NAMES\n else:\n cookies_that_should_exist = ALL_LOGGED_IN_COOKIE_NAMES\n\n return all(\n cookie_name in request.COOKIES\n for cookie_name in cookies_that_should_exist\n ) and request.COOKIES[settings.EDXMKTG_LOGGED_IN_COOKIE_NAME]", "def findIECookie(domain, cookie):\n try:\n l = _getLocation()\n except Exception, err:\n # Print a debug message\n print \"Error pulling registry key:\", err\n return None\n # Found the key; now find the files and look through them\n f = _getCookieFiles(l, domain)\n if f:\n cookie_re = re.compile('%s\\n(.*?)\\n' % cookie)\n return _findCookie(f, cookie_re)\n else:\n print \"No cookies for domain (%s) found\" % domain\n return None" ]
[ "0.7101593", "0.7086198", "0.6647389", "0.6630075", "0.6630075", "0.6610906", "0.6543777", "0.6334645", "0.62841827", "0.61255693", "0.5999913", "0.5945455", "0.5881533", "0.5805246", "0.5785581", "0.576895", "0.575548", "0.575548", "0.57039577", "0.56893706", "0.5664889", "0.56463194", "0.5629731", "0.56197214", "0.56138605", "0.55749655", "0.5574057", "0.5547294", "0.5540431", "0.5527248" ]
0.74513674
0
Load a genbank file as a Biopython object.
def load_genbank(path): with open(path, 'r') as fd: try: genbank = SeqIO.read(fd, 'genbank') except Exception as err: raise Exception(path + '\t' + str(err)) return genbank
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_genbank(cls, filename):\n\t\tseq_record = SeqIO.read(filename, 'genbank')\n\t\trec = cls(seq_record=seq_record)\n\t\treturn rec", "def readGBK(filename):\r\n gnome_record = SeqIO.read(filename, \"genbank\")\r\n return gnome_record", "def read_genbank(genome_accession_no, genbank_file=None):\n \n if genbank_file:\n print \"reading genbank file %s\" % genbank_file\n seq_record = SeqIO.read(genbank_file, \"genbank\")\n else:\n print \"downloading and parsing genbank file for %s\" % genome_accession_no\n handle = Entrez.efetch(db=\"nucleotide\", rettype=\"gb\",\n retmode=\"text\", id=genome_accession_no)\n seq_record = SeqIO.read(handle, \"gb\")\n handle.close()\n return seq_record", "def gbk_parse(fname):\n fhand = _open_file(gbkfname)\n unk = 1 \n\n for record in SeqIO.parse(fhand, \"genbank\"):\n\n gene_tags = dict()\n tx_tags = collections.defaultdict(list) \n exon = collections.defaultdict(list) \n cds = collections.defaultdict(list) \n mol_type, chr_id = None, None \n\n for rec in record.features:\n\n if rec.type == 'source':\n mol_type = rec.qualifiers['mol_type'][0]\n try:\n chr_id = rec.qualifiers['chromosome'][0]\n except:\n chr_id = record.name \n continue \n\n strand='-'\n strand='+' if rec.strand>0 else strand\n \n fid = None \n try:\n fid = rec.qualifiers['gene'][0]\n except:\n pass\n\n transcript_id = None\n try:\n transcript_id = rec.qualifiers['transcript_id'][0]\n except:\n pass \n\n if re.search(r'gene', rec.type):\n gene_tags[fid] = (rec.location._start.position+1, \n rec.location._end.position, \n strand,\n rec.type,\n rec.qualifiers['note'][0])\n elif rec.type == 'exon':\n exon[fid].append((rec.location._start.position+1, \n rec.location._end.position))\n elif rec.type=='CDS':\n cds[fid].append((rec.location._start.position+1, \n rec.location._end.position))\n else: \n # get all transcripts \n if transcript_id: \n tx_tags[fid].append((rec.location._start.position+1,\n rec.location._end.position, \n transcript_id,\n rec.type))\n # record extracted, generate feature table\n unk = feature_table(chr_id, mol_type, strand, gene_tags, tx_tags, cds, exon, unk)\n \n #break\n fhand.close()", "def from_genbank(\n cls,\n gbk_data: Path,\n output_file: Path = None,\n prefix: str = None,\n nucleotide: bool = False,\n prepend_file_name: bool = False,\n ) -> LabelledFASTA:\n gbk_data = Path(gbk_data)\n if gbk_data.is_dir():\n gbk_files = [gbk_data / f for f in gbk_data.iterdir()]\n else:\n gbk_files = [gbk_data]\n\n if prepend_file_name:\n\n def gbk_name(gbk_file):\n return gbk_file.stem\n\n else:\n\n def gbk_name(gbk_file):\n return\n\n gbk_contigs = [\n (gbk_name(gbk_file), contig)\n for gbk_file in gbk_files\n for contig in SeqIO.parse(gbk_file, \"genbank\")\n ]\n\n if output_file is None:\n output_file = Path(gbk_files[0].parent) / f\"{prefix}sequence_database.fasta\"\n else:\n output_file = Path(output_file)\n\n with open(output_file, \"w+\", encoding=\"UTF-8\") as outfile:\n for gbk_file_name, gbk_contig in gbk_contigs:\n gene_counter = 0\n for feature in gbk_contig.features:\n if \"cds\" in feature.type.lower():\n gene_counter = cls.write_record(\n gbk_contig,\n feature,\n outfile,\n gene_counter,\n gbk_file_name,\n nucleotide,\n )\n return cls(output_file)", "def load_bb(filename):\n in_data = gdal.Open(filename, 0)\n geotransform = in_data.GetGeoTransform()\n nx = in_data.RasterXSize\n ny = in_data.RasterYSize\n return geotransform2bb(geotransform, nx, ny)", "def NCBIreadGBK(accession):\r\n net_handle = Entrez.efetch(db=\"nuccore\",id=str(accession),\r\n 
rettype='gbwithparts', retmode=\"txt\")\r\n gnome_record=SeqIO.read(net_handle, \"genbank\")\r\n net_handle.close()\r\n return gnome_record", "def from_file(cls, filename):\n biogrf = ''\n ff = ''\n descrp = ''\n atoms = {}\n with open(filename) as f:\n for line in f:\n if line.startswith('BIOGRF'):\n biogrf = line.strip().split()[1]\n elif line.startswith('DESCRP'):\n descrp = line.strip().split()[1]\n elif line.startswith('FORCEFIELD'):\n ff = line.strip().split()[1]\n elif line.startswith(('ATOM', 'HETATM')):\n b = BGFAtom.from_line(line)\n atoms[b.natom] = b\n elif line.startswith('CONECT'):\n natom = int(line[6:12])\n atoms[natom].add_connections_from_line(line)\n else:\n continue\n atoms = sorted(atoms.values(), key=attrgetter('natom'))\n return cls(biogrf, descrp, ff, atoms)", "def genbank_to_gff(gb_file):\n max_size = 1e4\n gff_file = \"%s.gff3\" % os.path.splitext(gb_file)[0]\n if not os.path.exists(gff_file):\n with open(gb_file) as in_handle:\n with open(gff_file, \"w\") as out_handle:\n gb_iterator = SeqIO.parse(in_handle, \"genbank\")\n GFF.write(_filter_features(gb_iterator, max_size),\n out_handle)", "def read_bgen(\n path: PathType,\n metafile_path: Optional[PathType] = None,\n sample_path: Optional[PathType] = None,\n chunks: Union[str, int, Tuple[int, int, int]] = \"auto\",\n lock: bool = False,\n persist: bool = True,\n contig_dtype: DType = \"str\",\n gp_dtype: DType = \"float32\",\n) -> Dataset:\n if isinstance(chunks, tuple) and len(chunks) != 3:\n raise ValueError(f\"`chunks` must be tuple with 3 items, not {chunks}\")\n if not np.issubdtype(gp_dtype, np.floating):\n raise ValueError(\n f\"`gp_dtype` must be a floating point data type, not {gp_dtype}\"\n )\n if not np.issubdtype(contig_dtype, np.integer) and np.dtype(\n contig_dtype\n ).kind not in {\"U\", \"S\"}:\n raise ValueError(\n f\"`contig_dtype` must be of string or int type, not {contig_dtype}\"\n )\n\n path = Path(path)\n sample_path = Path(sample_path) if sample_path else path.with_suffix(\".sample\")\n\n if sample_path.exists():\n sample_id = read_samples(sample_path).sample_id.values.astype(\"U\")\n else:\n sample_id = _default_sample_ids(path)\n\n bgen_reader = BgenReader(path, metafile_path=metafile_path, dtype=gp_dtype)\n\n df = read_metafile(bgen_reader.metafile_path)\n if persist:\n df = df.persist()\n arrs = dataframe_to_dict(df, METAFILE_DTYPE)\n\n variant_id = arrs[\"id\"]\n variant_contig = arrs[\"chrom\"].astype(contig_dtype)\n variant_contig, variant_contig_names = encode_contigs(variant_contig)\n variant_contig_names = list(variant_contig_names)\n variant_position = arrs[\"pos\"]\n variant_allele = da.hstack((arrs[\"a1\"][:, np.newaxis], arrs[\"a2\"][:, np.newaxis]))\n\n call_genotype_probability = da.from_array(\n bgen_reader,\n chunks=chunks,\n lock=lock,\n fancy=False,\n asarray=False,\n name=f\"{bgen_reader.name}:read_bgen:{path}\",\n )\n call_dosage = _to_dosage(call_genotype_probability)\n\n ds: Dataset = create_genotype_dosage_dataset(\n variant_contig_names=variant_contig_names,\n variant_contig=variant_contig,\n variant_position=variant_position,\n variant_allele=variant_allele,\n sample_id=sample_id,\n call_dosage=call_dosage,\n call_genotype_probability=call_genotype_probability,\n variant_id=variant_id,\n )\n\n return ds", "def parse_genbank(email = \"[email protected]\", ref_id = \"NC_045512.2\"):\n ## ============ Fetch genbank record ============ ##\n # Set email \n Entrez.email = email\n # Make handel object \n handle = Entrez.efetch(db=\"nuccore\", id=ref_id, rettype=\"gb\", 
retmode=\"text\")\n # Save the record -- only extract first record (there should be only one)\n record = next(SeqIO.parse(handle, \"gb\"))\n \n ## ============ Parse genbank record ============ ##\n # Dictionary to hold the open reading frames\n ORFS = dict()\n for feature in record.features:\n # Only extract the coding sequences\n if feature.type == \"CDS\": \n # Special considerations for overlapping ORF\n if feature.qualifiers.get(\"gene\")[0] == \"ORF1ab\":\n # Get the open reading frame that contains the ribosomal slippage\n if \"-1 ribosomal frameshift\" in str(feature.qualifiers.get(\"note\")): \n # Extract the non-overlapping and frameshifted indices\n name = \"ORF1ab\"\n ORFS[name] = feature\n # Get the open reading frame that just contains the 'a' portion\n else:\n # Extract the non-overlapping and frameshifted indices\n name = \"ORF1a\"\n ORFS[name] = feature\n # Iterate ove the remaining trivial CDS \n else:\n # Build the lookup dictionary with the normal sequences\n name = feature.qualifiers.get(\"gene\")[0]\n ORFS[name] = feature\n # Return Lookup dictionary\n return ORFS, record.seq", "def import_gene_assembly(infile):\n deserialized = None\n with open(infile, 'r') as file_handle:\n deserialized = json.load(file_handle, object_hook=decode_assembly)\n return deserialized", "def load_genomes(UTRfilestring, twobitfile):\n\tUTRdict= rph.readindict(open(UTRfilestring, \"rU\"))\n\tgenome= twobitreader.TwoBitFile(twobitfile) # do we actually need to load this in here?\n\treturn UTRdict, genome", "def load_biom_table(table_f):\n return parse_biom_table(table_f)", "def from_file(path):\n\n filename = os.path.basename(path)\n\n base, suffix = os.path.splitext(filename);\n\n if suffix == '.bin':\n g = bgy3d.from_file(path)\n elif suffix == '.m':\n g = contf.m2dat(path)\n else:\n print 'Unknown file suffix.'\n exit()\n\n return g", "def LoadBrain(self, FilePath=\"standard.brn\"):\n Logging.info(\"AI brain loaded from %s\", FilePath)\n self.AI.loadBrain(FilePath)", "def import_idb(self, idb_file):\n self.__run_import_script(file=idb_file, is_bin=False)", "def load(filename):\n return GesFile(filename)", "def import_file(some_genbank, collection):\n with open(some_genbank, 'r') as open_file:\n collection = kv.get_collection(collection)\n\n # Each \"record\" in genbank file is read, corresponds to individual contigs\n for record in SeqIO.parse(open_file, 'gb'):\n current_contig = record.name\n try:\n current_species = record.annotations['source']\n except KeyError:\n name = re.search(r'\\w+\\/(.+)\\.\\w+$', some_genbank)\n current_species = name.group(1)\n \n\n collection.insert_one({\n 'species':current_species,\n 'contig':current_contig,\n 'dna_seq':str(record.seq),\n 'type':'contig'\n })\n\n print \"Importing {}\".format(current_contig)\n ssu_gene = get_16S(record)\n if ssu_gene:\n try:\n locus_tag = ssu_gene[0].qualifiers['locus_tag'][0]\n except KeyError:\n locus_tag = None\n \n parsed_location = kv.get_gene_location(ssu_gene[0].location)\n gene_record = {\n 'species':current_species,\n 'location':{\n 'contig':current_contig,\n 'start':parsed_location[0],\n 'end':parsed_location[1],\n 'strand':parsed_location[2],\n },\n 'locus_tag':locus_tag,\n 'annotation':ssu_gene[0].qualifiers['product'][0],\n 'dna_seq':ssu_gene[1],\n 'type':'16S'\n }\n print \"adding 16S gene!\"\n collection.insert_one(gene_record)\n kv.get_collection('16S').insert_one(gene_record)\n\n for feature in record.features:\n if feature.type == 'CDS':\n parsed_location = kv.get_gene_location(feature.location)\n try:\n 
locus_tag = feature.qualifiers['locus_tag'][0]\n except KeyError:\n locus_tag = None\n\n gene_record = {\n 'species':current_species,\n 'location':{\n 'contig':current_contig,\n 'start':parsed_location[0],\n 'end':parsed_location[1],\n 'strand':parsed_location[2],\n 'index':None\n },\n 'locus_tag':locus_tag,\n 'annotation':feature.qualifiers['product'][0],\n 'dna_seq':get_dna_seq(feature, record),\n 'aa_seq':feature.qualifiers['translation'][0],\n 'type':'gene'\n }\n collection.insert_one(gene_record)", "def load_bc(self):\r\n\r\n # Open the file and read all the lines.\r\n array = np.loadtxt(self.bc_file)\r\n\r\n # Convert the columns to appropriate type.\r\n self.beta = array[:, 0]\r\n self.code = array[:, 1].astype(int)", "def generate_genome(genbank):\n row = {\n '_key': genbank.id,\n 'name': genbank.name,\n 'description': genbank.description,\n 'molecule_type': genbank.annotations.get('molecule_type', ''),\n 'topology': genbank.annotations.get('topology', ''),\n 'data_file_division': genbank.annotations.get('data_file_division', ''),\n 'date': genbank.annotations.get('date', ''),\n 'accessions': genbank.annotations.get('accessions', []),\n 'sequence_version': genbank.annotations.get('sequence_version', ''),\n 'source': genbank.annotations.get('source', ''),\n 'dbxrefs': genbank.dbxrefs,\n 'organism_name': genbank.annotations.get('organism', ''),\n 'taxonomy': ', '.join(genbank.annotations.get('taxonomy', '')),\n 'comment': genbank.annotations.get('comment', ''),\n 'annotation_data': {}\n }\n annot_data = genbank.annotations.get('structured_comment', {}).get('Genome-Annotation-Data', {})\n for (key, val) in annot_data.items():\n row['annotation_data'][key] = val\n yield row", "def load_gene_dict(reference_genbank_name=\"data/covid-19-genbank.gb\"):\n recs = [rec for rec in SeqIO.parse(reference_genbank_name, \"genbank\")]\n gene_dict = {}\n for rec in recs:\n feats = [feat for feat in rec.features if feat.type == \"CDS\"]\n for feat in feats:\n content = '{}: {}'.format(feat.qualifiers['protein_id'][0], feat.qualifiers['product'][0])\n if feat.qualifiers['product'][0] == 'ORF1a polyprotein':\n continue\n if feat.location_operator == 'join':\n for item in feat.location.parts:\n key = (item.start.position, item.end.position)\n if 'translation' in feat.qualifiers:\n seq = feat.qualifiers['translation']\n if len(seq) == 1:\n amino_acid_seq = seq[0]\n gene_dict[key] = (content, amino_acid_seq)\n else:\n key = (feat.location.start.position, feat.location.end.position)\n if 'translation' in feat.qualifiers:\n seq = feat.qualifiers['translation']\n if len(seq) == 1:\n amino_acid_seq = seq[0]\n gene_dict[key] = (content, amino_acid_seq)\n return gene_dict", "def generate_genome_import_files(genbank_path, output_dir):\n genbank = load_genbank(genbank_path)\n genome_path = os.path.join(output_dir, _genome_vert_name + '.json')\n write_import_file(generate_genome(genbank), genome_path)\n gene_path = os.path.join(output_dir, genbank.id, _gene_vert_name + '.json')\n write_import_file(generate_genes(genbank), gene_path)\n gene_edge_path = os.path.join(output_dir, genbank.id, _gene_edge_name + '.json')\n write_import_file(generate_gene_edges(genbank), gene_edge_path)", "def manual_import_genesis(self, path):\n dtu = DtuLoader.DtuLoader(path)\n fbx_path = dtu.get_fbx_path()\n self.genesis_import(fbx_path, dtu)", "def load_barcodes(self, path):\n self._barcodes = pickle.load(open(path, 'rb'))", "def __init__(self, bc_file):\r\n self.bc_file = bc_file\r\n self.beta = []\r\n self.code = []\r\n 
self.load_bc()", "def load_gene_ontology(self, file_path):\n\t\tpass", "def load_building_blocks(path):\t\t\n\t#TODO : automatization\n\tbenzene = Building_Block(abbrev=\"B\", num_atoms=6,origin=0, para_pos=3, para_angle=0, meta_pos=4 , meta_angle = -np.pi/3., ortho_pos=5, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/benzene.xyz\")\n\tnapthtalene = Building_Block(abbrev=\"N\", num_atoms=18,origin=0, para_pos=12, para_angle=0., meta_pos=11 , meta_angle = -np.pi/3., ortho_pos=10, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/naphtalene.xyz\")\n\tdbPc1 = Building_Block(abbrev=\"dbPc1\", num_atoms=32,origin=13, para_pos=1, para_angle=0, meta_pos=0 , meta_angle = +np.pi/3., ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/dbPc1_block.xyz\")\n\tdbPc4 = Building_Block(abbrev=\"dbPc4\", num_atoms=55,origin=22, para_pos=1, para_angle=0, meta_pos=0 , meta_angle = -np.pi/3., ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/dbPc4.xyz\")\n\tdbPc6 = Building_Block(abbrev=\"dbPc6\", num_atoms=52,origin=17, para_pos=0, para_angle=0, meta_pos=1 , meta_angle = -np.pi/3., ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/dbPc6.xyz\")\n\tdbPc5 = Building_Block(abbrev=\"dbPc5\", num_atoms=58,origin=12, para_pos=26, para_angle=0, meta_pos=20 , meta_angle = -np.pi/3., ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/dbPc5.xyz\")\n\tpseudo_para_naph_PCP = Building_Block(abbrev=\"pseudo-para_naph_PCP\", num_atoms=44,origin=0, para_pos=18, para_angle=0, meta_pos=16 , meta_angle = -np.pi/3, ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/pseudo-para_naph_PCP.xyz\")\n\tline =Building_Block(abbrev=\"line\", num_atoms=4,origin=0, para_pos=1, para_angle=0, meta_pos=1 , meta_angle = 0., ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=1, path=path+\"/line.xyz\")\n\t#rot=Building_Block(abbrev=\"line\", num_atoms=47,origin=6, para_pos=16, para_angle=0, meta_pos=20 , meta_angle = 0., ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=2, path=path+\"/rot.xyz\")\n\t#stacked_anth=Building_Block(abbrev=\"stacked_anth\", num_atoms=62,origin=3, para_pos=22, para_angle=0, meta_pos=30 , meta_angle = 0., ortho_pos=0, ortho_angle=-2.*np.pi/3, fixed_left = -1,complexity=2, path=path+\"/stacked_anth.xyz\")\n\t\n\tbuilding_blocks = [benzene,napthtalene,dbPc1,dbPc4,dbPc6, dbPc5,pseudo_para_naph_PCP, line]\n\n\treturn building_blocks", "def load_bnf_file(filepath, repository = None):\r\n linelist = []\r\n with open(filepath,'r') as mlfile:\r\n for line in mlfile:\r\n linelist.append(line)\r\n return strlist_to_production_set(linelist, repository)", "def load_sim(filename):\n return pybamm.load(filename)" ]
[ "0.74134886", "0.67536664", "0.673955", "0.645511", "0.6367303", "0.61472815", "0.6139651", "0.60025644", "0.5941233", "0.5793891", "0.5757002", "0.57405365", "0.5613736", "0.5606458", "0.55976534", "0.55834764", "0.55792767", "0.5574212", "0.5572035", "0.5569845", "0.5569083", "0.5566544", "0.55659604", "0.5555828", "0.55535", "0.5531694", "0.550265", "0.5476917", "0.5471259", "0.54624265" ]
0.7991978
0
Generate all import files for a given genbank file path to an output_dir. Will produce CSV files for each collection (filename = collection name).
def generate_genome_import_files(genbank_path, output_dir): genbank = load_genbank(genbank_path) genome_path = os.path.join(output_dir, _genome_vert_name + '.json') write_import_file(generate_genome(genbank), genome_path) gene_path = os.path.join(output_dir, genbank.id, _gene_vert_name + '.json') write_import_file(generate_genes(genbank), gene_path) gene_edge_path = os.path.join(output_dir, genbank.id, _gene_edge_name + '.json') write_import_file(generate_gene_edges(genbank), gene_edge_path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def import_file(some_genbank, collection):\n with open(some_genbank, 'r') as open_file:\n collection = kv.get_collection(collection)\n\n # Each \"record\" in genbank file is read, corresponds to individual contigs\n for record in SeqIO.parse(open_file, 'gb'):\n current_contig = record.name\n try:\n current_species = record.annotations['source']\n except KeyError:\n name = re.search(r'\\w+\\/(.+)\\.\\w+$', some_genbank)\n current_species = name.group(1)\n \n\n collection.insert_one({\n 'species':current_species,\n 'contig':current_contig,\n 'dna_seq':str(record.seq),\n 'type':'contig'\n })\n\n print \"Importing {}\".format(current_contig)\n ssu_gene = get_16S(record)\n if ssu_gene:\n try:\n locus_tag = ssu_gene[0].qualifiers['locus_tag'][0]\n except KeyError:\n locus_tag = None\n \n parsed_location = kv.get_gene_location(ssu_gene[0].location)\n gene_record = {\n 'species':current_species,\n 'location':{\n 'contig':current_contig,\n 'start':parsed_location[0],\n 'end':parsed_location[1],\n 'strand':parsed_location[2],\n },\n 'locus_tag':locus_tag,\n 'annotation':ssu_gene[0].qualifiers['product'][0],\n 'dna_seq':ssu_gene[1],\n 'type':'16S'\n }\n print \"adding 16S gene!\"\n collection.insert_one(gene_record)\n kv.get_collection('16S').insert_one(gene_record)\n\n for feature in record.features:\n if feature.type == 'CDS':\n parsed_location = kv.get_gene_location(feature.location)\n try:\n locus_tag = feature.qualifiers['locus_tag'][0]\n except KeyError:\n locus_tag = None\n\n gene_record = {\n 'species':current_species,\n 'location':{\n 'contig':current_contig,\n 'start':parsed_location[0],\n 'end':parsed_location[1],\n 'strand':parsed_location[2],\n 'index':None\n },\n 'locus_tag':locus_tag,\n 'annotation':feature.qualifiers['product'][0],\n 'dna_seq':get_dna_seq(feature, record),\n 'aa_seq':feature.qualifiers['translation'][0],\n 'type':'gene'\n }\n collection.insert_one(gene_record)", "def _add_output_files(self):\n self._output_files = []\n base = os.path.join(os.path.dirname(self.in_fpath),\n os.path.splitext(os.path.basename(self.in_fpath))[0])\n\n output_path = f'{base}_out.csv'\n\n suffix = 2\n while os.path.exists(output_path):\n self._output_files.append(output_path)\n if os.path.getsize(output_path) < self.limit_fsize:\n return\n output_path = f'{base}_out_{suffix}.csv'\n suffix += 1\n\n open(output_path, 'a').close()\n self._output_files.append(output_path)", "def import_directory_csv(d_in, d_out, target_column, merge_columns):\n\n INPUT_FILES = grab_files(\"*.csv\", d_in)\n\n if not INPUT_FILES:\n logger.warning(\"No matching CSV files found, exiting\")\n exit(2)\n\n for f_csv in INPUT_FILES:\n f_csv_out = os.path.join(d_out, os.path.basename(f_csv))\n vals = (f_csv, f_csv_out, target_column, merge_columns)\n import_csv(vals)", "def process_files(args):\n coll = build_collection(args.data_path, args.include_online_only)\n\n for import_file in args.imports:\n _, ext = os.path.splitext(import_file)\n import_serializer_class = ser_interface.MtgSsmSerializer \\\n .by_extension_and_format(ext, args.import_format)\n import_serializer = import_serializer_class(coll)\n print('Importing counts from import: %s' % import_file)\n import_serializer.read_from_file(import_file)\n\n _, ext = os.path.splitext(args.collection)\n serializer_class = ser_interface.MtgSsmSerializer.by_extension_and_format(\n ext, args.format)\n serializer = serializer_class(coll)\n\n if os.path.exists(args.collection):\n print('Reading counts from existing file.')\n serializer.read_from_file(args.collection)\n 
backup_name = args.collection + '.bak-{:%Y%m%d_%H%M%S}'.format(\n datetime.datetime.now())\n print('Moving existing collection to backup: %s' % backup_name)\n shutil.move(args.collection, backup_name)\n\n print('Writing collection to file.')\n serializer.write_to_file(args.collection)", "def main(output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')\n\n baseurl = 'http://codeandbeer.org/virtual/BigData/Labs/'\n files = ['Booking-20151012-1322.csv', 'Booking-20181025-1232.csv']\n for filename in files:\n r = requests.get(baseurl+filename, stream=True)\n if r.status == 200:\n with open(output_filepath+\"/\"+filename, \"wb\") as f:\n f.raw.decode_content = True\n shutil.copyfileobj(r.raw, f)", "def main():\n\n # Ensure the output directory exists\n if not os.path.exists(OUTPUT_DIR):\n os.makedirs(OUTPUT_DIR)\n\n process_csv()", "def genes_file_creation(input_folder):\n file_paths = {}\n for file_name in os.listdir(input_folder):\n file_paths[file_name] = input_folder + '/' + file_name\n\n df = pa.DataFrame()\n \n for file_name in file_paths:\n df_temp = pa.read_csv(file_paths[file_name], sep='\\t', header=None)\n print(df_temp.columns)\n gene_column = 0\n df_temp = df_temp[[gene_column]]\n df_temp.columns = ['Gene_Name_DE']\n row = []\n file_extension = os.path.splitext(file_name)[1]\n row.append(file_name.replace(file_extension, \"\"))\n row.extend(df_temp['Gene_Name_DE'].tolist())\n df = df.append([row], ignore_index=True)\n\n df.insert(1, 'Description', 'Genes_DE')\n\n df.to_csv('DE_gene.gmt', sep='\\t', index=False, header=False)", "def main():\n for db_csv_export in current_dir.glob(\"template*.csv\"):\n data_projects = load_projects(db_csv_export)\n json_path = db_csv_export.with_suffix(\".json\")\n with open(json_path, \"w\") as fh:\n json.dump(data_projects, fh, indent=2)", "def make_up(self, base_path='./data/'):\n for csv_file_path in [f\"{base_path}{_}\" for _ in os.listdir(base_path)]:\n self.append_file(csv_file_path)", "def create_output_files(self):\n namenode = self.runner.namenode\n for i in range(self.cnt_reducers):\n fname = '%s.%s' % (self.output_dir, reduce_output(self.id, i))\n namenode.create_file(fname)\n self.result_files.append(fname)\n self.open_files.append(fname)\n\n for j in range(self.cnt_mappers):\n fname = map_output(self.id, j, i)\n namenode.create_file(fname)\n self.open_files.append(fname)", "def csvs_scattered_to_grouped(path_dir, inlist, outlist, gcols,\n sort=1, scols=None, catalog=\"\", supersede=False):\n\n filelist=[os.path.join(path_dir,i) for i in inlist]\n n_split=len(outlist)\n\n pdfs=pd.read_csv(filelist[0],usecols=gcols)\n pdfs.drop_duplicates(inplace=True)\n\n print(\"csvs_scattered_to_grouped: Collecting items for group.\\n\")\n for i in range(1,len(filelist)):\n pdfs=pdfs.append(pd.read_csv(filelist[i],usecols=gcols),ignore_index=True)\n pdfs.drop_duplicates(inplace=True)\n\n if sort==1:\n pdfs.sort_values(gcols,inplace=True, ascending=True)\n elif sort==-1:\n pdfs.sort_values(gcols,inplace=True, ascending=False)\n\n aa_ed=np.array_split(pdfs, n_split)\n\n if supersede:\n for i in outlist:\n if os.path.isfile(os.path.join(path_dir,i)):\n os.remove(os.path.join(path_dir,i))\n if os.path.isfile(os.path.join(path_dir,str(catalog))):\n os.remove(os.path.join(path_dir,str(catalog)))\n\n print(\"csvs_scattered_to_grouped: Start processing files:\\n\")\n for i in range(0,len(filelist)):\n fi=pd.read_csv(filelist[i],usecols=scols)\n for j,ja in enumerate(aa_ed):\n wrtj=pd.merge(ja, fi, 
how='inner', on=gcols)\n append_to_csv(wrtj, os.path.join(path_dir,outlist[j]))\n print('csvs_scattered_to_grouped: '+str(i)+' file(s) finished.')\n\n if catalog:\n for i, d in enumerate(aa_ed):\n d['_@_FILE_']=outlist[i]\n append_to_csv(d, os.path.join(path_dir,str(catalog)))\n print('csvs_scattered_to_grouped: Catalog file created.')", "def initialize_output_files(self):\r\n if not self.C.restart:\r\n print(\"* Touching output files.\", flush=True)\r\n # begin writing `generation.csv` file\r\n csv_path_and_filename = self.C.job_dir + \"generation.csv\"\r\n util.properties_to_csv(\r\n prop_dict=self.ts_properties,\r\n csv_filename=csv_path_and_filename,\r\n epoch_key=\"Training set\",\r\n append=False,\r\n )\r\n\r\n # begin writing `convergence.csv` file\r\n util.write_model_status(append=False)\r\n\r\n # create `generation/` subdirectory to write generation output to\r\n os.makedirs(self.C.job_dir + \"generation/\", exist_ok=True)", "def generate_files(self):\n\t\tapply_stemmer, xml_file, query_file, expected_file = self.read_config_file()\n\t\tself.generate_query_file(query_file, xml_file, apply_stemmer)\n\t\tself.generate_expected_file(expected_file, xml_file)\n\t\tlogging.info('FINALIZADO: MÓDULO PROCESSADOR DE CONSULTAS')", "def read(self):\n all_files = glob.glob(os.path.join(self.path, \"*.csv\"))\n start_time = datetime.now()\n for file in all_files:\n print(\"\\nImporting file: \" + file + \"\\n\")\n command = \"mongoimport -d ci_311db -c ci_311_incident --type csv --file \" + file + \" --headerline \" \\\n \"--columnsHaveTypes --numInsertionWorkers 4\"\n os.system(command)\n end_time = datetime.now()\n print(\"All CSVs imported in collection.\\nTotal import time: \" + str(end_time - start_time))", "def executeImports():\n\tglobal cactusConfig\n\tinputPath = cactusConfig['inputPath']\n\toutputPath = cactusConfig['outputPath']\n\n\t#find all html files in input path\n\tfor filename in glob.glob(os.path.join(inputPath, '*.html')):\n\t\tif os.path.basename(filename) not in cactusConfig:\n\t\t\timportIntoFile(filename, outputPath + os.path.basename(filename))", "def create_export_files(n,input_choice,timing,min_hull_per):\n\n\n\texists = os.path.isdir('analysis')\n\tif exists:\n\t\tf = open('analysis/results.csv','a',newline='')\n\t\tresults = csv.writer(f)\n\telse:\n\t\tos.mkdir('analysis')\n\t\tf = open('analysis/results.csv','w',newline='')\n\t\tresults = csv.writer(f)\n\t\tresults.writerow(['Algo','Size of Input','Min. 
Hull Pts Per','Type of Input','Timing'])\n\n\n\tresults.writerow(['Graham Scan',n,min_hull_per,input_choice,timing])", "def index_gcis(gcis_url, es_url, index, alias, dump_dir):\n conn = get_es_conn(es_url, index, alias)\n refList = get_refList(dump_dir)\n file_path = \"%s/organization/\"%(dump_dir)\n orgList = get_itemList(dump_dir, \"organization\")\n\n for (root,dirs,files) in os.walk(file_path):\n for f in files:\n f = \"%s%s\"%(file_path, f)\n with open(f) as item:\n jsonFile = json.load(item)\n prov = get_doc_prov(jsonFile, gcis_url, refList, orgList)\n import_prov(conn, index, alias, prov)", "def sync_csv(arg):\n files = os.listdir(arg.input_dir)\n file_map = dict()\n for f in files:\n label = os.path.splitext(f)[0].split('_')\n if len(label) < 2:\n continue\n if file_map.get(label[0], None) is None:\n file_map[label[0]] = dict()\n if label[1] == 'grid':\n file_map[label[0]]['grid'] = f\n else:\n if file_map[label[0]].get('data', None) is None:\n file_map[label[0]]['data'] = dict()\n if file_map[label[0]]['data'].get(label[1], None) is None:\n file_map[label[0]]['data'][label[1]] = []\n file_map[label[0]]['data'][label[1]].append(f)\n tar_name = 't{}'.format(datetime.now().strftime('%Y%m%d'))\n tar_path = os.path.join(arg.output_dir, 'tar', tar_name)\n if not os.path.exists(tar_path):\n os.mkdir(tar_path)\n i = 0\n n = len(file_map)\n for city, v in file_map.items():\n i = i + 1\n print('------ handle city [{}/{}]: {} -------'.format(i, n, city))\n city_csv_path = os.path.join(arg.output_dir, 'csv', city)\n city_sql_path = os.path.join(arg.output_dir, 'sql', city)\n if not os.path.exists(city_csv_path):\n os.mkdir(city_csv_path)\n if not os.path.exists(city_sql_path):\n os.mkdir(city_sql_path)\n grid = v.get('grid', None)\n if grid:\n grid = os.path.splitext(grid)[0]\n print('** handling grid ...')\n tar_sql = os.path.join(tar_path, '{}.sql.gz'.format(grid))\n if os.path.exists(tar_sql):\n print('****** {} exist!'.format(tar_sql))\n else:\n shutil.copyfile(os.path.join(arg.input_dir, '{}.csv'.format(grid)),\n os.path.join(city_csv_path, '{}.csv'.format(grid)))\n trans_grid(city, city_csv_path, city_sql_path)\n print('****** GZIP grid sql')\n with open(os.path.join(city_sql_path, '{}.sql'.format(grid))) as fi, gzip.open(\n os.path.join(tar_path, '{}.sql.gz'.format(grid)), 'wb') as fo:\n fo.write(fi.read().encode())\n data = v.get('data', None)\n if data:\n print('** handling data ...')\n for week, data_files in data.items():\n print('**** week: {}'.format(week))\n tar_detail = os.path.join(tar_path, '{}_{}_detail.sql.tar.gz'.format(city, week))\n if os.path.exists(tar_detail):\n print('****** {} exist!'.format(tar_detail))\n else:\n for data_file in data_files:\n shutil.copyfile(os.path.join(arg.input_dir, data_file), os.path.join(city_csv_path, data_file))\n create_detail(city, week, 30000, city_csv_path, city_sql_path)\n print('****** TAR detail sql')\n with tarfile.open(tar_detail, 'w:gz') as f:\n for city_week_detail in os.listdir(city_sql_path):\n if city_week_detail.startswith('{}_{}_detail'.format(city, week)):\n f.add(os.path.join(city_sql_path, city_week_detail), arcname=city_week_detail)\n print('****** remove csv and sql file...')\n for data_file in data_files:\n os.remove(os.path.join(city_csv_path, data_file))\n sql_files = os.path.join(city_sql_path, '{}_{}_detail*sql'.format(city, week))\n for sql_file in glob.glob(sql_files):\n os.remove(sql_file)", "def get_result_files(self):\n name_pattern = \"{mapper}.{ngs_library.name}\"\n yield from self._yield_result_files(\n 
os.path.join(\"output\", name_pattern, \"out\", name_pattern + \"{ext}\"), ext=EXT_VALUES\n )\n yield from self._yield_result_files(\n os.path.join(\"output\", name_pattern, \"log\", \"{mapper}.{ngs_library.name}.{ext}\"),\n ext=(\n \"log\",\n \"conda_info.txt\",\n \"conda_list.txt\",\n \"log.md5\",\n \"conda_info.txt.md5\",\n \"conda_list.txt.md5\",\n ),\n )\n yield from self._yield_result_files(\n os.path.join(\n \"output\", name_pattern, \"report\", \"bam_qc\", name_pattern + \".bam.{report}.txt\"\n ),\n report=(\"bamstats\", \"flagstats\", \"idxstats\"),\n )\n yield from self._yield_result_files(\n os.path.join(\n \"output\", name_pattern, \"report\", \"bam_qc\", name_pattern + \".bam.{report}.txt.md5\"\n ),\n report=(\"bamstats\", \"flagstats\", \"idxstats\"),\n )\n yield from self._yield_result_files(\n os.path.join(\n \"output\", name_pattern, \"report\", \"bam_qc\", name_pattern + \".bam.bamstats.html\"\n )\n )\n yield from self._yield_result_files(\n os.path.join(\n \"output\", name_pattern, \"report\", \"bam_qc\", name_pattern + \".bam.bamstats.html.md5\"\n )\n )\n\n for sheet in self.shortcut_sheets:\n for ngs_library in sheet.all_ngs_libraries:\n if ngs_library.name in self.ngs_library_to_kit:\n extraction_type = ngs_library.test_sample.extra_infos[\"extractionType\"]\n suffix = (\n \"_long\"\n if ngs_library.extra_infos[\"seqPlatform\"] in (\"PacBio\", \"ONP\")\n else \"\"\n )\n # Per-sample target coverage report.\n yield from expand(\n os.path.join(\n \"output\", name_pattern, \"report\", \"cov_qc\", name_pattern + \".{ext}\"\n ),\n mapper=self.config[\"tools\"][extraction_type.lower() + suffix],\n ngs_library=[ngs_library],\n ext=[\"txt\", \"txt.md5\"],\n )\n yield \"output/target_cov_report/out/target_cov_report.txt\"\n yield \"output/target_cov_report/out/target_cov_report.txt.md5\"\n if (\n self.config[\"picard_hs_metrics\"][\"path_targets_interval_list\"]\n and self.config[\"picard_hs_metrics\"][\"path_baits_interval_list\"]\n ):\n yield from self._yield_result_files(\n os.path.join(\n \"output\", name_pattern, \"report\", \"picard_hs_metrics\", name_pattern + \".txt\"\n )\n )\n yield from self._yield_result_files(\n os.path.join(\n \"output\", name_pattern, \"report\", \"picard_hs_metrics\", name_pattern + \".txt.md5\"\n )\n )\n if self.config[\"compute_coverage_bed\"]:\n yield from self._yield_result_files(\n os.path.join(\"output\", name_pattern, \"report\", \"coverage\", name_pattern + \"{ext}\"),\n ext=(\".bed.gz\", \".bed.gz.tbi\"),\n )\n else:\n print(\n \"Genome-wide coverage BED generation disabled\", file=sys.stderr\n ) # pragma: no cover", "def gen_datafiles():\n\tnum_reads = 10000\n\tnum_samples = 100\n\tgen_sequences('hg38.fa', num_reads, num_samples, 1, 'hg38_train.txt')\n\tgen_sequences('HIV-1.fasta', num_reads, num_samples, 0, 'HIV-1_train.txt')\n\tgen_sequences('hg38.fa', num_reads, num_samples, 1, 'hg38_test.txt')\n\tgen_sequences('HIV-1.fasta', num_reads, num_samples, 0, 'HIV-1_test.txt')", "def county_file_merger(folder_path):\n\n print(\"\\n*******************--- Starting File Merger for .csv files ---*******************\")\n with open(\"result.csv\",\"wb\") as outfile:\n for filename in os.listdir(folder_path):\n with open(filename,\"rb\") as infile:\n for line in infile:\n outfile.write(line)\n infile.close()\n outfile.close()\n print(\"\\nResult saved to -----> result.csv \")\n print(\"\\n*******************--- Finished File Merger for .csv files ---*******************\")", "def make_csv(idir, dates):\n for path, dirs, files in os.walk(idir):\n 
for date in dates:\n # first loop over output dir\n if not path.endswith(str(date)):\n continue\n arr = path.split('/')\n oname = '%s-%s.csv' % (arr[-2], arr[-1])\n print(\"write %s\" % oname)\n with open(oname, 'w') as ostream:\n headers = None\n for ifile in files:\n if 'part-' not in ifile:\n continue\n iname = os.path.join(path, ifile)\n with open(iname) as istream:\n first_line = istream.readline()\n if not headers:\n headers = first_line\n ostream.write(headers)\n while True:\n line = istream.readline().replace('\"', '')\n if not line:\n break\n ostream.write(line)", "def batch(infolder, outfile): # type: (str, str) -> None\n\n if not os.path.isdir(infolder):\n return\n\n results = []\n\n for filename in os.listdir(infolder):\n print('Processing ' + filename)\n curresults = []\n if filename.endswith('.txt'):\n with open(os.path.join(infolder, filename), 'r') as curfile:\n curdata = curfile.read() + '\\n'\n curresults = processClauseText(curdata, 'text')\n elif filename.endswith('.pdf'):\n with open(os.path.join(infolder, filename), 'rb') as curfile:\n curdata = base64.b64encode(curfile.read()).decode()\n curresults = processClauseText(curdata, 'pdf')\n elif filename.endswith('.docx'):\n with open(os.path.join(infolder, filename), 'rb') as curfile:\n curdata = base64.b64encode(curfile.read()).decode()\n curresults = processClauseText(curdata, 'word')\n if len(curresults) > 0:\n for result in curresults:\n result['filename'] = filename\n results.extend(curresults)\n\n if outfile is not None:\n with open(outfile, 'w') as outfile:\n json.dump(results, outfile, indent=2)", "def gen_static(self, output_folder):\n files = []\n for l in self.file_listers:\n files += l()\n for f in files:\n _logger.info(\"generating %s\" % f)\n content = self.get(f)\n loc = os.path.join(output_folder, f)\n d = os.path.dirname(loc)\n if not os.path.exists(d):\n os.makedirs(d)\n with open(loc, \"wb\") as file_:\n file_.write(content)", "def generate_data():\n for subdir, dirs, files in os.walk(legend_images_dir):\n for _file in files:\n getTables(_file)\n\n file_list = []\n for subdir, dirs, files in os.walk(pdf_output_dir):\n for _file in files:\n if _file.endswith('.pdf'):\n file_list.append(_file)\n\n print (\"Writing merged output in Output.pdf...\")\n current_dir = os.getcwd()\n mergeOutput(file_list, current_dir + \"/Output.pdf\")\n\n clean()", "def get_gbk_files(self):\n\n db_name = []\n for file in os.listdir(os.getcwd()):\n if file.endswith('.db'):\n db_name.append(str(file))\n\n for G_key, G_value in self.what.tier_frame_dict.items():\n Tier = G_key\n os.chdir(self.path)\n os.mkdir(Tier)\n os.chdir(Tier)\n Tier_path = os.getcwd()\n for Gene in self.what.tier_frame_dict[Tier].T:\n os.chdir(Tier_path)\n os.mkdir(Gene)\n os.chdir(Gene)\n for Organism in self.what.org_list:\n Accession = str(self.what.gene_dict[Gene][Organism])\n Accession, Sup, Version = Accession.partition('.')\n Accession = Accession.upper()\n server_flag = False\n for name in db_name:\n if server_flag is True:\n break\n name = str(name)\n server = BioSeqDatabase.open_database(\n driver='sqlite3', db=where.VERT_MAM + ('/Databases/%s' % name))\n for sub_db_name in server.keys():\n db = server[sub_db_name]\n\n try:\n record = db.lookup(accession=Accession)\n with open('%s_%s.gbk' % (Gene, Organism), 'w') as GB_file:\n GB_file.write(record.format('genbank'))\n print(GB_file.name, 'created')\n server_flag = True\n break\n except IndexError:\n print('Index Error')\n continue", "def main(input_filepath, output_filepath):\n productsDict = 
dataToDict(input_filepath)\n productsList = dictToCSV(productsDict)\n toCSV(productsList, output_filepath)\n\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')", "def _cmd_import_picard(args):\n for fname in args.targets:\n if not os.path.isfile(fname):\n # Legacy usage: previously accepted directory as an argument\n raise ValueError(\"Not a file: %s\" % fname)\n garr = importers.do_import_picard(fname)\n outfname = \"{}.{}targetcoverage.cnn\".format(\n garr.sample_id, \"anti\" if \"antitarget\" in fname else \"\"\n )\n if args.output_dir:\n if not os.path.isdir(args.output_dir):\n os.mkdir(args.output_dir)\n logging.info(\"Created directory %s\", args.output_dir)\n outfname = os.path.join(args.output_dir, outfname)\n tabio.write(garr, outfname)", "def _OpenOutputFiles(self):\n self.gfile = open(self.geomout, \"w\")\n self.efile = open(self.energyout, \"w\")\n self.PrintEnergyHeader()", "def main():\n glob_pattern = \"{root}/{child}/*.xml\".format(root=MANCHESTER_ROOT, child=TARGET_CHILD)\n corpus_files = glob(glob_pattern)\n for filename in corpus_files:\n print(filename)\n to_csv(filtered_parent_freq_count([filename], 2))" ]
[ "0.5798412", "0.57970154", "0.57526034", "0.5752054", "0.5739597", "0.5733188", "0.5693836", "0.5582578", "0.55606", "0.55546856", "0.5545329", "0.5531751", "0.55185336", "0.55129224", "0.5498727", "0.5498189", "0.54929864", "0.5453699", "0.54207224", "0.54065645", "0.53964186", "0.5395109", "0.5381983", "0.5373694", "0.5368446", "0.53582823", "0.53547335", "0.5345383", "0.5343916", "0.53428453" ]
0.76203096
0
Generate gene rows for every feature in a genbank object.
def generate_genes(genbank): for (idx, feature) in enumerate(genbank.features): if feature.type == 'source' or feature.type == 'gene': continue row = { 'location_start': feature.location.start, 'location_end': feature.location.end, 'strand': feature.strand, 'ref': feature.ref, 'ref_db': feature.ref_db } for (name, val) in feature.qualifiers.items(): # For some reason, all values under .qualifiers are lists of one elem # We join the elems into a string just in case there are ever multiple items row[name] = ', '.join(val) if not row.get('locus_tag'): # No locus tag; skip this one. We can only use features with locus tags. continue row['_key'] = row['locus_tag'] # Generate the DNA sequence using biopython # https://biopython.org/DIST/docs/api/Bio.SeqFeature.SeqFeature-class.html#extract seq_obj = SeqFeature(feature.location, feature.type) # type: SeqFeature seq_str = str(seq_obj.extract(genbank.seq)) row['dna_sequence'] = seq_str yield row
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_genome(genbank):\n row = {\n '_key': genbank.id,\n 'name': genbank.name,\n 'description': genbank.description,\n 'molecule_type': genbank.annotations.get('molecule_type', ''),\n 'topology': genbank.annotations.get('topology', ''),\n 'data_file_division': genbank.annotations.get('data_file_division', ''),\n 'date': genbank.annotations.get('date', ''),\n 'accessions': genbank.annotations.get('accessions', []),\n 'sequence_version': genbank.annotations.get('sequence_version', ''),\n 'source': genbank.annotations.get('source', ''),\n 'dbxrefs': genbank.dbxrefs,\n 'organism_name': genbank.annotations.get('organism', ''),\n 'taxonomy': ', '.join(genbank.annotations.get('taxonomy', '')),\n 'comment': genbank.annotations.get('comment', ''),\n 'annotation_data': {}\n }\n annot_data = genbank.annotations.get('structured_comment', {}).get('Genome-Annotation-Data', {})\n for (key, val) in annot_data.items():\n row['annotation_data'][key] = val\n yield row", "def generate_gene_edges(genbank):\n genome_key = genbank.id\n genome_id = _genome_vert_name + '/' + genome_key\n for (idx, feature) in enumerate(genbank.features):\n # Skip the 'source' feature, which describes the entire genome\n if feature.type == 'source' or 'locus_tag' not in feature.qualifiers:\n continue\n # Generate the edge from gene to genome\n gene_key = feature.qualifiers['locus_tag'][0]\n gene_id = _gene_vert_name + '/' + gene_key\n edge_key = gene_key + '-' + genome_key\n yield {'_from': gene_id, '_to': genome_id, '_key': edge_key}", "def feature_table(chr_id, source, orient, genes, transcripts, cds, exons, unk):\n for gname, ginfo in genes.items():\n line = [str(chr_id), \n 'gbk_to_gff',\n ginfo[3],\n str(ginfo[0]),\n str(ginfo[1]),\n '.',\n ginfo[2],\n '.',\n 'ID='+str(gname)+';Name='+str(gname)+';Note='+ginfo[-1]]\n print '\\t'.join(line) \n ## construct the transcript line is not defined in the original file \n t_line = [str(chr_id), 'gbk_to_gff', source, 0, 1, '.', ginfo[2], '.'] \n\n if not transcripts:\n t_line.append('ID=Transcript:'+str(gname)+';Parent='+str(gname))\n\n if exons: ## get the entire transcript region from the defined feature\n t_line[3] = str(exons[gname][0][0])\n t_line[4] = str(exons[gname][0][-1])\n elif cds:\n t_line[3] = str(cds[gname][0][0])\n t_line[4] = str(cds[gname][0][-1])\n print '\\t'.join(t_line) \n\n if exons:\n exon_line_print(t_line, exons[gname], 'Transcript:'+str(gname), 'exon')\n\n if cds:\n exon_line_print(t_line, cds[gname], 'Transcript:'+str(gname), 'CDS')\n if not exons:\n exon_line_print(t_line, cds[gname], 'Transcript:'+str(gname), 'exon')\n\n else: ## transcript is defined \n for idx in transcripts[gname]: \n t_line[2] = idx[3]\n t_line[3] = str(idx[0])\n t_line[4] = str(idx[1])\n t_line.append('ID='+str(idx[2])+';Parent='+str(gname))\n print '\\t'.join(t_line) \n \n ## feature line print call \n if exons:\n exon_line_print(t_line, exons[gname], str(idx[2]), 'exon')\n if cds:\n exon_line_print(t_line, cds[gname], str(idx[2]), 'CDS')\n if not exons:\n exon_line_print(t_line, cds[gname], str(idx[2]), 'exon')\n\n if len(genes) == 0: ## feature entry with fragment information \n \n line = [str(chr_id), 'gbk_to_gff', source, 0, 1, '.', orient, '.'] \n fStart = fStop = None \n\n for eid, ex in cds.items(): \n fStart = ex[0][0] \n fStop = ex[0][-1]\n\n for eid, ex in exons.items(): \n fStart = ex[0][0] \n fStop = ex[0][-1]\n\n if fStart or fStart:\n\n line[2] = 'gene'\n line[3] = str(fStart)\n line[4] = str(fStop)\n line.append('ID=Unknown_Gene_' + str(unk) + 
';Name=Unknown_Gene_' + str(unk))\n print \"\\t\".join(line)\n\n if not cds:\n line[2] = 'transcript'\n else:\n line[2] = 'mRNA'\n line[8] = 'ID=Unknown_Transcript_' + str(unk) + ';Parent=Unknown_Gene_' + str(unk)\n print \"\\t\".join(line)\n \n if exons:\n exon_line_print(line, cds[None], 'Unknown_Transcript_' + str(unk), 'exon')\n \n if cds:\n exon_line_print(line, cds[None], 'Unknown_Transcript_' + str(unk), 'CDS')\n if not exons:\n exon_line_print(line, cds[None], 'Unknown_Transcript_' + str(unk), 'exon')\n \n unk +=1 \n\n return unk", "def _load_genes(self):\n with open(self.gene_file_path, 'r') as gene_file:\n csv_reader = csv.reader(gene_file, delimiter=',')\n for gene in csv_reader:\n yield (gene[self.GENE_NAME_IDX], gene[self.GENE_ID_IDX])", "def geneProcess(self, name):\n self.fileHandle = open(self.fileName, 'r+b')\n self.mm = mmap.mmap(self.fileHandle.fileno(), 0)\n positions = self.geneFeatures[name]\n exons = []\n for position in positions:\n self.mm.seek(position)\n row = self.mm.readline().decode('utf-8').rstrip().split(\"\\t\")\n attributes = row[-1].split(\"; \")\n for attribute in attributes:\n if attribute.startswith(\"gene_type\"):\n _gt = attribute.split(\" \")[-1][1:-1]\n elif attribute.startswith(\"gene_id\"):\n _gid = attribute.split(\" \")[-1][1:-1]\n elif attribute.startswith(\"gene_name\"):\n _gn = attribute.split(\" \")[-1][1:-1]\n exons.append((row[0], int(row[3]), int(row[4]), row[6], _gt, _gid, _gn))\n self.fileHandle.close()\n exons_df = pd.DataFrame(exons, columns=['scaffold', 'start', 'end',\n 'strand', 'gene_type', 'gene_id', 'gene_name'])\n\n for record in self.geneExonicRegions(exons_df):\n yield record", "def genes(gtf_path):\n genes = GenomicDataFrame.from_gtf(\n gtf_path, filter_=lambda rec: rec['feature'] == 'gene')\n genes['strand'] = genes['strand'].map({'+': 1, '-': -1})\n return genes", "def gbk_parse(fname):\n fhand = _open_file(gbkfname)\n unk = 1 \n\n for record in SeqIO.parse(fhand, \"genbank\"):\n\n gene_tags = dict()\n tx_tags = collections.defaultdict(list) \n exon = collections.defaultdict(list) \n cds = collections.defaultdict(list) \n mol_type, chr_id = None, None \n\n for rec in record.features:\n\n if rec.type == 'source':\n mol_type = rec.qualifiers['mol_type'][0]\n try:\n chr_id = rec.qualifiers['chromosome'][0]\n except:\n chr_id = record.name \n continue \n\n strand='-'\n strand='+' if rec.strand>0 else strand\n \n fid = None \n try:\n fid = rec.qualifiers['gene'][0]\n except:\n pass\n\n transcript_id = None\n try:\n transcript_id = rec.qualifiers['transcript_id'][0]\n except:\n pass \n\n if re.search(r'gene', rec.type):\n gene_tags[fid] = (rec.location._start.position+1, \n rec.location._end.position, \n strand,\n rec.type,\n rec.qualifiers['note'][0])\n elif rec.type == 'exon':\n exon[fid].append((rec.location._start.position+1, \n rec.location._end.position))\n elif rec.type=='CDS':\n cds[fid].append((rec.location._start.position+1, \n rec.location._end.position))\n else: \n # get all transcripts \n if transcript_id: \n tx_tags[fid].append((rec.location._start.position+1,\n rec.location._end.position, \n transcript_id,\n rec.type))\n # record extracted, generate feature table\n unk = feature_table(chr_id, mol_type, strand, gene_tags, tx_tags, cds, exon, unk)\n \n #break\n fhand.close()", "def gene(self):\n\t\tif self._record is None:\n\t\t\treturn []\n\t\tgene_list =[i for i in self._record.features if i.type == 'gene']\n\t\treturn gene_list", "def readGenes(gtf):\n #read gtf\n genes = HTSeq.GenomicArrayOfSets(\"auto\", 
stranded=False)\n gs = {}\n for line in open(gtf):\n if line.startswith(\"#\"):\n continue\n line = line.split(\"\\n\")[0].split(\"\\t\")\n if line[2] != 'exon':\n continue\n ds = parseGtfFeature(line[8])\n key = \"|\".join([ds[\"gene_id\"], ds[\"gene_name\"]])\n nline = [\n line[0], line[3], line[4],\n \"|\".join([ds[\"gene_id\"], ds[\"gene_name\"]]), \".\", line[6]\n ]\n if key not in gs:\n gs[key] = [line[0], int(line[3]), int(line[4])]\n else:\n if int(line[3]) < gs[key][1]:\n gs[key][1] = int(line[3])\n if int(line[4]) > gs[key][2]:\n gs[key][2] = int(line[4])\n for g, v in gs.items():\n iv = HTSeq.GenomicInterval(v[0], v[1], v[2])\n genes[iv] += g\n return genes", "def generate_genome_import_files(genbank_path, output_dir):\n genbank = load_genbank(genbank_path)\n genome_path = os.path.join(output_dir, _genome_vert_name + '.json')\n write_import_file(generate_genome(genbank), genome_path)\n gene_path = os.path.join(output_dir, genbank.id, _gene_vert_name + '.json')\n write_import_file(generate_genes(genbank), gene_path)\n gene_edge_path = os.path.join(output_dir, genbank.id, _gene_edge_name + '.json')\n write_import_file(generate_gene_edges(genbank), gene_edge_path)", "def extract_genes(seq_record):\n return [f for f in seq_record.features if f.type == \"gene\"]", "def generate_genotype(self):\n genes = []\n for i in range(self.n_genes):\n genes.append(self.Gene(n_bases=self.n_bases))\n self.genes = genes", "def __init__(self, geneId, gtfFeature):\n\n self.geneId = geneId\n self.features = {}", "def dataframe_features(df, db):\n def generator():\n for gene_id in df.index:\n yield asinterval(db[gene_id])\n\n return pybedtools.BedTool(generator())", "def genFeatures(dimension, name2features, file_girls, file_boys):\n \n # Load in the data\n Xgirls = name2features(file_girls, B=dimension)\n Xboys = name2features(file_boys, B=dimension)\n X = np.concatenate([Xgirls, Xboys])\n \n # Generate Labels\n Y = np.concatenate([-np.ones(len(Xgirls)), np.ones(len(Xboys))])\n \n # shuffle data into random order\n ii = np.random.permutation([i for i in range(len(Y))])\n \n return X[ii, :], Y[ii]", "def CreateGeneModels(genes_cmpt, transcripts_cmpt, exons_cmpt, utr3_cmpt, utr5_cmpt, cds_cmpt):\n gene_counter, gene_models = 1, []\n for gene_entry in genes_cmpt: ## Figure out the genes and transcripts associated feature \n if gene_entry in transcripts_cmpt:\n gene=init_gene() \n gene['id']=gene_counter\n gene['name']=gene_entry[1]\n gene['chr']=genes_cmpt[gene_entry]['chr']\n gene['source']=genes_cmpt[gene_entry]['source']\n gene['start']=genes_cmpt[gene_entry]['start']\n gene['stop']=genes_cmpt[gene_entry]['stop']\n gene['strand']=genes_cmpt[gene_entry]['strand']\n if not gene['strand'] in ['+', '-']:\n gene['strand']='.' # Strand info not known replaced with a dot symbol instead of None, ?, . 
etc.\n if len(transcripts_cmpt[gene_entry])>1:\n gene['is_alt_spliced'] = 1\n gene['is_alt'] = 1\n\t gtype=[]\n for tids in transcripts_cmpt[gene_entry]: ## transcript section related tags \n gene['transcripts'].append(tids['ID'])\n\t\tgtype.append(tids['type'])\n exon_cod, utr5_cod, utr3_cod, cds_cod = [], [], [], []\n if (gene['chr'], tids['ID']) in exons_cmpt:\n exon_cod = [[feat_exon['start'], feat_exon['stop']] for feat_exon in exons_cmpt[(gene['chr'], tids['ID'])]]\n if (gene['chr'], tids['ID']) in utr5_cmpt:\n utr5_cod = [[feat_utr5['start'], feat_utr5['stop']] for feat_utr5 in utr5_cmpt[(gene['chr'], tids['ID'])]]\n if (gene['chr'], tids['ID']) in utr3_cmpt:\n utr3_cod = [[feat_utr3['start'], feat_utr3['stop']] for feat_utr3 in utr3_cmpt[(gene['chr'], tids['ID'])]]\n if (gene['chr'], tids['ID']) in cds_cmpt:\n cds_cod = [[feat_cds['start'], feat_cds['stop']] for feat_cds in cds_cmpt[(gene['chr'], tids['ID'])]]\n if len(exon_cod) == 0: ## build exon coordinates from UTR3, UTR5 and CDS\n if cds_cod != []:\n exon_cod=createExon(gene['strand'], utr5_cod, cds_cod, utr3_cod) \n\n if gene['strand']=='-': ## general order to coordinates\n if len(exon_cod) >1:\n if exon_cod[0][0] > exon_cod[-1][0]:\n exon_cod.reverse()\n if len(cds_cod) >1:\n if cds_cod[0][0] > cds_cod[-1][0]: \n cds_cod.reverse()\n if len(utr3_cod) >1:\n if utr3_cod[0][0] > utr3_cod[-1][0]: \n utr3_cod.reverse()\n if len(utr5_cod) >1:\n if utr5_cod[0][0] > utr5_cod[-1][0]:\n utr5_cod.reverse()\n\n tis, cdsStop, tss, cleave = [], [], [], [] ## speacial sited in the gene region \n if cds_cod != []:\n if gene['strand'] == '+':\n tis = [cds_cod[0][0]]\n cdsStop = [cds_cod[-1][1]-3]\n elif gene['strand'] == '-':\n tis = [cds_cod[-1][1]]\n cdsStop = [cds_cod[0][0]+3]\n if utr5_cod != []:\n if gene['strand'] == '+':\n tss = [utr5_cod[0][0]]\n elif gene['strand'] == '-':\n tss = [utr5_cod[-1][1]]\n if utr3_cod != []:\n if gene['strand'] == '+':\n cleave = [utr3_cod[-1][1]]\n elif gene['strand'] == '-':\n cleave = [utr3_cod[0][0]]\n\n cds_status, exon_status, utr_status = 0, 0, 0 ## status of the complete elements of the gene\n if cds_cod != []: ## adding phase to the CDS region \n cds_cod_phase = addCDSphase(gene['strand'], cds_cod)\n cds_status = 1\n gene['cds_exons'].append(cds_cod_phase)\n\n if exon_cod != []: \n exon_status = 1\n if utr5_cod != [] or utr3_cod != []: \n utr_status = 1\n if cds_status != 0 and exon_status != 0 and utr_status != 0:\n gene['transcript_status'].append(1)\n else:\n gene['transcript_status'].append(0)\n\n if exon_cod: ## final check point for a valid gene model \n gene['exons'].append(exon_cod)\n gene['utr3_exons'].append(utr3_cod)\n gene['utr5_exons'].append(utr5_cod)\n gene['tis'].append(tis)\n gene['cdsStop'].append(cdsStop)\n gene['tss'].append(tss)\n gene['cleave'].append(cleave) \n\t \n\t gtype=list(set(gtype)) ## different types \n gene['gene_info']=dict(ID=gene_entry[1],\n\t\t\t\tSource=genes_cmpt[gene_entry]['source'],\n\t\t\t\tType=gtype)\n gene=FeatureValueFormat(gene) ## get prepare for MAT writing \n gene_counter+=1\n gene_models.append(gene)\n return gene_models", "def create_genes_table(self, fn_genes):\n log.info(\"Creating table with information about the genes ...\")\n gene_record = GeneParser.GeneRecord()\n names = gene_record.fields_names\n types = gene_record.fields_types\n if len(names) != len(types):\n raise ValueError, \"The number of fields is different from the \"\\\n \"number of types\"\n self.create_table(self.GenesTable,names,types)\n fh = open(fn_genes, \"r\")\n 
log.debug(\"Reading file %s\",fn_genes)\n reader = csv.reader(fh, delimiter=\"\\t\")\n reader.next() # discard first line\n data = []\n for row in reader:\n if row[0] == \"\":\n continue\n g = GeneParser.GeneRecord()\n g.read(reader, row)\n data.append(g.get_values())\n self.store_data(self.GenesTable,data)", "def gtf_processing(genome=None, prefix='gencov'):\n all_bed = prefix + \".all.bed\"\n\n if not os.path.exists(all_bed) or os.stat(all_bed).st_size == 0:\n log.info(\"Preprocessing annotation...\")\n features = ('exon', 'gene', 'intron', 'intergenic')\n merged_exons, merged_genes = map(preprocess, features[:2])\n ins = {\n 'intron': [merged_genes, merged_exons],\n 'intergenic': [merged_genes, genome]\n }\n intron_bed, intergenic_bed = map(preprocess, features[2:], [ins, ins])\n\n log.info(\"Concatenate bed files for all elements...\")\n with open(all_bed, 'w') as out_bed:\n cat_all(merged_exons, merged_genes, intron_bed, intergenic_bed, out_bed=out_bed)\n\n for f in (merged_exons, merged_genes, intron_bed, intergenic_bed):\n os.remove(f)\n\n return all_bed", "def write_genes_bed(db, bed_path, verbose=True):\n genes = db.features_of_type('gene')\n\n i = 0\n\n with open(bed_path, 'w') as out:\n for g in genes:\n i += 1\n if verbose:\n if i == 1:\n print(\"Writing records:\\n1 ..\")\n elif i%2500 == 0:\n print(i)\n out.write(db.bed12(g.id, name_field='gene_id')+'\\n')", "def geneExonicRegions(self, df):\n scaffold = df.iloc[0].scaffold\n strand = df.iloc[0].strand\n gene_type = df.iloc[0].gene_type\n gene_id = df.iloc[0].gene_id\n gene_name = df.iloc[0].gene_name\n start = df.start.min()\n end = df.end.max()\n bp = [False] * (end - start + 1)\n for i in range(df.shape[0]):\n s = df.iloc[i]['start'] - start\n e = df.iloc[i]['end'] - start + 1\n bp[s:e] = [True] * (e - s)\n regions = list(range(start, end + 1))\n groups = []\n\n for i, j in groupby(bp):\n groups.append((i, len(list(j))))\n e_start = 0\n\n for i in groups:\n e_end = e_start + i[1]\n if i[0]:\n record = Record(scaffold=scaffold, start=regions[e_start],\n end=regions[e_end - 1], gene_type=gene_type, gene_id=gene_id,\n gene_name=gene_name, strand=strand)\n yield record\n e_start += i[1]", "def generateFeatures(self, data):\n pass", "def genes(context):\n LOG.info(\"Running scout export genes\")\n adapter = context.obj['adapter']\n \n header = [\"#Chrom\\tStart\\tEnd\\tHgncSymbol\\tHgncID\"]\n\n for line in header:\n click.echo(line)\n\n for gene in export_genes(adapter):\n click.echo(gene)", "def _generate(self, input_row, output_row):\n self._fullInput = input_row\n self.power = self.settings.population_count\n self._fullOutput = output_row\n for one_forest in range(self.power):\n self._forests.append(OneForest(self.settings, input_row=self._fullInput, full_output=self._fullOutput))", "def create_samples(self):\n sample_list = []\n genes = []\n for record in range(len(self.data_dict[\"samples\"])):\n sample_id = self.data_dict[\"samples\"][record]\n genes_cols = list(self.data_dict.keys())[2:]\n for gene in genes_cols:\n genes.append(self.data_dict[gene][record])\n label = self.data_dict[\"type\"][record]\n sample_list.append(Sample(sample_id, genes, label))\n genes = []\n return sample_list", "def __iter__(self):\n for feature in self.features:\n yield feature", "def generate_table(self, rows):\n ...", "def generate_all_features():\n all_sizes = generate_all_sizes()\n\n timestamp = time()\n all_features = []\n for size in all_sizes:\n all_features.append(Feature(size, A))\n all_features.append(Feature(size, B1))\n 
all_features.append(Feature(size, B2))\n all_features.append(Feature(size, C1))\n all_features.append(Feature(size, C2))\n\n stdout.write(\"\\rGenerate all features: {}\\r\".format(time() - timestamp))\n\n return all_features", "def iter_features(self):\n features = self.features\n if (features is not None):\n yield from features", "def index_gff(gff, logger):\n f_in = open(gff, \"r\")\n gene_start_stop_dict = dict()\n gene_scaff_dict = dict()\n gene_first_exon_dict = dict()\n gene_direction = dict()\n gene_gff_line = dict()\n gene_set = set([])\n for line in f_in:\n if line.startswith(\"#\"):\n continue\n if not line.strip():\n continue\n assert len(line.split(\"\\t\")) == 9 , \"GFF fields wrong length should be 9\"\n scaff, source, feature, start, stop, score, \\\n direction, frame, gene_info = line.split(\"\\t\")\n gene = split_gene_name(gene_info)\n scaff = scaff.rstrip()\n if feature == \"gene\":\n gene_gff_line[gene] = line\n gene_set.add(gene)\n start_stop = \"%s\\t%s\" % (start, stop)\n gene_start_stop_dict[gene] = start_stop\n gene_scaff_dict[gene] = scaff\n gene_direction[gene] = direction\n if not gene in gene_first_exon_dict.keys():\n if feature == \"exon\" or feature == \"CDS\":\n start_stop = \"%s\\t%s\" % (start, stop)\n gene_first_exon_dict[gene] = start_stop\n f_in.close()\n logger.info(\"Number of genes = %d\", len(gene_set))\n return gene_start_stop_dict, gene_first_exon_dict, \\\n gene_scaff_dict, gene_direction, gene_set, gene_gff_line", "def build_gene_indexes(df):\n\tgeneDict = OrderedDict()\n\n\tgeneCount = 0\n\tpreviousGeneIndex = 0\n\n\tcurrent_id=\"\"\n\tcurrent_gene=\"\"\n\n\tfor i in range(len(df)):\n\n\t\tif df.loc[i,'feature'] == 'gene':\n\t\t\ttrdict = parse_entry(df.loc[i,'transcript_id'])\n\n\t\t\tcurGeneID = trdict['gene_id'][0]\n\t\t\n\t\t\tif geneCount != 0:\n\t\t\t\tnewGeneIndex = i\n\t\t\t\tgeneDict[current_id] = [previousGeneIndex,newGeneIndex]\n\t\t\t\tpreviousGeneIndex = i\n\t\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\t\t\tgeneCount += 1\n\n\t\t\telse:\n\t\t\t\tnewgeneIndex = 0\n\t\t\t\tgeneCount +=1\n\t\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\tif i == (len(df)-1):\n\t\t\tnewGeneIndex = i+1\n\t\t\tcurrent_id = trdict['gene_id'][0]\n\t\t\tgeneDict[current_id] = [previousGeneIndex,newGeneIndex]\n\treturn geneDict" ]
[ "0.65353125", "0.6407948", "0.6181564", "0.6134817", "0.6083009", "0.6072681", "0.5995177", "0.5953996", "0.58472127", "0.5838847", "0.5811208", "0.5793491", "0.575938", "0.575097", "0.568226", "0.5674407", "0.5673749", "0.56730604", "0.56158006", "0.56121176", "0.5607535", "0.55849785", "0.5576716", "0.5545028", "0.55121404", "0.5473905", "0.54729944", "0.5377226", "0.5372529", "0.53614163" ]
0.8024798
0
Generate gene-to-genome edges for every feature in a genbank object.
def generate_gene_edges(genbank): genome_key = genbank.id genome_id = _genome_vert_name + '/' + genome_key for (idx, feature) in enumerate(genbank.features): # Skip the 'source' feature, which describes the entire genome if feature.type == 'source' or 'locus_tag' not in feature.qualifiers: continue # Generate the edge from gene to genome gene_key = feature.qualifiers['locus_tag'][0] gene_id = _gene_vert_name + '/' + gene_key edge_key = gene_key + '-' + genome_key yield {'_from': gene_id, '_to': genome_id, '_key': edge_key}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_genes(genbank):\n for (idx, feature) in enumerate(genbank.features):\n if feature.type == 'source' or feature.type == 'gene':\n continue\n row = {\n 'location_start': feature.location.start,\n 'location_end': feature.location.end,\n 'strand': feature.strand,\n 'ref': feature.ref,\n 'ref_db': feature.ref_db\n }\n for (name, val) in feature.qualifiers.items():\n # For some reason, all values under .qualifiers are lists of one elem\n # We join the elems into a string just in case there are ever multiple items\n row[name] = ', '.join(val)\n if not row.get('locus_tag'):\n # No locus tag; skip this one. We can only use features with locus tags.\n continue\n row['_key'] = row['locus_tag']\n # Generate the DNA sequence using biopython\n # https://biopython.org/DIST/docs/api/Bio.SeqFeature.SeqFeature-class.html#extract\n seq_obj = SeqFeature(feature.location, feature.type) # type: SeqFeature\n seq_str = str(seq_obj.extract(genbank.seq))\n row['dna_sequence'] = seq_str\n yield row", "def feature_generator(g, edge_to_exclusions):\n feature_paths = g.graph['metapaths']\n prediction_metapath = schema.MetaPath((g.graph['source_kind'], g.graph['edge_key'], g.graph['target_kind']))\n if prediction_metapath in feature_paths:\n feature_paths.remove(prediction_metapath)\n \n kind_to_nodes = nxutils.get_kind_to_nodes(g)\n\n edge_to_exclusions = collections.OrderedDict(sorted(edge_to_exclusions.items()))\n\n for edge, exclusions in edge_to_exclusions.iteritems():\n source, target, edge_key = edge\n metapath_to_metric_dict = metapaths.features_for_metapaths(\n g, source, target, edge_key, exclusions, feature_paths)\n feature_dict = metapaths.flatten_feature_dict(metapath_to_metric_dict)\n combined_dict = collections.OrderedDict()\n combined_dict['source'] = source\n #combined_dict['source_name'] = g.node[source]['name']\n combined_dict['target'] = target\n combined_dict['target_name'] = g.node[target]['name']\n combined_dict['status'] = edge_status(g, source, target)\n combined_dict.update(feature_dict)\n yield combined_dict", "def gen_graph(self):", "def generate_genotype(self):\n genes = []\n for i in range(self.n_genes):\n genes.append(self.Gene(n_bases=self.n_bases))\n self.genes = genes", "def test_create_gene_ontology(self):\n\n # Here are mappings for just a few yeast genes.\n\n mapping = {}\n mapping['STE7'] = ['GO:0000187']\n mapping['PBS2'] = ['GO:0000187']\n mapping['NOP8'] = [\n 'GO:0003676', 'GO:0003723', 'GO:0042254', 'GO:0005634', 'GO:0005730'\n ]\n\n # Build the ontology, then see if it looks correct.\n\n root = dc.models.tensorgraph.models.ontology.create_gene_ontology(\n mapping, min_node_features=1)\n assert len(root.feature_ids) == 0\n\n def find_features(node, features):\n features.update(node.feature_ids)\n for child in node.children:\n find_features(child, features)\n\n all_features = set()\n find_features(root, all_features)\n assert len(all_features) == 3\n for key in mapping:\n assert key in all_features", "def all_edges_as_iterable(self, include_metadata: bool = False) -> Generator:\n if include_metadata:\n return [\n (\n self._names.get_name(u),\n self._names.get_name(v),\n self._meta.get_edge(\n self._names.get_name(u), self._names.get_name(v)\n ),\n )\n for u, v in self._nk_graph.iterEdges()\n ]\n return [\n (self._names.get_name(u), self._names.get_name(v))\n for u, v in self._nk_graph.iterEdges()\n ]", "def __generate_edges(self):\n edges = []\n for vertex in self.__graph_dict:\n for neighbour in self.__graph_dict[vertex]:\n edges.append({vertex, neighbour})\n 
return edges", "def edges(self):\n for e in self._edges:\n yield e", "def finalize_graph(self) -> None:\n digraph = nx.MultiDiGraph()\n\n for node in self.graph.iternodes():\n attributes = self.get_attributes(node)\n attributes[\"schema\"] = node.type.name\n if node.caption is not None:\n attributes[\"label\"] = node.caption\n if node.is_entity and node.schema is not None:\n attributes[\"schema\"] = node.schema.name\n digraph.add_node(node.id, **attributes)\n\n for edge in self.graph.iteredges():\n attributes = self.get_attributes(edge)\n attributes[\"schema\"] = edge.type_name\n attributes[\"weight\"] = str(edge.weight)\n digraph.add_edge(edge.source_id, edge.target_id, key=edge.id, **attributes)\n\n for line in generate_gexf(digraph, prettyprint=True):\n self.fh.write(line)\n self.fh.write(\"\\n\")", "def edges_iter(self) -> Generator:\n for u, v, k, data in self.graph.edges(keys=True, data=True):\n yield u, v, k, data", "def all_edges_as_iterable(self, include_metadata: bool = False) -> Generator:\n if include_metadata:\n return iter(\n [\n (e[\"source\"], e[\"target\"], _node_to_metadata(e[\"properties\"]))\n for e in (\n self._g.V()\n .outE()\n .project(\"target\", \"source\", \"properties\")\n .by(__.inV().values(ID))\n .by(__.outV().values(ID))\n .by(__.valueMap(True))\n .toList()\n )\n ]\n )\n return iter(\n [\n (e[\"source\"], e[\"target\"])\n for e in self._g.V()\n .outE()\n .project(\"target\", \"source\")\n .by(__.inV().values(ID))\n .by(__.outV().values(ID))\n .toList()\n ]\n )", "def gexf_graph():\n # you must replace these lines and supply your own graph\n \n \n \n my_gexf = Gexf(\"JiajiaXie\", \"My awesome graph\")\n graph=my_gexf.addGraph(\"undirected\", \"static\", \"My awesome networks\")\n \n atr1=graph.addNodeAttribute('Type',type='string')\n\n\n for set in data_specific:\n if graph.nodeExists(set['set_num']) ==0:\n tm1=graph.addNode(set['set_num'], set['name'], r='0', g='0', b='0')\n tm1.addAttribute(atr1,\"set\")\n\n\n\n counter_test=1\n for set, part in data_parts.items():\n for key, part_list in part.items():\n interme =part_list['color']\n red=interme[0]+interme[1]\n green=interme[2]+interme[3]\n blue=interme[4]+interme[5]\n\n red_de=str(int(red,16))\n green_de=str(int(green,16))\n blue_de=str(int(blue,16))\n if graph.nodeExists(part_list['id'])==0:\n tm2=graph.addNode(part_list['id'], part_list['part_name'],r=red_de, g=green_de, b = blue_de)\n tm2.addAttribute(atr1,\"part\")\n\n\n counter_test+=1\n graph.addEdge(\"_\"+str(counter_test), set, part_list['id'], part_list['quantity'])\n\n\n\n f=open('bricks_graph.gexf','wb')\n my_gexf.write(f)\n\n\n return my_gexf.graphs[0]", "def iteredges(self):\n for source, targets in self.successors.items():\n for target in targets:\n yield source, target", "def generate_genome_import_files(genbank_path, output_dir):\n genbank = load_genbank(genbank_path)\n genome_path = os.path.join(output_dir, _genome_vert_name + '.json')\n write_import_file(generate_genome(genbank), genome_path)\n gene_path = os.path.join(output_dir, genbank.id, _gene_vert_name + '.json')\n write_import_file(generate_genes(genbank), gene_path)\n gene_edge_path = os.path.join(output_dir, genbank.id, _gene_edge_name + '.json')\n write_import_file(generate_gene_edges(genbank), gene_edge_path)", "def _process_genotypes(self, limit):\n if self.testMode:\n g = self.testgraph\n else:\n g = self.graph\n model = Model(g)\n line_counter = 0\n\n raw = '/'.join((self.rawdir, 'genotype'))\n logger.info(\"building labels for genotypes\")\n geno = Genotype(g)\n fly_tax = 
'NCBITaxon:7227'\n with open(raw, 'r') as f:\n f.readline() # read the header row; skip\n filereader = csv.reader(f, delimiter='\\t', quotechar='\\\"')\n for line in filereader:\n line_counter += 1\n\n (genotype_num, uniquename, description, name) = line\n\n # if self.testMode is True:\n # if int(object_key) not in self.test_keys.get('genotype'):\n # continue\n\n # add the internal genotype to pub mapping\n genotype_id = 'MONARCH:FBgeno'+str(genotype_num)\n self.idhash['genotype'][genotype_num] = genotype_id\n\n if description == '':\n description = None\n\n if not self.testMode \\\n and limit is not None and line_counter > limit:\n pass\n else:\n if self.testMode and \\\n int(genotype_num) not in \\\n self.test_keys['genotype']:\n continue\n\n model.addIndividualToGraph(\n genotype_id, uniquename,\n Genotype.genoparts['intrinsic_genotype'],\n description)\n # we know all genotypes are in flies\n # FIXME we assume here they are in melanogaster,\n # but that isn't necessarily true!!!\n # TODO should the taxon be == genomic background?\n geno.addTaxon(fly_tax, genotype_id)\n genotype_iid = self._makeInternalIdentifier(\n 'genotype', genotype_num)\n model.addComment(\n genotype_id, genotype_iid)\n if name.strip() != '':\n model.addSynonym(genotype_id, name)\n\n return", "def __generate_edges(self):\r\n edges = []\r\n for vertex in self.__graph_dict:\r\n for neighbor in self.__graph_dict[vertex]:\r\n if {neighbor, vertex} not in edges:\r\n edges.append({vertex, neighbor})\r\n return edges", "def generate_edges(self):\n edges = []\n for vertex in self.graph_dict:\n for neighbour in self.graph_dict[vertex]:\n if (neighbour, vertex) not in edges:\n edges.append((vertex, neighbour))\n \n for pair in edges:\n for otherpair in edges:\n if pair[1] == otherpair[0]:\n edges.append((pair[0],otherpair[1]))\n return edges", "def edges(self):\r\n return self.__generate_edges()", "def visitEdges(self) -> None:\n\n for node in self.nodesMap_.values():\n for nodeInput in node.get_inputs():\n i = nodeInput[0]\n if i.get_name() not in self.nodesMap_:\n print(i.get_kind_name(), i.get_name())\n edgeStr = self.get_unique_vertex_name(i) + \":Outputs -> \"\n edgeStr += self.get_unique_vertex_name(node) + \":Inputs\"\n self.edges_.append(edgeStr)", "def generate_model(self):\n rootpath = 'c:\\\\Users\\\\Gamelab\\\\Desktop\\\\RT\\\\Others\\\\Thesis\\\\Thesis_coding\\\\ABM\\\\' \n \n df = pd.read_csv(rootpath+'data\\\\subset_initialized_latlonvalues.csv')\n df = df.drop(columns='Unnamed: 0')\n households_in_block = {}\n household_ids_in_block = {}\n # holds all the graphs indexed by blockid [geoid]\n \n def add_and_remove_edges(G, p_new_connection, p_remove_connection): \n\n new_edges = [] \n rem_edges = [] \n for node in G.nodes(): \n # find the other nodes this one is connected to \n connected = [to for (fr, to) in G.edges(node)] \n # and find the remainder of nodes, which are candidates for new edges \n unconnected = [n for n in G.nodes() if not n in connected] \n\n # probabilistically add a random edge \n if len(unconnected): # only try if new edge is possible \n if random.random() < p_new_connection: \n new = random.choice(unconnected) \n G.add_edge(node, new) \n #print(\"\\tnew edge:\\t {} -- {}\".format(node, new) \n new_edges.append( (node, new) ) \n # book-keeping, in case both add and remove done in same cycle \n unconnected.remove(new) \n connected.append(new) \n\n # probabilistically remove a random edge \n if len(connected): # only try if an edge exists to remove \n if random.random() < 
p_remove_connection: \n remove = random.choice(connected) \n G.remove_edge(node, remove) \n #print \"\\tedge removed:\\t {} -- {}\".format(node, remove) \n rem_edges.append( (node, remove) ) \n # book-keeping, in case lists are important later? \n connected.remove(remove) \n unconnected.append(remove) \n return rem_edges, new_edges\n\n\n\n\n #now i need to get number of geoids unique \n for block in df['geoid'].unique(): \n G_temp=nx.Graph()\n households_in_block[block] = df[df['geoid']==block] # contains all the information about the households \n household_ids_in_block[block] = df[df['geoid']==block]['CASE_ID'].values \n # contains only their ID\n # you only need id to initialize a node\n tempdf = households_in_block[block]\n for household in household_ids_in_block[block]:\n lon = tempdf.loc[tempdf['CASE_ID']==household,'lon'].values[0]\n lat = tempdf.loc[tempdf['CASE_ID']==household,'lat'].values[0] \n \n G_temp.add_node(str(household), pos=(lon,lat))\n self.G.add_node(str(household), pos=(lon,lat))\n \n ## add G to the dictionary\n self.graph_dict[block] = G_temp\n \n \n rem_edges, new_edges = add_and_remove_edges(self.G, 0.5, 0.5)\n self.G.remove_edges_from(rem_edges)\n self.G.add_edges_from(new_edges)\n\n \n\n self.grid= NetworkGrid(self.G)\n \n for _, row in df.iterrows(): # index, row in ...\n \n agent = Household(unique_id = str(row['CASE_ID']),\n model = self, \n income = row['income'],\n age= row['age'],\n size= row['household_'],\n ami_category = row['ami_categ'],\n elec_consumption= row['elec_consumption'],\n attitude = row['attitude'],\n pbc = row['pbc'],\n subnorms = row['subnorms'],\n geoid = row['geoid'],\n tract = row['tract'],\n bgid = row['bgid'],\n adoption_status = 0)\n \n \n\n if agent:\n self.schedule.add(agent)\n y = row['lat']\n x = row['lon']\n self.grid.place_agent(agent, node_id=agent.unique_id)\n #self.space.place_agent(agent, (x, y))\n #agent.pos = (x, y)", "def __generate_edges(self):\n\n edges = []\n for vertex in self.__graph_dict:\n for neighbour in self.__graph_dict[vertex]:\n if {neighbour, vertex} not in edges:\n edges.append( {vertex,neighbour} )\n return edges", "def genFeatures(dimension, name2features, file_girls, file_boys):\n \n # Load in the data\n Xgirls = name2features(file_girls, B=dimension)\n Xboys = name2features(file_boys, B=dimension)\n X = np.concatenate([Xgirls, Xboys])\n \n # Generate Labels\n Y = np.concatenate([-np.ones(len(Xgirls)), np.ones(len(Xboys))])\n \n # shuffle data into random order\n ii = np.random.permutation([i for i in range(len(Y))])\n \n return X[ii, :], Y[ii]", "def bfs_edges_generator(graph, source, reverse=...):\n ...", "def generate(self):\n self.generate_points()\n self.generate_edges()", "def generate_edges(self):\n edges = []\n for vertex in self.graph_dict:\n for neighbour in self.graph_dict[vertex]:\n if {neighbour, vertex} not in edges:\n edges.append({neighbour, vertex})\n return edges", "def generate_edgelist(H, delimiter=\" \"):\n for id in H.edges:\n e = H.edges.members(id)\n yield delimiter.join(map(str, e))", "def add_edge_features(edge_df, X_df, structures_df, ia_df, neighbors_df,obabel_atom_df):\n X_df = add_electronetivity_features(X_df)\n\n edge_df = get_symmetric_edges(edge_df)\n X_df = add_neighbor_count_features(edge_df, X_df, structures_df)\n add_bond_atom_aggregation_features(edge_df, X_df, structures_df, ia_df,\n neighbors_df,obabel_atom_df)\n # remove useless features.\n\n return X_df", "def edges(self):\n return self.generate_edges()", "def export_events_gtf(self, edge):\n strand = 
self.gene.strand\n for event in self.positive_ids:\n full_event = '{}:{}'.format(self.etype, event)\n e_vals = full_event.replace('-', ':').split(':')\n\n line1 = self.gtf_string.format(int(e_vals[2]), int(e_vals[3]) + edge, strand, full_event, full_event,\n 'alternative1')\n yield line1, self.etype\n\n line3 = self.gtf_string.format(e_vals[2], int(e_vals[3]) + edge, strand, full_event, full_event,\n 'alternative2')\n yield line3, self.etype", "def _generate_feature_tree(self, features):\n # build a set of all features, including top-level features and\n # dependencies.\n self.top_level_features = defaultdict(list)\n\n # find top-level features and index them by entity id.\n for f in self.all_features:\n _, num_forward = self.entityset.find_path(self.target_eid, f.entity.id,\n include_num_forward=True)\n if num_forward or f.entity.id == self.target_eid:\n self.top_level_features[f.entity.id].append(f)" ]
[ "0.69899094", "0.61919343", "0.61718875", "0.58896923", "0.5833681", "0.5817742", "0.57499737", "0.57238543", "0.5720613", "0.57019067", "0.5695498", "0.56385654", "0.5601511", "0.558328", "0.55295444", "0.550557", "0.5473217", "0.54718006", "0.5461635", "0.5460057", "0.5459168", "0.54576963", "0.5455112", "0.54300916", "0.5425506", "0.54228485", "0.5412252", "0.5403121", "0.5391136", "0.5388688" ]
0.77022773
0
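Aside (not part of the dataset rows): a minimal usage sketch for the generate_gene_edges document above. It assumes Biopython is available, that the file path "example.gbk" and the helper name write_gene_edges are hypothetical, and that the module-level vertex-name constants (_gene_vert_name, _genome_vert_name) used by the generator are defined as in that record's document.

import json
from Bio import SeqIO

def write_gene_edges(genbank_path, out_path):
    # SeqIO.read returns a single SeqRecord exposing .id and .features,
    # which is the shape the generator above expects.
    genbank = SeqIO.read(genbank_path, "genbank")
    with open(out_path, "w") as fd:
        for edge in generate_gene_edges(genbank):
            # Each yielded dict links a gene vertex (_from) to its genome vertex (_to).
            fd.write(json.dumps(edge) + "\n")

write_gene_edges("example.gbk", "gene_to_genome_edges.json")  # hypothetical paths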
Given a permutation of {0,1,...,n-1} return the 2^n by 2^n permutation matrix representing the permutation of qubits (big-endian convention).
def lift_perm(p: Dict[int, int]) -> np.ndarray: n = len(p) pm = np.zeros((1 << n, 1 << n), dtype=complex) for i in range(1 << n): j = 0 mask = 1 << n for q in range(n): mask >>= 1 if (i & mask) != 0: j |= 1 << (n - 1 - p[q]) pm[j][i] = 1 return pm
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def makePermutationMatrix(permList):\n permList = scipy.array(permList)\n n = len(permList)\n if 0 not in permList:\n permList = permList - 1\n permMat = scipy.zeros((n,n),'d')\n for ii, jj in enumerate(permList):\n permMat[ii,jj] = 1.\n return scipy.transpose(permMat)", "def bitreversal_permutation(n):\n m = int(math.log2(n))\n assert n == 1 << m, 'n must be a power of 2'\n perm = np.arange(n).reshape(n, 1)\n for i in range(m):\n n1 = perm.shape[0] // 2\n perm = np.hstack((perm[:n1], perm[n1:]))\n return perm.squeeze(0)", "def mat24_perm_to_int(p):\n oct = sum(1 << x for x in p[:8])\n res = gc.vect_to_octad(oct) \n #print(\"p2i oct\", hex(oct))\n res -= STD_OCTAD\n res += (res >> 12) & 759 \n #print(\"p2i\", res)\n p1 = [24]*32\n oct, j = 8 * oct, 0x00 \n for i in range(24):\n o = oct & 8\n p1[i] = (j >> o) & 0x1f\n j += 1 << o\n oct >>= 1\n q, q_inv = [None]*8, [None]*8\n for i in range(8):\n j = p1[p[i] & 0x1f] & 7\n q[j] = i\n q_inv[i] = j\n for i in range(6):\n # exchange place i with place q_inv[i]\n j = q_inv[i]\n #q_inv[q[i]], q_inv[q[j]] = q_inv[q[j]], q_inv[q[i]]\n #q[i], q[j] = q[j], q[i]\n #assert q[:i] == q_inv[:i] == lrange(i)\n q_inv[q[i]] = q_inv[q[j]]\n q[j] = q[i]\n #print(\"p2i%d\" % i, j-i) \n res = res * (8 - i) + j - i\n #print(\"p2ifinal\", p1[p[8] & 0x1f]) \n return 16 * res + p1[p[8] & 0x1f]", "def permutation_test_mat(matrix,\n n_1, n_2, n_permutations,\n a00=1, a11=1, a01=0):\n n = n_1 + n_2\n pi = np.zeros(n, dtype=np.int8)\n pi[n_1:] = 1\n\n larger = 0.\n count = 0\n \n for sample_n in range(1 + n_permutations):\n count = 0.\n for i in range(n):\n for j in range(i, n):\n mij = matrix[i, j] + matrix[j, i]\n if pi[i] == pi[j] == 0:\n count += a00 * mij\n elif pi[i] == pi[j] == 1:\n count += a11 * mij\n else:\n count += a01 * mij\n if sample_n == 0:\n statistic = count\n elif statistic <= count:\n larger += 1\n\n np.random.shuffle(pi)\n\n return larger / n_permutations", "def permutation_matrix(order):\n matrix = np.zeros([order,order])\n matrix[-1,0] = 1\n matrix[0:-1,1::] = np.identity(order-1)\n return matrix", "def permute(n, r):\n\n product = 1\n for i in range(n - r + 1, n + 1):\n product *= i\n return product", "def permutation_helper(random_state, n, shape):\r\n # n should be a 0-dimension array\r\n assert n.shape == ()\r\n # Note that it is important to convert `n` into an integer, because if it\r\n # is a long, the numpy permutation function will crash on Windows.\r\n n = int(n.item())\r\n\r\n if shape is None:\r\n # Draw only one permutation, equivalent to shape = ()\r\n shape = ()\r\n out_shape = list(shape)\r\n out_shape.append(n)\r\n out = numpy.empty(out_shape, int)\r\n for i in numpy.ndindex(*shape):\r\n out[i] = random_state.permutation(n)\r\n\r\n #print 'RETURNING', out.shape\r\n return out", "def perm_matrix(x):\n n = len(x)\n return np.eye(n, n)[:, x]", "def makePermutations(n):\n\thalf = n // 2\n\tfull = half * 2\n\tswap = np.random.rand(half) > 0.5\n\tpx = np.arange(n)\n\tpx[:full:2] += swap\n\tpx[1:full:2] -= swap\n\treturn px", "def all_permutations(q_1: Q) -> Qs:\n\n results = []\n\n for perm in itertools.permutations(\"txyz\"):\n results.append(permutation(q_1, perm=perm))\n\n return Qs(results)", "def permute(seq, permutation):\n return [seq[i] for i in permutation]", "def hpint_perm(n):\n c_new = []\n D_new = []\n H_new = []\n for i in range(2 ** n - 1):\n c_new_i = np.zeros((n, 1))\n binStr = bin(i + 1)[2:]\n for j in range(len(binStr)):\n c_new_i[n - 1 - j][0] = int(binStr[len(binStr) - 1 - j])\n c_new.append(c_new_i)\n D_new_i = 
np.diag(np.transpose(c_new_i)[0])\n D_new.append(D_new_i)\n H_new_i = np.diag(np.transpose(c_new_i * (-2) + 1)[0])\n H_new.append(H_new_i)\n\n return c_new, D_new, H_new", "def permutations(n, r):\n result = 1\n for i in range(n, n-r, -1):\n result *= i\n return result", "def hpint_perm_torch(n):\n c_new = []\n D_new = []\n H_new = []\n for i in range(2 ** n - 1):\n c_new_i = torch.zeros(n, 1).to(device)\n binStr = bin(i + 1)[2:]\n for j in range(len(binStr)):\n c_new_i[n - 1 - j][0] = int(binStr[len(binStr) - 1 - j])\n c_new.append(c_new_i)\n D_new_i = torch.diag(c_new_i.T[0]).to(device)\n D_new.append(D_new_i)\n H_new_i = torch.diag((c_new_i * (-2) + 1).T[0]).to(device)\n H_new.append(H_new_i)\n\n return c_new, D_new, H_new", "def perm(inputByte, permTable):\n outputByte = 0\n for index, elem in enumerate(permTable):\n if index >= elem:\n outputByte |= (inputByte & (128 >> (elem - 1))) >> (index - (elem - 1))\n else:\n outputByte |= (inputByte & (128 >> (elem - 1))) << ((elem - 1) - index)\n return outputByte", "def permutation(q_1: Q, perm: str = \"txyz\") -> Q:\n\n if len(perm) != 4:\n raise ValueError(f\"The perm string must be 4 letters long: {perm}\")\n\n result = {}\n\n result[f\"{perm[0]}\"] = q_1.t\n result[f\"{perm[1]}\"] = q_1.x\n result[f\"{perm[2]}\"] = q_1.y\n result[f\"{perm[3]}\"] = q_1.z\n\n rearranged = []\n\n for letter in tuple(\"txyz\"):\n rearranged.append(result[letter])\n\n return Q(rearranged)", "def permutations_(n, r):\n return factorial(n) / factorial(n-r)", "def mat24_int_to_perm(k):\n oct, k = divmod(k, 322560)\n if oct >= 759: return None\n oct -= 759 - STD_OCTAD\n oct += (oct >> 12) & 759 # give number 0 to standard octad\n #print(\"i2p\", oct)\n oct = gc.octad_to_vect(oct);\n #print(\"i2p oct\", hex(oct))\n p = [None]*24\n oct, j = 8 * oct, 0x8 \n for i in range(24):\n o = oct & 8\n p[(j >> o) & 0x1f] = i\n j += 1 << o\n oct >>= 1\n p[8] = p[8 + (k & 15)]\n #print(\"i2pfinal\", k & 15)\n k >>= 4\n k *= (1 << 28) // 2520 + 1\n for i in range(6):\n k1 = i + (k >> 28)\n #print(\"i2p%d\" % i, k >> 28)\n p[i], p[k1] = p[k1], p[i] \n k = (k & 0xfffffff) * (7-i)\n mat24_complete_heptad(p)\n return p", "def get_perms(n):\n \n from itertools import permutations\n bases = 'CATGN'\n return [''.join(perm) for perm in permutations(bases, n)]", "def qubit_permutation(self, qubits: Sequence[cirq.Qid]\n ) -> Sequence[cirq.Qid]:\n # Every iteration reverses the qubit ordering due to the use of a\n # swap network\n if self.iterations & 1:\n return qubits[::-1]\n else:\n return qubits", "def invert_permutation(p):\n s = np.empty_like(p)\n s[p] = np.arange(p.size)\n return s", "def to_permutation(self):\n sp = SetPartitions(self.parent()._n)(self)\n perm = sp.to_permutation().to_cycles()\n return perm", "def TAoCPpermutation(n,k):\n perms = []\n for subset in itertools.combinations(range(n), k):\n A = []; B = []; C = []; min = 0; j = 0; up = 0\n for i in xrange(n):\n if(j>=k or i != subset[j]):\n B.append(i)\n up +=1\n else:\n up -=1\n j += 1\n if(up < min):\n min = up\n B.append(i)\n else:\n A.append(i)\n C.append(B.pop())\n perms.append(A+B+C)\n return perms", "def make_matrix(p, q):\n M = [[ele[0] * ele[1] for ele in itertools.product([player, 1 - player], \n [opponent, 1 - opponent])]\n for opponent in q for player in p]\n return np.array(M)", "def r_permutations(n, r):\n return math.factorial(n) / math.factorial(n - r)", "def int_to_perm(i, n=1):\n\twhile factorial(n) <= i:\n\t\tn += 1\n\tperm = [0 for _ in xrange(n)]\n\tfor k in xrange(n - 1, -1, -1):\n\t\tj, num_zeros 
= 0, 0\n\t\tkf = factorial(k)\n\t\t# position within the remaining (nonzero) slots\n\t\tpos = k - i / kf\n\t\ti %= kf\n\t\tplaced = False\n\t\twhile not placed:\n\t\t\tif perm[j] != 0:\n\t\t\t\tj += 1\n\t\t\telif num_zeros < pos:\n\t\t\t\tnum_zeros += 1\n\t\t\t\tj += 1\n\t\t\telse:\n\t\t\t\t# pos == num_zeros; you've found the right slot\n\t\t\t\tperm[j] = k + 1\n\t\t\t\tplaced = True\n\treturn Permutation(perm)", "def permute(p, dims, perm):\n if issparse(p):\n return _permute_sparse(p, dims, perm)\n return _permute_dense(p, dims, perm)", "def permute_2d(m, p):\r\n return m[p][:, p]\r\n # unused below\r\n m_t = transpose(m)\r\n r_t = take(m_t, p, axis=0)\r\n return take(transpose(r_t), p, axis=0)", "def permute(p,l,length):\n assert length >= 0\n if length == 0:\n\tprint p\n\treturn\n\n for i in range(0,length):\n\tn = p + (l[i],) \n\tpermute(n,l[0:i]+l[i+1:],length-1)", "def permutations(cube):\r\n yield from rotations24(cube)\r\n yield from rotations24(np.flip(cube, 0))\r\n yield from rotations24(np.flip(cube, 1))\r\n yield from rotations24(np.flip(cube, 2))" ]
[ "0.6811559", "0.65865314", "0.6399813", "0.63301665", "0.631787", "0.6282494", "0.6277264", "0.61535066", "0.61456126", "0.6065182", "0.60574985", "0.60513324", "0.5965659", "0.59486353", "0.59402007", "0.59321636", "0.59311527", "0.5897101", "0.5897032", "0.58776504", "0.58772075", "0.58695453", "0.5856797", "0.5815251", "0.5806948", "0.57928115", "0.57705194", "0.5753504", "0.57499826", "0.5742438" ]
0.66230685
1
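Aside (not part of the dataset rows): a small sanity check for the lift_perm document above. Under the big-endian convention it uses, swapping the two qubits of a 2-qubit register should lift to the standard 4x4 SWAP matrix, and the identity permutation should lift to the identity matrix.

import numpy as np

# Swapping qubits 0 and 1 of a 2-qubit register: expect the SWAP matrix.
swap = lift_perm({0: 1, 1: 0})
expected_swap = np.array([
    [1, 0, 0, 0],
    [0, 0, 1, 0],
    [0, 1, 0, 0],
    [0, 0, 0, 1],
], dtype=complex)
assert np.array_equal(swap, expected_swap)

# The identity permutation lifts to the 4x4 identity matrix.
assert np.array_equal(lift_perm({0: 0, 1: 1}), np.eye(4, dtype=complex))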
Translate tk1 to an Rz-Rx-Rz sequence so AerUnitaryBackend can simulate it
def _tk1_to_rotations(a: float, b: float, c: float) -> Circuit: circ = Circuit(1) circ.Rz(c, 0).Rx(b, 0).Rz(a, 0) return circ
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_controls(self):\n\n\n controls_keypress_QWERTY = {\n 'w': lambda: self.set_speed(\"pitch\", self.def_speed[\"pitch\"]),\n 's': lambda: self.set_speed(\"pitch\", -self.def_speed[\"pitch\"]),\n 'a': lambda: self.set_speed(\"roll\", -self.def_speed[\"roll\"]),\n 'd': lambda: self.set_speed(\"roll\", self.def_speed[\"roll\"]),\n 'q': lambda: self.set_speed(\"yaw\", -self.def_speed[\"yaw\"]),\n 'e': lambda: self.set_speed(\"yaw\", self.def_speed[\"yaw\"]),\n 'i': lambda: self.drone.flip_forward(),\n 'k': lambda: self.drone.flip_back(),\n 'j': lambda: self.drone.flip_left(),\n 'l': lambda: self.drone.flip_right(),\n 'Key.left': lambda: self.set_speed(\"yaw\", -1.5*self.def_speed[\"yaw\"]),\n 'Key.right': lambda: self.set_speed(\"yaw\", 1.5*self.def_speed[\"yaw\"]),\n 'Key.up': lambda: self.set_speed(\"throttle\", self.def_speed[\"throttle\"]),\n 'Key.down': lambda: self.set_speed(\"throttle\", -self.def_speed[\"throttle\"]),\n 'Key.tab': lambda: self.drone.takeoff(),\n 'Key.backspace': lambda: self.drone.land(),\n 'p': lambda: self.palm_land_approach(),\n 'v': lambda: self.toggle_use_voice(),\n 't': lambda: self.toggle_tracking(),\n 'k': lambda: self.toggle_distance_mode(),\n 'm': lambda: self.toogle_manual_control(),\n 'Key.enter': lambda: self.take_picture(),\n 'c': lambda: self.clockwise_degrees(360),\n \n \n \n \n \n \n # '0': lambda: self.drone.set_video_encoder_rate(0),\n # '1': lambda: self.drone.set_video_encoder_rate(1),\n # '2': lambda: self.drone.set_video_encoder_rate(2),\n # '3': lambda: self.drone.set_video_encoder_rate(3),\n # '4': lambda: self.drone.set_video_encoder_rate(4),\n # '5': lambda: self.drone.set_video_encoder_rate(5),\n\n '7': lambda: self.set_exposure(-1), \n '8': lambda: self.set_exposure(0),\n '9': lambda: self.set_exposure(1)\n }\n\n controls_keyrelease_QWERTY = {\n 'w': lambda: self.set_speed(\"pitch\", 0),\n 's': lambda: self.set_speed(\"pitch\", 0),\n 'a': lambda: self.set_speed(\"roll\", 0),\n 'd': lambda: self.set_speed(\"roll\", 0),\n 'q': lambda: self.set_speed(\"yaw\", 0),\n 'e': lambda: self.set_speed(\"yaw\", 0),\n 'Key.left': lambda: self.set_speed(\"yaw\", 0),\n 'Key.right': lambda: self.set_speed(\"yaw\", 0),\n 'Key.up': lambda: self.set_speed(\"throttle\", 0),\n 'Key.down': lambda: self.set_speed(\"throttle\", 0)\n }\n\n controls_keypress_AZERTY = {\n 'z': lambda: self.set_speed(\"pitch\", self.def_speed[\"pitch\"]),\n 's': lambda: self.set_speed(\"pitch\", -self.def_speed[\"pitch\"]),\n 'q': lambda: self.set_speed(\"roll\", -self.def_speed[\"roll\"]),\n 'd': lambda: self.set_speed(\"roll\", self.def_speed[\"roll\"]),\n 'a': lambda: self.set_speed(\"yaw\", -self.def_speed[\"yaw\"]),\n 'e': lambda: self.set_speed(\"yaw\", self.def_speed[\"yaw\"]),\n 'i': lambda: self.drone.flip_forward(),\n 'k': lambda: self.drone.flip_back(),\n 'j': lambda: self.drone.flip_left(),\n 'l': lambda: self.drone.flip_right(),\n 'Key.left': lambda: self.set_speed(\"yaw\", -1.5*self.def_speed[\"yaw\"]),\n 'Key.right': lambda: self.set_speed(\"yaw\", 1.5*self.def_speed[\"yaw\"]),\n 'Key.up': lambda: self.set_speed(\"throttle\", self.def_speed[\"throttle\"]),\n 'Key.down': lambda: self.set_speed(\"throttle\", -self.def_speed[\"throttle\"]),\n 'Key.tab': lambda: self.drone.takeoff(),\n 'Key.backspace': lambda: self.drone.land(),\n 'p': lambda: self.palm_land(),\n 't': lambda: self.toggle_tracking(),\n 'Key.enter': lambda: self.take_picture(),\n 'c': lambda: self.clockwise_degrees(360),\n '0': lambda: self.drone.set_video_encoder_rate(0),\n '1': lambda: 
self.drone.set_video_encoder_rate(1),\n '2': lambda: self.drone.set_video_encoder_rate(2),\n '3': lambda: self.drone.set_video_encoder_rate(3),\n '4': lambda: self.drone.set_video_encoder_rate(4),\n '5': lambda: self.drone.set_video_encoder_rate(5),\n\n '7': lambda: self.set_exposure(-1), \n '8': lambda: self.set_exposure(0),\n '9': lambda: self.set_exposure(1)\n }\n\n controls_keyrelease_AZERTY = {\n 'z': lambda: self.set_speed(\"pitch\", 0),\n 's': lambda: self.set_speed(\"pitch\", 0),\n 'q': lambda: self.set_speed(\"roll\", 0),\n 'd': lambda: self.set_speed(\"roll\", 0),\n 'a': lambda: self.set_speed(\"yaw\", 0),\n 'e': lambda: self.set_speed(\"yaw\", 0),\n 'Key.left': lambda: self.set_speed(\"yaw\", 0),\n 'Key.right': lambda: self.set_speed(\"yaw\", 0),\n 'Key.up': lambda: self.set_speed(\"throttle\", 0),\n 'Key.down': lambda: self.set_speed(\"throttle\", 0)\n }\n\n if self.kbd_layout == \"AZERTY\":\n self.controls_keypress = controls_keypress_AZERTY\n self.controls_keyrelease = controls_keyrelease_AZERTY\n else:\n self.controls_keypress = controls_keypress_QWERTY\n self.controls_keyrelease = controls_keyrelease_QWERTY\n self.key_listener = keyboard.Listener(on_press=self.on_press,\n on_release=self.on_release)\n self.key_listener.start()", "def update(self,z_t):\n # YOUR CODE HERE\n pass", "def setT1Button(self):\n self.T1Button = qt.QPushButton(\"Create T1 Mapping\")\n self.T1Button.toolTip = \"Create the T1 Mapping of the Scalar Volumes selected\"\n self.T1Button.enabled = False\n self.InputOutput_Layout.addRow(self.T1Button)", "def create_Toplevel1(rt, *args, **kwargs):\n global w, w_win, root\n # rt = root\n root = rt\n w = tk.Toplevel(root)\n plot_support.set_Tk_var()\n top = Toplevel1(w)\n plot_support.init(w, top, *args, **kwargs)\n return w, top", "def getSpinControl(*args):", "def _tf1_ ( self , *args ) :\n #\n if not hasattr ( self , '_wo1' ) : self._wo1 = _WO1_ ( self )\n if not self._wo1 : self._wo1 = _WO1_ ( self )\n ## \n _wo = self._wo1 \n fun = ROOT.TF1 ( funID() , _wo , *args )\n fun.SetNpx ( 500 ) \n #\n return fun", "def set_controls(self):\n # Image control\n image = pyxbmct.Image(addonfolder+artsfolder+'/mapdvbt.png')\n self.placeControl(image, 0, 0, rowspan=10, columnspan=16)\n\n\t\t# TDT\n self.tdt_button = pyxbmct.RadioButton('')\n self.placeControl(self.tdt_button, 11, 1, rowspan=1, columnspan=4)\n self.connect(self.tdt_button, self.tdt_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'tdt', 2) == 1:\n self.tdt_button.setSelected(True)\n else:\n self.tdt_button.setSelected(False)\n tdt = pyxbmct.Image(addonfolder+artsfolder+'/tdt.png')\n self.placeControl(tdt, 11, 1, rowspan=1, columnspan=4)\n \n\t\t# Meo\n self.meo_button = pyxbmct.RadioButton('')\n self.placeControl(self.meo_button, 11, 6, rowspan=1, columnspan=4)\n self.connect(self.meo_button, self.meo_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'meo', 2) == 1:\n self.meo_button.setSelected(True)\n else:\n self.meo_button.setSelected(False)\n meo = pyxbmct.Image(addonfolder+artsfolder+'/meo.png')\n self.placeControl(meo, 11, 6, rowspan=1, columnspan=4)\n\n\t\t# Vodafone\n self.vodafone_button = pyxbmct.RadioButton('')\n self.placeControl(self.vodafone_button, 11, 11, rowspan=1, columnspan=4)\n self.connect(self.vodafone_button, self.vodafone_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'vodafone', 2) == 1:\n self.vodafone_button.setSelected(True)\n else:\n self.vodafone_button.setSelected(False)\n vodafone = 
pyxbmct.Image(addonfolder+artsfolder+'/vodafone.png')\n self.placeControl(vodafone, 11, 11, rowspan=1, columnspan=4)\n\n\t\t# Close button\n self.close_button = pyxbmct.Button('Exit')\n self.placeControl(self.close_button, 13, 15, rowspan=1, columnspan=1)\n self.connect(self.close_button, lambda: self.closepage())", "def test_drawing_tf():\r\n tf = pytest.importorskip(\"tensorflow\")\r\n\r\n x = tf.constant(0.1)\r\n y = tf.constant([0.2, 0.3])\r\n z = tf.Variable(0.4)\r\n\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n\r\n @qml.beta.qnode(dev, interface=\"tf\")\r\n def circuit(p1, p2=y, **kwargs):\r\n qml.RX(p1, wires=0)\r\n qml.RY(p2[0] * p2[1], wires=1)\r\n qml.RX(kwargs[\"p3\"], wires=0)\r\n qml.CNOT(wires=[0, 1])\r\n return qml.expval(qml.PauliZ(0) @ qml.PauliX(1))\r\n\r\n result = qml.draw(circuit)(p1=x, p3=z)\r\n expected = \"\"\"\\\r\n 0: ──RX(0.1)───RX(0.4)──╭C──╭┤ ⟨Z ⊗ X⟩ \r\n 1: ──RY(0.06)───────────╰X──╰┤ ⟨Z ⊗ X⟩ \r\n\"\"\"\r\n\r\n assert result == expected", "def _regr_basic():", "def __init__(self):\n\n # The Microsoft XBox 360 Wired controller has 11 buttons and 8 axes.\n # Buttons can be 0 (not pressed) or 1 (pressed)\n # Axes are floats and range between -1 and 1. Note that for LT and RT, their \"not pressed\" value is 1 and for the others it is 0. Cross keys only have values -1, 0, and 1. The others have be any value in between -1 and 1.\n num_buttons = 11\n num_axes = 8\n self.inputs = [0 for i in range(num_buttons + num_axes)]\n self.inputs[JoyInput.LT] = self.inputs[JoyInput.RT] = 1\n\n # Dictionary of saved inputs. If an input is not currently saved, you must set it to None.\n # For example, the LS_Y (\"left stick Y\") axis may be saved in self.saved[JoyInput.LS_Y]\n self.saved = {\n JoyInput.LS_Y: None,\n Joystick.RS_ANGLE: None,\n }\n\n # Field variables\n self.depth_state = None # stores the depth state\n self.depth_last_received = 0 # how long since the last depth state callback\n self.depth_pwm_input = 0 # tracks pwm given to depth thrusters\n\n # ROS Subscribers\n rospy.Subscriber(\"/joy\", Joy, self.joy_callback)\n rospy.Subscriber(Topic.YAW_STATE, Float64, self.yaw_state_callback)\n rospy.Subscriber(Topic.DEPTH_STATE, Float64, self.depth_state_callback)\n rospy.Subscriber(Topic.YAW_SETPOINT, Float64, self.yaw_setpoint_callback)\n rospy.Subscriber(Topic.DEPTH_SETPOINT, Int16, self.depth_setpoint_callback)\n\n # ROS Publishers\n # self.topics is a dictionary of dictionaries.\n # 'publisher' contains the rospy.Publisher()\n # 'msg' contains the Int16(), Float64(), or Bool() related to the publisher\n # Use self.publish() rather than using self.topics directly.\n self.topics = {\n Topic.YAW_PWM: {'publisher':rospy.Publisher(Topic.YAW_PWM, Int16, queue_size=10), 'msg':Int16()},\n Topic.YAW_PWM_FEEDBACK: {'publisher':rospy.Publisher(Topic.YAW_PWM_FEEDBACK, Int16, queue_size=10), 'msg':Int16()},\n Topic.YAW_PID: {'publisher':rospy.Publisher(Topic.YAW_PID, Bool, queue_size=10), 'msg':Bool()},\n Topic.YAW_SETPOINT: {'publisher':rospy.Publisher(Topic.YAW_SETPOINT, Float64, queue_size=10), 'msg':Float64()},\n\n Topic.DEPTH_PWM: {'publisher':rospy.Publisher(Topic.DEPTH_PWM, Int16, queue_size=10), 'msg':Int16()},\n Topic.DEPTH_PID: {'publisher':rospy.Publisher(Topic.DEPTH_PID, Bool, queue_size=10), 'msg':Bool()},\n Topic.DEPTH_SETPOINT: {'publisher':rospy.Publisher(Topic.DEPTH_SETPOINT, Int16, queue_size=10), 'msg':Int16()},\n }", "def zext(self, typ):", "def set_controls(self):\n # Image control\n image = pyxbmct.Image(addonfolder+artsfolder+'/kbox.png')\n 
self.placeControl(image, 0, 0, rowspan=10, columnspan=16)\n\n # DVBT\n self.kdvbt_button = pyxbmct.RadioButton('')\n self.placeControl(self.kdvbt_button, 11, 1, rowspan=1, columnspan=3)\n self.connect(self.kdvbt_button, self.kdvbt_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'kdvbt', 2) == 1:\n self.kdvbt_button.setSelected(True)\n else:\n self.kdvbt_button.setSelected(False)\n lnb1 = pyxbmct.Image(addonfolder+artsfolder+'/dvbt.png')\n self.placeControl(lnb1, 11, 1, rowspan=1, columnspan=3)\n\n # DVBC\n self.kdvbc_button = pyxbmct.RadioButton('')\n self.placeControl(self.kdvbc_button, 12, 1, rowspan=1, columnspan=3)\n self.connect(self.kdvbc_button, self.kdvbc_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'kdvbc', 2) == 1:\n self.kdvbc_button.setSelected(True)\n else:\n self.kdvbc_button.setSelected(False)\n lnb1 = pyxbmct.Image(addonfolder+artsfolder+'/dvbc.png')\n self.placeControl(lnb1, 12, 1, rowspan=1, columnspan=3)\n\n # DVBS2\n self.kdvbs_button = pyxbmct.RadioButton('')\n self.placeControl(self.kdvbs_button, 11, 6, rowspan=1, columnspan=3)\n self.connect(self.kdvbs_button, self.kdvbs_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'kdvbs', 2) == 1:\n self.kdvbs_button.setSelected(True)\n else:\n self.kdvbs_button.setSelected(False)\n lnb2 = pyxbmct.Image(addonfolder+artsfolder+'/dvbs2.png')\n self.placeControl(lnb2, 11, 6, rowspan=1, columnspan=3)\n\n # DVBT/DVBS2\n self.kdvbts_button = pyxbmct.RadioButton('')\n self.placeControl(self.kdvbts_button, 11, 11, rowspan=1, columnspan=3)\n self.connect(self.kdvbts_button, self.kdvbts_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'kdvbts', 2) == 1:\n self.kdvbts_button.setSelected(True)\n else:\n self.kdvbts_button.setSelected(False)\n both = pyxbmct.Image(addonfolder+artsfolder+'/dvbts2.png')\n self.placeControl(both, 11, 11, rowspan=1, columnspan=3)\n\n # DVBC/DVBS2\n self.kdvbcs_button = pyxbmct.RadioButton('')\n self.placeControl(self.kdvbcs_button, 12, 11, rowspan=1, columnspan=3)\n self.connect(self.kdvbcs_button, self.kdvbcs_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'kdvbcs', 2) == 1:\n self.kdvbcs_button.setSelected(True)\n else:\n self.kdvbcs_button.setSelected(False)\n both = pyxbmct.Image(addonfolder+artsfolder+'/dvbcs2.png')\n self.placeControl(both, 12, 11, rowspan=1, columnspan=3)\n\n # Close button\n self.close_button = pyxbmct.Button('Exit')\n self.placeControl(self.close_button, 13, 15, rowspan=1, columnspan=1)\n self.connect(self.close_button, lambda: self.closepage())", "def set_controls(self):\n # Image control\n image = pyxbmct.Image(addonfolder+artsfolder+'/tvh.png')\n self.placeControl(image, 0, 0, rowspan=8, columnspan=17)\n\n\t\t# Wetek Button\n self.wetek_button = pyxbmct.RadioButton('')\n self.placeControl(self.wetek_button, 9, 1, rowspan=3, columnspan=3)\n self.connect(self.wetek_button, self.wetek_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'wetek', 2) == 1:\n self.wetek_button.setSelected(True)\n else:\n self.wetek_button.setSelected(False)\n wetek = pyxbmct.Image(addonfolder+artsfolder+'/weteksmall.png')\n self.placeControl(wetek, 9, 1, rowspan=3, columnspan=3)\n\n\t\t# K Button\n self.k_button = pyxbmct.RadioButton('')\n self.placeControl(self.k_button, 9, 5, rowspan=3, columnspan=3)\n self.connect(self.k_button, self.k_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'k', 2) == 1:\n self.k_button.setSelected(True)\n else:\n self.k_button.setSelected(False)\n k = 
pyxbmct.Image(addonfolder+artsfolder+'/ksmall.png')\n self.placeControl(k, 9, 5, rowspan=3, columnspan=3)\n\n\t\t# Khadas Button\n self.khadas_button = pyxbmct.RadioButton('')\n self.placeControl(self.khadas_button, 9, 9, rowspan=3, columnspan=3)\n self.connect(self.khadas_button, self.khadas_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'khadas', 2) == 1:\n self.khadas_button.setSelected(True)\n else:\n self.khadas_button.setSelected(False)\n khadas = pyxbmct.Image(addonfolder+artsfolder+'/khadasmall.png')\n self.placeControl(khadas, 9, 9, rowspan=3, columnspan=3)\n\n\t\t# Generic Button\n self.generic_button = pyxbmct.RadioButton('')\n self.placeControl(self.generic_button, 9, 13, rowspan=3, columnspan=3)\n self.connect(self.generic_button, self.generic_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'generic', 2) == 1:\n self.generic_button.setSelected(True)\n else:\n self.generic_button.setSelected(False)\n generic = pyxbmct.Image(addonfolder+artsfolder+'/genericsmall.png')\n self.placeControl(generic, 9, 13, rowspan=3, columnspan=3)\n\t\t\n\t\t# Close button\n self.close_button = pyxbmct.Button('Exit')\n self.placeControl(self.close_button, 13, 16, rowspan=1, columnspan=1)\n self.connect(self.close_button, lambda: self.closepage())", "def convert_window(self, measurements, main_unit, unit):\n top = self.top = Toplevel(bg=\"Navy\") #Create the new windows\n top.title(measurements) #window name\n style = ttk.Style()\n style.map(\"TCombobox\", fieldbackground=[(\"readonly\", \"Navy\")])\n style.configure(\"TCombobox\", foreground=\"White\", bg=\"Navy\")\n style.configure(\"TSeparator\", background=\"Navy\")\n #Create the head label\n Label(top, text=measurements + \" Converter\", font=(\"Trebuchet MS\", 20, \"bold\"), fg=\"white\", bg=\"Navy\").pack(pady=20)\n outer_input_frame = Frame(top, bg=\"White\")\n input_frame = Frame(outer_input_frame, bg=\"White\")\n #Set text box to get input from user\n Label(input_frame, text=\" Enter the value\", font=(\"Trebuchet MS\", 11), fg=\"Navy\", bg=\"White\").grid(row=0, column=0, pady=5)\n self.v = DoubleVar()\n self.v.set(0)\n self.value = Entry(input_frame, textvariable=self.v, bg=\"Navy\", fg=\"White\")\n self.value.grid(row=1, column=0, padx=15, pady=5)\n ttk.Separator(input_frame, orient=\"horizontal\").grid(row=0, column=1, rowspan=2, sticky=\"ns\")\n #Create button for user to choose the unit \n Label(input_frame, text=\"Select the unit\", font=(\"Trebuchet MS\", 11), fg=\"Navy\", bg=\"White\").grid(row=0, column=2, padx=20, pady=5)\n self.dropdown = ttk.Combobox(input_frame, state=\"readonly\", values=unit, style=\"TCombobox\")\n self.dropdown.grid(row=1, column=2, padx=15, pady=5)\n self.dropdown.set(main_unit)\n input_frame.pack(pady=20)\n outer_input_frame.pack(pady=10, padx=20)\n #Create the scroll bar \n text_frame = Frame(top, bd=2, relief=GROOVE)\n yscrollbar = self.scrollbar = Scrollbar(text_frame)\n yscrollbar.pack(side=RIGHT, fill=Y)\n xscrollbar = self.scrollbar = Scrollbar(text_frame, orient=HORIZONTAL)\n xscrollbar.pack(side=BOTTOM, fill=X)\n self.text = Text(text_frame, wrap=NONE, width=45, height=15, yscrollcommand=yscrollbar.set, xscrollcommand=xscrollbar.set)\n self.text.pack(fill=BOTH, expand=YES)\n yscrollbar.config(command=self.text.yview)\n xscrollbar.config(command=self.text.xview)\n text_frame.pack(fill=BOTH, side=BOTTOM, expand=YES, padx=5, pady=5)\n self.top.bind(\"<Key>\", {\"Angle\": self.convert_angle, \"Area\": self.convert_area, \"Bit Byte\": self.convert_bitbyte, \"Density\": 
self.convert_density, \"Electric Current\": self.convert_electriccurrent, \"Energy\": self.convert_energy, \"Force\": self.convert_force, \"Fuel Consumption\": self.convert_fuelconsumption, \"Length\": self.convert_length, \"Mass\": self.convert_mass, \"Power\": self.convert_power, \"Pressure\": self.convert_pressure, \"Speed\": self.convert_speed, \"Temperature\": self.convert_temperature, \"Time\": self.convert_time, \"Volume\": self.convert_volume}[measurements])\n self.dropdown.bind(\"<<ComboboxSelected>>\", {\"Angle\": self.convert_angle, \"Area\": self.convert_area, \"Bit Byte\": self.convert_bitbyte, \"Density\": self.convert_density, \"Electric Current\": self.convert_electriccurrent, \"Energy\": self.convert_energy, \"Force\": self.convert_force, \"Fuel Consumption\": self.convert_fuelconsumption, \"Length\": self.convert_length, \"Mass\": self.convert_mass, \"Power\": self.convert_power, \"Pressure\": self.convert_pressure, \"Speed\": self.convert_speed, \"Temperature\": self.convert_temperature, \"Time\": self.convert_time, \"Volume\": self.convert_volume}[measurements])\n self.top.bind(\"<Control_L>\", self.copy)\n self.top.bind(\"<Control_R>\", self.copy)", "def set_controls(self):\n image = pyxbmct.Image(addonfolder+artsfolder+'/khadasdvb.png')\n self.placeControl(image, 0, 0, rowspan=8, columnspan=16)\n\t\t\n\t\t# DVB-C\n self.dvbc_button = pyxbmct.RadioButton('')\n self.placeControl(self.dvbc_button, 10, 1, rowspan=2, columnspan=4)\n self.connect(self.dvbc_button, self.dvbc_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'khadasdvbc', 2) == 1:\n self.dvbc_button.setSelected(True)\n else:\n self.dvbc_button.setSelected(False)\n dvbc = pyxbmct.Image(addonfolder+artsfolder+'/dvbc.png')\n self.placeControl(dvbc, 10, 1, rowspan=2, columnspan=4)\n \n\t\t# DVB-S\n self.dvbs_button = pyxbmct.RadioButton('')\n self.placeControl(self.dvbs_button, 10, 6, rowspan=2, columnspan=4)\n self.connect(self.dvbs_button, self.dvbs_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'khadasdvbs', 2) == 1:\n self.dvbs_button.setSelected(True)\n else:\n self.dvbs_button.setSelected(False)\n dvbs = pyxbmct.Image(addonfolder+artsfolder+'/dvbs2.png')\n self.placeControl(dvbs, 10, 6, rowspan=2, columnspan=4)\n\n\t\t# DVB-T\n self.dvbt_button = pyxbmct.RadioButton('')\n self.placeControl(self.dvbt_button, 10, 11, rowspan=2, columnspan=4)\n self.connect(self.dvbt_button, self.dvbt_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'khadasdvbt', 2) == 1:\n self.dvbt_button.setSelected(True)\n else:\n self.dvbt_button.setSelected(False)\n dvbt = pyxbmct.Image(addonfolder+artsfolder+'/dvbt.png')\n self.placeControl(dvbt, 10, 11, rowspan=2, columnspan=4)\n\n\t\t# Close button\n self.close_button = pyxbmct.Button('Exit')\n self.placeControl(self.close_button, 13, 15, rowspan=1, columnspan=1)\n self.connect(self.close_button, lambda: self.closepage())", "def circuit_one_qubit_one_param_rx_ry(inpt):\n qml.RX(inpt[0], wires=0)\n qml.RY(inpt[0], wires=0)\n return qml.expval(qml.PauliZ(0))", "def __init__(self, tkRoot, title):\n # create an instance of the class for ATV Automation\n self.tv = ActionScript()\n self.rc = SonyRCKey()\n self.app = AppList()\n\n # Initialize tkRoot as the Tk() instance\n self.tkRoot = tkRoot\n self.tkRoot.title(title) # Change title for each test\n self.tkRoot.iconbitmap(\"img/bot_icon.ico\")\n self.tkRoot.geometry(\"1200x480\")\n\n # Create frame for header\n self.headerFrame = ttk.Frame(self.tkRoot)\n self.headerFrame.pack(fill=X)\n\n # Create canvas 
Testcase Instructions\n self.sideCanvas = Canvas(self.tkRoot)\n self.sideCanvas.pack(fill=BOTH, side=LEFT)\n\n # Create Frame for Testcase Instructions\n self.sideFrame = ttk.Frame(self.sideCanvas)\n self.sideFrame.pack(fill=BOTH, side=LEFT)\n\n # Create canvas for Testcase running\n self.testCanvas = Canvas(self.tkRoot)\n self.testCanvas.pack(fill=BOTH, side=LEFT, expand=True)\n\n # add scrollbar inside testcase canvas\n self.scrollbar = Scrollbar(self.tkRoot, command=self.testCanvas.yview)\n self.scrollbar.pack(fill=Y, side=RIGHT, expand=False)\n\n # Create frame for Testcase running\n self.testFrame = ttk.Frame(self.testCanvas)\n self.testFrame.pack(fill=BOTH, side=LEFT, expand=True)\n\n # configure canvas and scrollbar\n self.testCanvas.configure(yscrollcommand=self.scrollbar.set)\n\n # put sideframe in sidecanvas\n self.sideCanvas.create_window(\n (0, 0), window=self.sideFrame, anchor='nw', width=400)\n\n # put testFrame in testCanvas\n self.testCanvas.create_window(\n (0, 0), window=self.testFrame, anchor='nw', width=800)\n\n # Create a custom font\n self.mainFont = tkFont.Font(\n family=\"Helvetica\", size=14, weight=tkFont.NORMAL)\n self.sideFont = tkFont.Font(\n family=\"Helvetica\", size=14, weight=tkFont.NORMAL)\n self.buttonFont = tkFont.Font(\n family=\"Helvetica\", size=10, weight=tkFont.BOLD)\n self.boldFont = tkFont.Font(\n family=\"Helvetica\", size=14, weight=tkFont.BOLD)\n\n # Initialize flags for background of the labels and loop count\n self.bgCounter = 0\n self.loopCount = IntVar()\n self.loopCount.set(1)\n self.loopCounterUI = IntVar() # loop counter UI\n self.loopCounterUI.set(0)\n self.deviceID = StringVar()\n self.stopLoop = False\n self.countLoopReset = 0\n\n # Initialize button so we can access it on any functions\n self.btnStart = Button()\n self.btnStop = Button()\n self.txtLoop = Entry()\n self.labelLoop = Label()\n self.txtDeviceID = Entry()\n self.labelDeviceID = Label()\n self.LabelLists = []\n self.tsFormat = '%Y-%m-%d, %I:%M:%S %p'\n self.playback_time = 0.3", "def ex13gui():\n\n #pylint: disable=E1101\n root = tkcomponents.create('Simple interest')\n\n principal_label = rx.Observable.just('What is the principal amount?')\n principal = tkcomponents.input_stream(root, principal_label, 0).map(decimal.Decimal)\n\n rate_label = rx.Observable.just('What is the rate?')\n rate = tkcomponents.input_stream(root, rate_label, 1).map(decimal.Decimal)\n\n term_label = rx.Observable.just('What is the number of years?')\n term = tkcomponents.input_stream(root, term_label, 2).map(int).map(decimal.Decimal)\n\n frequency_label = rx.Observable.just(\n 'What is the number of times interest is compounded per year?')\n frequency = tkcomponents.input_stream(root, frequency_label, 3).map(int).map(decimal.Decimal)\n\n tkcomponents.output_label(\n root,\n rx.Observable.combine_latest(principal, rate, term, frequency, calculate_amount), 4)\n root.mainloop()", "def circuit_one_qubit_one_param_rx(inpt):\n qml.RX(inpt[0], wires=0)\n return qml.expval(qml.PauliZ(0))", "def run(self):\n self.root = root = Tk()\n self.root.columnconfigure(0, weight=1) # center component\n # self.root.config(bg='Thistle')\n root.geometry('{}x{}'.format(350, 500))\n root.resizable(width=False, height=False)\n root.title(_('Welcome Money Exchange'))\n\n # set the window icon\n img = PhotoImage(file='img/scpp_global.png')\n root.tk.call('wm', 'iconphoto', root._w, img)\n\n path = \"img/coin_base.png\"\n # Creates a Tkinter-compatible photo image, which can be used everywhere Tkinter expects an image 
object.\n img = ImageTk.PhotoImage(Image.open(path))\n\n self.topBar = Frame(self.root, border=1, relief=GROOVE)\n self.topBar.grid(row=0, column=0, columnspan=2, sticky=E + W + N + S)\n self.topBar.columnconfigure(0, weight=1)\n\n l0 = Label(self.topBar, image=img).grid(row=1, column=0, columnspan=2, sticky=N, padx=5, pady=35)\n l1 = Label(self.topBar, text=\" Coin Rate :\", fg=\"red\", font=(\"Helvetica\", 16)).grid(row=1, column=0,\n sticky=W + S, pady=5,\n padx=5)\n self.l2 = Label(self.topBar, text=\"\", font=(\"Helvetica\", 16), anchor=W)\n\n b1 = Button(self.root, text=_('Refresh Coin Value'), command=self.getCoinValue, width=30,\n background='green').grid(row=1, column=0, pady=5, padx=5)\n\n b2 = Button(self.root, text=_('View Transaction Root Details'), command=self.onDatabaseLog, width=30)\n b3 = Button(self.root, text=_('View Log File'), command=self.onViewLog, width=30)\n b4 = Button(self.root, text=_('Map Rule Define'), command=self.putTestData, width=30)\n b5 = Button(self.root, text=_('Exit'), command=self.onExit, width=10, background='red')\n\n self.l2.grid(row=1, column=0, sticky=E + S, pady=5, padx=5)\n b2.grid(row=3, column=0, pady=5, padx=5)\n b3.grid(row=4, column=0, pady=5, padx=5)\n b4.grid(row=5, column=0, pady=5, padx=5)\n b5.grid(row=6, column=0, pady=5, padx=5)\n\n self.getCoinValue();\n self.center(root)\n root.mainloop()", "def set_controls(self):\n # Image control\n image = pyxbmct.Image(addonfolder+artsfolder+'/wplay.png')\n self.placeControl(image, 0, 0, rowspan=10, columnspan=16)\n\n # LNB1\n self.wplnb1_button = pyxbmct.RadioButton('')\n self.placeControl(self.wplnb1_button, 11, 1, rowspan=1, columnspan=4)\n self.connect(self.wplnb1_button, self.wplnb1_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'wplnb1', 2) == 1:\n self.wplnb1_button.setSelected(True)\n else:\n self.wplnb1_button.setSelected(False)\n lnb1 = pyxbmct.Image(addonfolder+artsfolder+'/lnb1.png')\n self.placeControl(lnb1, 11, 1, rowspan=1, columnspan=4)\n\n # LNB2\n self.wplnb2_button = pyxbmct.RadioButton('')\n self.placeControl(self.wplnb2_button, 11, 6, rowspan=1, columnspan=4)\n self.connect(self.wplnb2_button, self.wplnb2_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'wplnb2', 2) == 1:\n self.wplnb2_button.setSelected(True)\n else:\n self.wplnb2_button.setSelected(False)\n lnb2 = pyxbmct.Image(addonfolder+artsfolder+'/lnb2.png')\n self.placeControl(lnb2, 11, 6, rowspan=1, columnspan=4)\n\n # LNB1/LNB2\n self.wplnboth_button = pyxbmct.RadioButton('')\n self.placeControl(self.wplnboth_button, 11, 11, rowspan=1, columnspan=4)\n self.connect(self.wplnboth_button, self.wplnboth_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'wplnboth', 2) == 1:\n self.wplnboth_button.setSelected(True)\n else:\n self.wplnboth_button.setSelected(False)\n both = pyxbmct.Image(addonfolder+artsfolder+'/both.png')\n self.placeControl(both, 11, 11, rowspan=1, columnspan=4)\n\n # Close button\n self.close_button = pyxbmct.Button('Exit')\n self.placeControl(self.close_button, 13, 15, rowspan=1, columnspan=1)\n self.connect(self.close_button, lambda: self.closepage())", "def setup_axes2(fig, rect,tmin, tmax,zmin,zmax):\n\n tr =PolarAxes.PolarTransform()\n pi = np.pi\n\n angle_ticks = [(tmin, '%.2f' % tmin), (0,r'$0$'), (tmax, '%.2f' % tmax)]\n\n grid_locator1 = FixedLocator([v for v, s in angle_ticks])\n tick_formatter1 = DictFormatter(dict(angle_ticks))\n\n grid_locator2 = MaxNLocator(4)\n\n grid_helper = floating_axes.GridHelperCurveLinear(\n tr, extremes=(tmax, tmin, zmax, 
zmin),\n grid_locator1=grid_locator1,\n grid_locator2=grid_locator2,\n tick_formatter1=tick_formatter1,\n tick_formatter2=None)\n\n ax1 = floating_axes.FloatingSubplot(fig, rect, grid_helper=grid_helper)\n fig.add_subplot(ax1)\n\n # create a parasite axes whose transData in RA, cz\n aux_ax = ax1.get_aux_axes(tr)\n\n aux_ax.patch = ax1.patch # for aux_ax to have a clip path as in ax\n ax1.patch.zorder = 0.95 # but this has a side effect that the patch is\n # drawn twice, and possibly over some other\n # artists. So, we decrease the zorder a bit to\n # prevent this.\n\n return ax1, aux_ax", "def set_controls(self):\n image = pyxbmct.Image(addonfolder+artsfolder+'/dvb.png')\n self.placeControl(image, 0, 0, rowspan=8, columnspan=16)\n\t\t\n\t\t# DVB-C\n self.dvbc_button = pyxbmct.RadioButton('')\n self.placeControl(self.dvbc_button, 10, 1, rowspan=2, columnspan=4)\n self.connect(self.dvbc_button, self.dvbc_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'wdvbc', 2) == 1:\n self.dvbc_button.setSelected(True)\n else:\n self.dvbc_button.setSelected(False)\n dvbc = pyxbmct.Image(addonfolder+artsfolder+'/dvbc.png')\n self.placeControl(dvbc, 10, 1, rowspan=2, columnspan=4)\n \n\t\t# DVB-S\n self.dvbs_button = pyxbmct.RadioButton('')\n self.placeControl(self.dvbs_button, 10, 6, rowspan=2, columnspan=4)\n self.connect(self.dvbs_button, self.dvbs_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'wdvbs', 2) == 1:\n self.dvbs_button.setSelected(True)\n else:\n self.dvbs_button.setSelected(False)\n dvbs = pyxbmct.Image(addonfolder+artsfolder+'/dvbs2.png')\n self.placeControl(dvbs, 10, 6, rowspan=2, columnspan=4)\n\n\t\t# DVB-T\n self.dvbt_button = pyxbmct.RadioButton('')\n self.placeControl(self.dvbt_button, 10, 11, rowspan=2, columnspan=4)\n self.connect(self.dvbt_button, self.dvbt_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'wdvbt', 2) == 1:\n self.dvbt_button.setSelected(True)\n else:\n self.dvbt_button.setSelected(False)\n dvbt = pyxbmct.Image(addonfolder+artsfolder+'/dvbt.png')\n self.placeControl(dvbt, 10, 11, rowspan=2, columnspan=4)\n\n\t\t# Close button\n self.close_button = pyxbmct.Button('Exit')\n self.placeControl(self.close_button, 13, 15, rowspan=1, columnspan=1)\n self.connect(self.close_button, lambda: self.closepage())", "def set_controls(self):\n image = pyxbmct.Image(addonfolder+artsfolder+'/dvb.png')\n self.placeControl(image, 0, 0, rowspan=8, columnspan=16)\n\t\t\n\t\t# DVB-C\n self.dvbc_button = pyxbmct.RadioButton('')\n self.placeControl(self.dvbc_button, 10, 1, rowspan=2, columnspan=4)\n self.connect(self.dvbc_button, self.dvbc_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'gdvbc', 2) == 1:\n self.dvbc_button.setSelected(True)\n else:\n self.dvbc_button.setSelected(False)\n dvbc = pyxbmct.Image(addonfolder+artsfolder+'/dvbc.png')\n self.placeControl(dvbc, 10, 1, rowspan=2, columnspan=4)\n \n\t\t# DVB-S\n self.dvbs_button = pyxbmct.RadioButton('')\n self.placeControl(self.dvbs_button, 10, 6, rowspan=2, columnspan=4)\n self.connect(self.dvbs_button, self.dvbs_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'gdvbs', 2) == 1:\n self.dvbs_button.setSelected(True)\n else:\n self.dvbs_button.setSelected(False)\n dvbs = pyxbmct.Image(addonfolder+artsfolder+'/dvbs2.png')\n self.placeControl(dvbs, 10, 6, rowspan=2, columnspan=4)\n\n\t\t# DVB-T\n self.dvbt_button = pyxbmct.RadioButton('')\n self.placeControl(self.dvbt_button, 10, 11, rowspan=2, columnspan=4)\n self.connect(self.dvbt_button, self.dvbt_button_update)\n if 
tools.return_data('TVHWIZARD', 'STRING', 'gdvbt', 2) == 1:\n self.dvbt_button.setSelected(True)\n else:\n self.dvbt_button.setSelected(False)\n dvbt = pyxbmct.Image(addonfolder+artsfolder+'/dvbt.png')\n self.placeControl(dvbt, 10, 11, rowspan=2, columnspan=4)\n\n\t\t# Close button\n self.close_button = pyxbmct.Button('Exit')\n self.placeControl(self.close_button, 13, 15, rowspan=1, columnspan=1)\n self.connect(self.close_button, lambda: self.closepage())", "def set_controls(self):\n # Image control\n image = pyxbmct.Image(addonfolder+artsfolder+'/mapdvbc.png')\n self.placeControl(image, 0, 0, rowspan=10, columnspan=16)\n\n\t\t# Nos\n self.nos_button = pyxbmct.RadioButton('')\n self.placeControl(self.nos_button, 10, 3, rowspan=1, columnspan=4)\n self.connect(self.nos_button, self.nos_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'nos', 2) == 1:\n self.nos_button.setSelected(True)\n else:\n self.nos_button.setSelected(False)\n nos = pyxbmct.Image(addonfolder+artsfolder+'/nos.png')\n self.placeControl(nos, 10, 3, rowspan=1, columnspan=4)\n\n\t\t# Nos Madeira\n self.madeira_button = pyxbmct.RadioButton('')\n self.placeControl(self.madeira_button, 12, 6, rowspan=1, columnspan=4)\n self.connect(self.madeira_button, self.madeira_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'madeira', 2) == 1:\n self.madeira_button.setSelected(True)\n else:\n self.madeira_button.setSelected(False)\n madeira = pyxbmct.Image(addonfolder+artsfolder+'/madeira.png')\n self.placeControl(madeira, 12, 6, rowspan=1, columnspan=4)\n\n\t\t# Nowo\n self.nowo_button = pyxbmct.RadioButton('')\n self.placeControl(self.nowo_button, 10, 9, rowspan=1, columnspan=4)\n self.connect(self.nowo_button, self.nowo_button_update)\n if tools.return_data('TVHWIZARD', 'STRING', 'nowo', 2) == 1:\n self.nowo_button.setSelected(True)\n else:\n self.nowo_button.setSelected(False)\n nowo = pyxbmct.Image(addonfolder+artsfolder+'/nowo.png')\n self.placeControl(nowo, 10, 9, rowspan=1, columnspan=4)\n\n\t\t# Close button\n self.close_button = pyxbmct.Button('Exit')\n self.placeControl(self.close_button, 13, 15, rowspan=1, columnspan=1)\n self.connect(self.close_button, lambda: self.closepage())", "def test_drawing_jax():\r\n jax = pytest.importorskip(\"jax\")\r\n jnp = jax.numpy\r\n\r\n x = jnp.array(0.1)\r\n y = jnp.array([0.2, 0.3])\r\n z = jnp.array(0.4)\r\n\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n\r\n @qml.beta.qnode(dev, interface=\"jax\")\r\n def circuit(p1, p2=y, **kwargs):\r\n qml.RX(p1, wires=0)\r\n qml.RY(p2[0] * p2[1], wires=1)\r\n qml.RX(kwargs[\"p3\"], wires=0)\r\n qml.CNOT(wires=[0, 1])\r\n return qml.expval(qml.PauliZ(0) @ qml.PauliX(1))\r\n\r\n result = qml.draw(circuit)(p1=x, p3=z)\r\n expected = \"\"\"\\\r\n 0: ──RX(0.1)───RX(0.4)──╭C──╭┤ ⟨Z ⊗ X⟩ \r\n 1: ──RY(0.06)───────────╰X──╰┤ ⟨Z ⊗ X⟩ \r\n\"\"\"\r\n\r\n assert result == expected", "def t2r( self , t ):\n \n #Default is a constant signal\n r = self.rbar\n \n return r", "def main():\n root = tkinter.Tk()\n # construct com.MqttClient clint && make it connect to ev3\n\n client = com.MqttClient()\n client.connect_to_ev3()\n\n setup_gui(root, client)\n\n\n root.mainloop()", "def vp_start_gui():\n global val, w, root\n root = tk.Tk()\n plot_support.set_Tk_var()\n top = Toplevel1(root)\n plot_support.init(root, top)\n root.mainloop()", "def retranslateUi(self, MainWindow):\n _translate = QtCore.QCoreApplication.translate\n MainWindow.setWindowTitle(_translate(\"MainWindow\", \"ROC - Robot Operational Controller\"))\n 
self.qrcode_App_Button.setToolTip(_translate(\"MainWindow\", \"Opens the QR Code Reader\"))\n self.qrcode_App_Button.setText(_translate(\"MainWindow\", \"QR App\"))\n self.data_Button.setToolTip(_translate(\"MainWindow\", \"Opens the Data Dashboard\"))\n self.data_Button.setText(_translate(\"MainWindow\", \"Data\"))\n self.config_Button.setToolTip(_translate(\"MainWindow\", \"Configure the ROC\"))\n self.config_Button.setText(_translate(\"MainWindow\", \"Options\"))\n self.docs_Button.setToolTip(_translate(\"MainWindow\", \"Go to the Documentation Website\"))\n self.docs_Button.setText(_translate(\"MainWindow\", \"Docs\"))\n self.about_Button.setToolTip(_translate(\"MainWindow\", \"Contact and About\"))\n self.about_Button.setText(_translate(\"MainWindow\", \"About\"))\n self.robot_Viewer_Label.setText(_translate(\"MainWindow\", \"ROBOT VIEWER\"))\n self.terminalWidget.setTabText(self.terminalWidget.indexOf(self.urxvtWidget), _translate(\"MainWindow\", \"urxvt\"))\n self.robot_TB1_Status.setToolTip(_translate(\"MainWindow\", \"Ping and Show Robot 1 Status (GREY = DESABLED, BLACK = OFF, LIGHT = ON, RED = INACTIVE)\"))\n self.robot_TB1_Status.setText(_translate(\"MainWindow\", \"TB1 STATUS\"))\n self.robot_TB1_Viewer.setToolTip(_translate(\"MainWindow\", \"Open Robot 1 Viewer\"))\n self.robot_TB1_Viewer.setText(_translate(\"MainWindow\", \"ROBOT 1\"))\n self.robot_TB2_Viewer.setToolTip(_translate(\"MainWindow\", \"Open Robot 2 Viewer\"))\n self.robot_TB2_Viewer.setText(_translate(\"MainWindow\", \"ROBOT 2\"))\n self.robot_TB3_Viewer.setToolTip(_translate(\"MainWindow\", \"Open Robot 3 Viewer\"))\n self.robot_TB3_Viewer.setText(_translate(\"MainWindow\", \"ROBOT 3\"))\n self.robot_TB4_Viewer.setToolTip(_translate(\"MainWindow\", \"Open Robot 4 Viewer\"))\n self.robot_TB4_Viewer.setText(_translate(\"MainWindow\", \"ROBOT 4\"))\n self.robot_TB2_Status.setToolTip(_translate(\"MainWindow\", \"Ping and Show Robot 2 Status (GREY = DESABLED, BLACK = OFF, LIGHT = ON, RED = INACTIVE)\"))\n self.robot_TB2_Status.setText(_translate(\"MainWindow\", \"TB2 STATUS\"))\n self.robot_TB3_Status.setToolTip(_translate(\"MainWindow\", \"Ping and Show Robot 3 Status (GREY = DESABLED, BLACK = OFF, LIGHT = ON, RED = INACTIVE)\"))\n self.robot_TB3_Status.setText(_translate(\"MainWindow\", \"TB3 STATUS\"))\n self.robot_TB4_Status.setToolTip(_translate(\"MainWindow\", \"Ping and Show Robot 4 Status (GREY = DESABLED, BLACK = OFF, LIGHT = ON, RED = INACTIVE)\"))\n self.robot_TB4_Status.setText(_translate(\"MainWindow\", \"TB4 STATUS\"))\n self.robot_TB1.setTitle(_translate(\"MainWindow\", \" Robot 1 (TB1)\"))\n self.configure_TB1_Button.setToolTip(_translate(\"MainWindow\", \"Opens the Settings for Robot 1 (TB1)\"))\n self.configure_TB1_Button.setText(_translate(\"MainWindow\", \"Settings\"))\n self.logs_TB1_Button.setToolTip(_translate(\"MainWindow\", \"Open the Logs for Robot 1 (TB1)\"))\n self.logs_TB1_Button.setText(_translate(\"MainWindow\", \"Logs\"))\n self.floor_TB1_Show.setToolTip(_translate(\"MainWindow\", \"Shows the Floor Map\"))\n self.floor_TB1_Show.setText(_translate(\"MainWindow\", \"FLOOR\"))\n self.kinect_TB1_Show.setToolTip(_translate(\"MainWindow\", \"Shows the Kinect Camera\"))\n self.kinect_TB1_Show.setText(_translate(\"MainWindow\", \"KINECT\"))\n self.gmapp_TB1_Show.setToolTip(_translate(\"MainWindow\", \"Shows the GMAPP\"))\n self.gmapp_TB1_Show.setText(_translate(\"MainWindow\", \"GMAPP\"))\n self.camera_TB1_Show.setToolTip(_translate(\"MainWindow\", \"Shows the Notebook Camera\"))\n 
self.camera_TB1_Show.setText(_translate(\"MainWindow\", \"CAMERA\"))\n self.on_TB1_Viewer.setToolTip(_translate(\"MainWindow\", \"Turn Viewer On\"))\n self.on_TB1_Viewer.setText(_translate(\"MainWindow\", \"ON\"))\n self.off_TB1_Viewer.setToolTip(_translate(\"MainWindow\", \"Turn Viewer Off\"))\n self.off_TB1_Viewer.setText(_translate(\"MainWindow\", \"OFF\"))\n self.reload_TB1.setToolTip(_translate(\"MainWindow\", \"Reload the Viewer\"))\n self.reload_TB1.setText(_translate(\"MainWindow\", \"RELOAD\"))\n self.reset_TB1.setToolTip(_translate(\"MainWindow\", \"Reset the Viewer\"))\n self.reset_TB1.setText(_translate(\"MainWindow\", \"RESET\"))\n self.valuesTB1Frame.setToolTip(_translate(\"MainWindow\", \"Shows Robot 1 (TB1) Data\"))\n self.x_TB1_Label.setText(_translate(\"MainWindow\", \"X:\"))\n self.y_TB1_Label.setText(_translate(\"MainWindow\", \"Y:\"))\n self.velocity_TB1_Label.setText(_translate(\"MainWindow\", \"Velocity:\"))\n self.battery_TB1_Label.setText(_translate(\"MainWindow\", \"Battery:\"))\n self.kinnect_TB1_Screen.setToolTip(_translate(\"MainWindow\", \"Image from the Kinnect\"))\n self.viewer_TB1.setTabText(self.viewer_TB1.indexOf(self.kinnect_TB1_Screen), _translate(\"MainWindow\", \"ROBOT\"))\n self.viewer_TB1.setTabText(self.viewer_TB1.indexOf(self.gmapp_TB1_Screen), _translate(\"MainWindow\", \"GMAPP\"))\n self.viewer_TB1.setTabText(self.viewer_TB1.indexOf(self.floor_TB1_Screen), _translate(\"MainWindow\", \"FLOOR\"))\n self.viewer_TB1.setTabText(self.viewer_TB1.indexOf(self.camera_TB1_Screen), _translate(\"MainWindow\", \"CAMERA\"))\n self.robot_Selection_Label.setText(_translate(\"MainWindow\", \"CONFIGURATIONS\"))\n self.robot_Selection_TypeLabel.setText(_translate(\"MainWindow\", \"ROBOT TYPE:\"))\n self.robot_Selection_Type.setToolTip(_translate(\"MainWindow\", \"Select the Robot Type\"))\n self.robot_Selection_Type.setItemText(0, _translate(\"MainWindow\", \"NONE\"))\n self.robot_Selection_Type.setItemText(1, _translate(\"MainWindow\", \"TURTLEBOT\"))\n self.robot_Selection_Type.setItemText(2, _translate(\"MainWindow\", \"DRONE\"))\n self.robot_Selection_Role.setToolTip(_translate(\"MainWindow\", \"Select the Robot Role\"))\n self.robot_Selection_Role.setItemText(0, _translate(\"MainWindow\", \"NONE\"))\n self.robot_Selection_Role.setItemText(1, _translate(\"MainWindow\", \"ROLE 1\"))\n self.robot_Selection_Role.setItemText(2, _translate(\"MainWindow\", \"ROLE 2\"))\n self.robot_Selection_RoleLabel.setText(_translate(\"MainWindow\", \"ROBOT ROLE:\"))\n self.robot_Selection_TaskLabel.setText(_translate(\"MainWindow\", \"ROBOT TASK:\"))\n self.robot_Selection_Task.setToolTip(_translate(\"MainWindow\", \"Select the Robot Task\"))\n self.robot_Selection_Task.setItemText(0, _translate(\"MainWindow\", \"NONE\"))\n self.robot_Selection_Task.setItemText(1, _translate(\"MainWindow\", \"TASK 1\"))\n self.robot_Selection_Task.setItemText(2, _translate(\"MainWindow\", \"TASK 2\"))\n self.robot_Selection_BehaviorLabel.setText(_translate(\"MainWindow\", \"BEHAVIOR:\"))\n self.robot_Selection_Behavior.setToolTip(_translate(\"MainWindow\", \"Select an Behavior\"))\n self.robot_Selection_Behavior.setItemText(0, _translate(\"MainWindow\", \"NONE\"))\n self.robot_Selection_Behavior.setItemText(1, _translate(\"MainWindow\", \"TASK 1\"))\n self.robot_Selection_Behavior.setItemText(2, _translate(\"MainWindow\", \"TASK 2\"))\n self.robot_Selection_Experiment.setToolTip(_translate(\"MainWindow\", \"Select a Experiment\"))\n self.robot_Selection_Experiment.setItemText(0, 
_translate(\"MainWindow\", \"NONE\"))\n self.robot_Selection_Experiment.setItemText(1, _translate(\"MainWindow\", \"EXP. 1\"))\n self.robot_Selection_Experiment.setItemText(2, _translate(\"MainWindow\", \"EXP. 2\"))\n self.robot_Selection_ExpLabel.setText(_translate(\"MainWindow\", \"EXPERIMENT:\"))\n self.set_Selection_Values.setToolTip(_translate(\"MainWindow\", \"Set the Values Selected\"))\n self.set_Selection_Values.setText(_translate(\"MainWindow\", \"SET\"))\n self.reset_Selection_Values.setToolTip(_translate(\"MainWindow\", \"Reset the Values Selected\"))\n self.reset_Selection_Values.setText(_translate(\"MainWindow\", \"R\"))\n self.robot_TB1_Selection.setToolTip(_translate(\"MainWindow\", \"Select Robot 1\"))\n self.robot_TB1_Selection.setText(_translate(\"MainWindow\", \"1\"))\n self.robot_TB2_Selection.setToolTip(_translate(\"MainWindow\", \"Select Robot 2\"))\n self.robot_TB2_Selection.setText(_translate(\"MainWindow\", \"2\"))\n self.robot_TB4_Selection.setToolTip(_translate(\"MainWindow\", \"Select Robot 4\"))\n self.robot_TB4_Selection.setText(_translate(\"MainWindow\", \"4\"))\n self.robot_TB3_Selection.setToolTip(_translate(\"MainWindow\", \"Select Robot 3\"))\n self.robot_TB3_Selection.setText(_translate(\"MainWindow\", \"3\"))\n self.run_Selection_Values.setToolTip(_translate(\"MainWindow\", \"Run / Start the Experiment\"))\n self.run_Selection_Values.setText(_translate(\"MainWindow\", \"RUN\"))\n self.down_Selection_Values.setToolTip(_translate(\"MainWindow\", \"Shut Down the Robot\"))\n self.down_Selection_Values.setText(_translate(\"MainWindow\", \"D\"))\n self.robot_TB2.setTitle(_translate(\"MainWindow\", \" Robot 2 (TB2)\"))\n self.configure_TB2_Button.setText(_translate(\"MainWindow\", \"Settings\"))\n self.logs_TB2_Button.setText(_translate(\"MainWindow\", \"Logs\"))\n self.floor_TB2_Show.setText(_translate(\"MainWindow\", \"FLOOR\"))\n self.kinect_TB2_Show.setText(_translate(\"MainWindow\", \"KINECT\"))\n self.gmapp_TB2_Show.setText(_translate(\"MainWindow\", \"GMAPP\"))\n self.camera_TB2_Show.setText(_translate(\"MainWindow\", \"CAMERA\"))\n self.on_TB2_Viewer.setText(_translate(\"MainWindow\", \"ON\"))\n self.off_TB2_Viewer.setText(_translate(\"MainWindow\", \"OFF\"))\n self.reload_TB2.setText(_translate(\"MainWindow\", \"RELOAD\"))\n self.reset_TB2.setText(_translate(\"MainWindow\", \"RESET\"))\n self.x_TB2_Label.setText(_translate(\"MainWindow\", \"X:\"))\n self.y_TB2_Label.setText(_translate(\"MainWindow\", \"Y:\"))\n self.velocity_TB2_Label.setText(_translate(\"MainWindow\", \"Velocity:\"))\n self.battery_TB2_Label.setText(_translate(\"MainWindow\", \"Battery:\"))\n self.kinnect_TB2_Screen.setToolTip(_translate(\"MainWindow\", \"Image from the Kinnect\"))\n self.viewer_TB2.setTabText(self.viewer_TB2.indexOf(self.kinnect_TB2_Screen), _translate(\"MainWindow\", \"ROBOT\"))\n self.viewer_TB2.setTabText(self.viewer_TB2.indexOf(self.gmapp_TB2_Screen), _translate(\"MainWindow\", \"GMAPP\"))\n self.viewer_TB2.setTabText(self.viewer_TB2.indexOf(self.floor_TB2_Screen), _translate(\"MainWindow\", \"FLOOR\"))\n self.viewer_TB2.setTabText(self.viewer_TB2.indexOf(self.camera_TB2_Screen), _translate(\"MainWindow\", \"CAMERA\"))\n self.label.setToolTip(_translate(\"MainWindow\", \"ROC Version\"))\n self.label.setText(_translate(\"MainWindow\", \"Alpha v0.1\"))\n self.robot_TB3.setTitle(_translate(\"MainWindow\", \" Robot 3 (TB3)\"))\n self.configure_TB3_Button.setText(_translate(\"MainWindow\", \"Settings\"))\n self.logs_TB3_Button.setText(_translate(\"MainWindow\", 
\"Logs\"))\n self.floor_TB3_Show.setText(_translate(\"MainWindow\", \"FLOOR\"))\n self.kinect_TB3_Show.setText(_translate(\"MainWindow\", \"KINECT\"))\n self.gmapp_TB3_Show.setText(_translate(\"MainWindow\", \"GMAPP\"))\n self.camera_TB3_Show.setText(_translate(\"MainWindow\", \"CAMERA\"))\n self.on_TB3_Viewer.setText(_translate(\"MainWindow\", \"ON\"))\n self.off_TB3_Viewer.setText(_translate(\"MainWindow\", \"OFF\"))\n self.reload_TB3.setText(_translate(\"MainWindow\", \"RELOAD\"))\n self.reset_TB3.setText(_translate(\"MainWindow\", \"RESET\"))\n self.x_TB3_Label.setText(_translate(\"MainWindow\", \"X:\"))\n self.y_TB3_Label.setText(_translate(\"MainWindow\", \"Y:\"))\n self.velocity_TB3_Label.setText(_translate(\"MainWindow\", \"Velocity:\"))\n self.battery_TB3_Label.setText(_translate(\"MainWindow\", \"Battery:\"))\n self.kinnect_TB3_Screen.setToolTip(_translate(\"MainWindow\", \"Image from the Kinnect\"))\n self.viewer_TB3.setTabText(self.viewer_TB3.indexOf(self.kinnect_TB3_Screen), _translate(\"MainWindow\", \"ROBOT\"))\n self.viewer_TB3.setTabText(self.viewer_TB3.indexOf(self.gmapp_TB3_Screen), _translate(\"MainWindow\", \"GMAPP\"))\n self.viewer_TB3.setTabText(self.viewer_TB3.indexOf(self.floor_TB3_Screen), _translate(\"MainWindow\", \"FLOOR\"))\n self.viewer_TB3.setTabText(self.viewer_TB3.indexOf(self.camera_TB3_Screen), _translate(\"MainWindow\", \"CAMERA\"))\n self.robot_TB4.setTitle(_translate(\"MainWindow\", \" Robot 4 (TB4)\"))\n self.configure_TB4_Button.setText(_translate(\"MainWindow\", \"Settings\"))\n self.logs_TB4_Button.setText(_translate(\"MainWindow\", \"Logs\"))\n self.floor_TB4_Show.setText(_translate(\"MainWindow\", \"FLOOR\"))\n self.kinect_TB4_Show.setText(_translate(\"MainWindow\", \"KINECT\"))\n self.gmapp_TB4_Show.setText(_translate(\"MainWindow\", \"GMAPP\"))\n self.camera_TB4_Show.setText(_translate(\"MainWindow\", \"CAMERA\"))\n self.on_TB4_Viewer.setText(_translate(\"MainWindow\", \"ON\"))\n self.off_TB4_Viewer.setText(_translate(\"MainWindow\", \"OFF\"))\n self.reload_TB4.setText(_translate(\"MainWindow\", \"RELOAD\"))\n self.reset_TB4.setText(_translate(\"MainWindow\", \"RESET\"))\n self.x_TB4_Label.setText(_translate(\"MainWindow\", \"X:\"))\n self.y_TB4_Label.setText(_translate(\"MainWindow\", \"Y:\"))\n self.velocity_TB4_Label.setText(_translate(\"MainWindow\", \"Velocity:\"))\n self.battery_TB4_Label.setText(_translate(\"MainWindow\", \"Battery:\"))\n self.kinnect_TB4_Screen.setToolTip(_translate(\"MainWindow\", \"Image from the Kinnect\"))\n self.viewer_TB4.setTabText(self.viewer_TB4.indexOf(self.kinnect_TB4_Screen), _translate(\"MainWindow\", \"ROBOT\"))\n self.viewer_TB4.setTabText(self.viewer_TB4.indexOf(self.gmapp_TB4_Screen), _translate(\"MainWindow\", \"GMAPP\"))\n self.viewer_TB4.setTabText(self.viewer_TB4.indexOf(self.floor_TB4_Screen), _translate(\"MainWindow\", \"FLOOR\"))\n self.viewer_TB4.setTabText(self.viewer_TB4.indexOf(self.camera_TB4_Screen), _translate(\"MainWindow\", \"CAMERA\"))" ]
[ "0.5091354", "0.48640856", "0.4863087", "0.4839665", "0.4798844", "0.4759309", "0.47126836", "0.47027948", "0.4693687", "0.4687498", "0.46860364", "0.4680407", "0.4671953", "0.46660995", "0.46642715", "0.4631289", "0.46038824", "0.46022075", "0.45926744", "0.4587362", "0.4554188", "0.45258534", "0.45225137", "0.45134565", "0.45083034", "0.45059207", "0.45015863", "0.4484689", "0.44838682", "0.44703877" ]
0.52568376
0
Report the result of an authentication request This endpoint returns the result of the request made in the '/auth/zenkeyasyncsignin' endpoint, identified by auth_req_id. If the request was successful, then the server makes and returns a token which can be used to authorize API calls. It must be included in an Authorization header.
def async_token_result(auth_req_id): # create a new user based on auth request so that each auth request returns a different token new_user_params = { 'zenkey_sub': auth_req_id, 'name': 'Mock User', 'phone_number': '+15555555555', 'postal_code': '55555', 'email': '[email protected]', 'username': 'mockuser', 'password': 'mockuser' } new_user = UserModel.create_new_user(new_user_params) jwt_token = create_jwt(new_user, current_app.config['TOKEN_EXPIRATION_TIME'], current_app.config['BASE_URL'], current_app.config['SECRET_KEY']) return jsonify({ 'auth_req_id': auth_req_id, 'token': jwt_token, 'token_type': 'bearer', # we omit the refresh token for brevity in this example codebase 'refresh_token': 'omitted', 'expires': current_app.config['TOKEN_EXPIRATION_TIME'].total_seconds() })
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def authenticated_request(**kwargs):\n return authenticated_request_async(**kwargs).get_result()", "def auth_complete(self, *args, **kwargs):\n request_data = self.strategy.request_data()\n\n sso_params = request_data.get(\"sso\")\n sso_signature = request_data.get(\"sig\")\n\n param_signature = hmac.new(\n self.setting(\"SECRET\").encode(\"utf8\"), sso_params.encode(\"utf8\"), sha256\n ).hexdigest()\n\n if not hmac.compare_digest(str(sso_signature), str(param_signature)):\n raise AuthException(\"Could not verify discourse login\")\n\n decoded_params = urlsafe_b64decode(sso_params.encode(\"utf8\")).decode(\"ascii\")\n\n # Validate the nonce to ensure the request was not modified\n response = parse_qs(decoded_params)\n nonce_obj = self.get_nonce(response.get(\"nonce\"))\n if nonce_obj:\n self.delete_nonce(nonce_obj)\n else:\n raise AuthTokenError(self, \"Incorrect id_token: nonce\")\n\n kwargs.update({\"sso\": \"\", \"sig\": \"\", \"backend\": self, \"response\": response})\n return self.strategy.authenticate(*args, **kwargs)", "def callback__authenticate_get(req, test_env=test_env):\n assert req.url.startswith(OAUTH1__URL_AUTHORITY_AUTHENTICATE)\n qs = req.url.split(\"?\")[1]\n qs = dict(parse_qsl(qs))\n\n testapp = test_env[\"testapp_authority\"]\n res = testapp.get(\n \"/authority/oauth1/authorize?oauth_token=%s\" % qs[\"oauth_token\"],\n headers=req.headers,\n extra_environ=test_env[\"extra_environ_authority\"],\n status=200,\n )\n test_env[\"requests_session_authority\"].cookies.update(\n testapp.cookies\n ) # update the session with the cookies from the response\n\n # status is '200 OK'\n # return in a format tailored for `requests`\n return (int(res.status.split(\" \")[0]), res.headers, res.body)", "def async_token_request():\n required_params = ['login_hint',\n 'client_id',\n 'scope',\n 'mccmnc',\n 'redirect_uri']\n optional_params = ['correlation_id']\n validated_params = validate_params(request, required_params, optional_params)\n\n # if this was not a mock we would request a token from zenkey\n\n # create mock auth req id\n auth_req_id = validated_params['login_hint'] + '_' + str(secrets.SystemRandom().randrange(100000))\n\n return jsonify({\n 'auth_req_id': auth_req_id,\n 'expires_in': 3600\n })", "def do_auth(self, access_token, *args, **kwargs):\n data = self.user_data(access_token)\n data['access_token'] = access_token\n kwargs.update(data)\n kwargs.update({'response': data, 'backend': self})\n return self.strategy.authenticate(*args, **kwargs)", "def do_auth(self, access_token, *args, **kwargs):\n data = self.user_data(access_token, *args, **kwargs)\n response = kwargs.get('response') or {}\n response.update(data or {})\n if 'access_token' not in response:\n response['access_token'] = access_token\n kwargs.update({'response': response, 'backend': self})\n return self.strategy.authenticate(*args, **kwargs)", "def do_auth(self, access_token, *args, **kwargs):\n data = self.user_data(access_token, *args, **kwargs)\n response = kwargs.get('response') or {}\n response.update(data or {})\n if 'access_token' not in response:\n response['access_token'] = access_token\n kwargs.update({'response': response, 'backend': self})\n return self.strategy.authenticate(*args, **kwargs)", "def get_auth_token(self):\n return self.do_rpc('get_authorization',\n username=self._username,\n password=self._password)", "def auth_user():\n global token\n app.logger.info(\"Microsoft Planner Service running on /auth port as expected\")\n try:\n request_count = 0\n if request_count == 0:\n token = 
get_tokens_as_app(client_id, user_code_info, tenant_id)\n request_count = 1 \n if 'access_token' in token:\n app.logger.info('Adding access token to cache...')\n add_token_to_cache(client_id, tenant_id, token)\n return_object = (f\"{token['refresh_token']}\")\n return render_template('token.html', return_object=return_object)\n else:\n return_error = (\"Token response did not result in a proper response. Athenticate again please.\")\n return render_template('token.html', return_error=return_error)\n except AttributeError or TypeError:\n return_error = ('Authentification failed. Please pull and restart your system and authenticate again.')\n return render_template('token.html', return_error=return_error)\n except adal.AdalError as err:\n return_error = (\"You're logged in with the wrong user. Please log out and authenticate again.\")\n return render_template('token.html', return_error=return_error)", "def step_impl(context):\n fields = {\n 'grant_type': 'authorization_code',\n 'code': context.code,\n 'client_id': context.vendor_config['versioned_auth']['client_id'],\n 'redirect_uri': context.vendor_config['versioned_auth']['redirect_uri'],\n }\n\n context.response = token_request(fields,\n context.vendor_config['versioned_auth'],\n context.conformance)", "def async_token_retry(auth_req_id):\n return jsonify({'auth_req_id': auth_req_id})", "def auth_complete(self, *args, **kwargs):\n self.process_error(self.data)\n params = self.auth_complete_params(self.validate_state())\n\n response = requests.post(self.ACCESS_TOKEN_URL, data=params,\n headers=self.auth_headers())\n if response.status_code == 400:\n raise AuthCanceled(self)\n\n response.raise_for_status()\n\n try:\n response = response.json()\n except (ValueError, KeyError):\n raise AuthUnknownError(self)\n\n response.pop('data')\n self.process_error(response)\n return self.do_auth(response['access_token'], response=response,\n *args, **kwargs)", "def auth_token_api():\n data = request.get_json()\n if not data:\n response = jsonify({\n 'success': False,\n 'message': 'Missing request body'\n })\n response.status_code = 422\n return response\n\n # process argument\n login_type = data.get('auth_type')\n email = data.get('email').strip().lower()\n password = data.get('password')\n\n if not login_type or login_type not in ['email']:\n response = jsonify({\n 'success': False,\n 'message': 'Invalid auth_type'\n })\n response.status_code = 422\n return response\n\n # email authentication\n elif login_type == 'email':\n if not email:\n response = jsonify({\n 'success': False,\n 'message': 'Must provide email when auth_type is \"email\"'\n })\n response.status_code = 422\n return response\n user = db.session.query(User).filter(User.email == email, User.deleted == False).one_or_none()\n if not user:\n response = jsonify({\n 'success': False,\n 'message': 'Not Authorized: invalid email'\n })\n response.status_code = 403\n return response\n # check the user's password\n password_valid = check_password_hash(user.password, password)\n if not password_valid:\n response = jsonify({\n 'success': False,\n 'message': 'Not Authorized: invalid password'\n })\n response.status_code = 403\n return response\n\n token = generate_auth_token(user_id=user.user_id)\n response = jsonify({\n 'success': True,\n 'token': token\n })\n response.status_code == '200'\n return response", "def skyserv_authenticator(self):\n \n header = {\n 'Content-Type': accept, \n 'X-Auth-Token': self.casjobtoken,\n 'Accept': accept\n }\n # this format is disgusting but required....\n authdata 
= {\n 'auth' :{\n 'identity': {\n 'password': {\n 'user': {\n 'name': username,\n 'password': password\n }\n }\n }\n }\n }\n payload = json.dumps(authdata).encode(encoding='utf-8')\n try:\n post = requests.post(self.loginurl, data=payload, headers=header)\n\n if post.status_code == 200:\n response = json.loads(post.text)\n token = response[self.tokenkey]\n return token\n else:\n print('Username and/or password are invalid.')\n post.raise_for_status()\n except Exception as e:\n raise(str(e))", "def __step2_get_oauth_request_token(self, oauth_id):\n\n c, r = http._post(\n self.auth_package.OAUTH+'auth/',\n data={\n 'action': 'accepted',\n 'oauth': oauth_id,\n 'login': self.auth_package.login,\n 'user_pwd': self.auth_package.password,\n 'account': 'r',\n 'credentials': 'r',\n\n },\n )\n data = r.read()\n c.close()\n\n if r.status == 302:\n location = r.getheader('location', '')\n if not location.startswith(self.auth_package.redirect_uri):\n raise Exception(\"Got an unexpected redirection to %s\"%location)\n query = urlparse.urlsplit(location).query\n query_dict = dict(urlparse.parse_qsl(query))\n if 'code' in query_dict:\n self._token = query_dict['code'] # Oauth Request Token\n else:\n raise Exception(\"Got unexpected http code %s (%s)\" % (r.status, r.reason))", "def step_impl(context):\n fields = {\n 'grant_type': 'authorization_code',\n 'code': context.code,\n 'client_id': context.vendor_config['versioned_auth']['client_id'],\n 'redirect_uri': context.vendor_config['versioned_auth']['redirect_uri'],\n }\n\n fields.update(dict(context.table))\n\n context.response = token_request(fields,\n context.vendor_config['versioned_auth'],\n context.conformance)", "def auth_authenticate():\n data = {'LoginName': username, 'Password': password}\n parameters = data_to_json(data)\n url = base_url + 'general/authentication/authenticate'\n response = make_request(url, parameters)\n r_value = ''\n if response['Status'] == 0:\n r_value = response['Value']['Token']\n return r_value", "def get_auth_token(self, is_retry=False):\n self.login_response = self.login_handler.login(self)\n if not self.login_response:\n self.available = False\n return False\n self.setup_params(self.login_response)\n if self.login_handler.check_key_required(self):\n self.key_required = True\n return self._auth_header", "def step_impl(context):\n fields = {\n 'grant_type': 'authorization_code',\n 'code': context.code,\n 'client_id': context.vendor_config['auth']['client_id'],\n 'redirect_uri': context.vendor_config['auth']['redirect_uri'],\n }\n\n context.response = token_request(fields,\n context.vendor_config['auth'],\n context.conformance)", "def authenticate(self, rfid):\n print(\"Auth id: [{}]\".format(rfid))\n\n values = {'id' : rfid}\n data = urllib.parse.urlencode(values)\n data = data.encode('utf-8')\n\n t1 = perf_counter()\n\n req = urllib.request.Request(self.auth_url, data)\n try:\n resp = urllib.request.urlopen(req, timeout=self.request_timeout)\n except URLError as err:\n print(\"URLError: auth_url:[{}]\".format(self.auth_url))\n print(\"URLError: {}\".format(err))\n print(\"Falling back to local cache\")\n cached = self.auth_from_cache(rfid)\n return cached\n except timeout as err:\n cached = self.auth_from_cache(rfid)\n return cached\n\n text = resp.read()\n\n t2 = perf_counter()\n print(\"Auth got [{}] in {} seconds\".format(text, t2-t1))\n\n if text == b'Granted':\n return True", "def _auth(self):\n url = 'https://forsight.crimsonhexagon.com/api/authenticate?'\n\n payload = {\n 'username': self.username,\n 'password': 
self.password\n }\n\n r = self.session.get(url, params=payload)\n j_result = r.json()\n self.auth_token = j_result[\"auth\"]\n #print('-- Crimson Hexagon Authenticated --')\n return", "def _request_token(self):\n params = {\n 'grant_type': 'client_credentials',\n 'client_id': self.client_id,\n 'client_secret': self.client_secret\n }\n\n response = self._http_request(\n method='POST',\n headers={'Content-Type': 'application/x-www-form-urlencoded'},\n full_url=self.auth_url,\n data=params\n )\n access_token = response.get('access_token')\n auth_header = {'Authorization': f'Bearer {access_token}'}\n return auth_header", "def step_impl(context):\n fields = {\n 'grant_type': 'authorization_code',\n 'code': context.code,\n 'client_id': context.vendor_config['auth']['client_id'],\n 'redirect_uri': context.vendor_config['auth']['redirect_uri'],\n }\n\n fields.update(dict(context.table))\n\n context.response = token_request(fields,\n context.vendor_config['auth'],\n context.conformance)", "def authz(environ, start_response, logger, kaka=None):\n _session_db = environ[\"oic.sessiondb\"]\n _cc = environ[\"oic.client_config\"]\n _conc = environ[\"oic.consumer.config\"]\n _server_info = environ[\"oic.server.info\"]\n\n _log_info = logger.info\n\n try:\n _cli = Consumer(_session_db, _conc, _cc, _server_info)\n response = _cli.parse_authz(environ, start_response, logger)\n except (AuthzError, TokenError), err:\n resp = http_util.Unauthorized(\"%s\" % err)\n return resp(environ, start_response)\n except UnknownState, err:\n resp = http_util.BadRequest(\"Unsolicited Response\")\n return resp(environ, start_response)\n\n if _conc[\"flow_type\"] == \"code\": # Not done yet\n try:\n _cli.complete(logger) # get the access token from the token\n # endpoint\n except TokenError, err:\n resp = http_util.Unauthorized(\"%s\" % err)\n return resp(environ, start_response)\n\n # Valid for 6 hours (=360 minutes)\n kaka = http_util.cookie(_cc[\"client_id\"], _cli.state, _cli.seed,\n expire=360, path=\"/\")\n\n _log_info(\"DUMP: %s\" % (_cli.sdb[_cli.sdb[\"seed:%s\" % _cli.seed]],))\n \n resp = http_util.Response(\"Your will is registered\", headers=[kaka])\n _log_info(\"Cookie: %s\" % (kaka,))\n return resp(environ, start_response)", "def step_impl(context, request_type):\n fields = {\n 'grant_type': 'authorization_code',\n 'code': context.code,\n 'client_id': context.vendor_config['versioned_auth']['client_id'],\n 'redirect_uri': context.vendor_config['versioned_auth']['redirect_uri'],\n }\n\n context.response = token_request(fields,\n context.vendor_config['versioned_auth'],\n context.conformance,\n request_type)", "def awa_provide_authorization_result(self):\n # TODO: Assuming first server is good - need to make fallback logic\n return self.session.get_any(\"{base}{request_url}\".format(base=self.servers[0],\n request_url=F\"/Destiny2/Awa/AwaProvideAuthorizationResult/\"))", "def get(self):\n\n\t\trequest = user_auth_parser.parse_args(strict=True)\n\n\t\tresult = Authenticator.authenticate(\n\t\t\trequest[\"username\"],\n\t\t\trequest[\"password\"]\n\t\t)\n\n\t\treturn result", "def take_auth(aid):\r\n auth_passwd = request.values.get('auth_passwd', '')\r\n with engine.with_session() as ss:\r\n cur_auth = ss.query(LxContractAuthorization).get(aid)\r\n if not cur_auth:\r\n return jsonify({'success': False, 'errorMsg': constants.ERROR_CODE[\r\n 'AUTH_NOT_EXISTS']})\r\n if not sha256_crypt.verify(auth_passwd, cur_auth.auth_passwd):\r\n return jsonify({'success': False, 'errorMsg': constants.ERROR_CODE[\r\n 
'AUTH_PASSWD_ERROR']})\r\n update_dict = dict()\r\n update_dict['user'] = current_user\r\n cur_auth.update(update_dict)\r\n return jsonify({'success': True, 'data': cur_auth.contract_id})", "def authentication_request():\n # Get the access token from the header\n auth_header = request.headers.get('Authorization')\n if auth_header:\n try:\n access_token = auth_header.split(' ')[1]\n except IndexError:\n return {\"message\": \"Token is malformed\"}, status.HTTP_401_UNAUTHORIZED\n else:\n access_token = ''\n\n return access_token", "async def async_step_auth(self, user_input=None):\n if user_input.get(const.CODE):\n self.data = user_input\n return self.async_external_step_done(next_step_id=\"finish\")\n\n profile = user_input.get(const.PROFILE)\n\n auth_client = self.get_auth_client(profile)\n\n url = auth_client.get_authorize_url()\n\n return self.async_external_step(step_id=\"auth\", url=url)" ]
[ "0.62605786", "0.6203439", "0.60911304", "0.6047729", "0.59584135", "0.5940721", "0.5940721", "0.5912063", "0.59063685", "0.5894196", "0.5864419", "0.58414835", "0.58228904", "0.5785015", "0.5781725", "0.57685417", "0.5760979", "0.5752778", "0.57499754", "0.5749469", "0.57336605", "0.5715225", "0.56270075", "0.5624586", "0.5620156", "0.56067866", "0.55957484", "0.5571313", "0.55693537", "0.5563864" ]
0.6753446
0
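As a rough illustration of how a client might consume the token-result endpoint described above, here is a minimal polling sketch. The base URL, route path, HTTP method, and use of the `requests` library are all assumptions for illustration only; the record shows the Flask handler `async_token_result` but not how it is routed.

```python
# Hypothetical client-side polling of the async sign-in result endpoint.
# BASE_URL and the route path are illustrative assumptions, not taken from the handler.
import time
import requests

BASE_URL = "https://example.invalid/auth"  # hypothetical host

def poll_token_result(auth_req_id, attempts=5, delay=2.0):
    """Poll until the server issues a bearer token for auth_req_id, or give up."""
    for _ in range(attempts):
        resp = requests.get(f"{BASE_URL}/zenkeyasyncsignin/{auth_req_id}/result")
        if resp.status_code == 200:
            body = resp.json()
            # Per the description above, the issued bearer token is placed in an
            # Authorization header on subsequent API calls.
            return {"Authorization": f"Bearer {body['token']}"}
        time.sleep(delay)
    return None
```

Note that the mock handler mints a fresh user and JWT on every call for a given auth_req_id, so repeated polls against this mock would each return a different token.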
Retry an authentication request This endpoint retries the request made in the '/auth/zenkeyasyncsignin' endpoint, identified by auth_req_id, and has the same return value.
def async_token_retry(auth_req_id): return jsonify({'auth_req_id': auth_req_id})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def retry_request(\n self,\n tapi_exception,\n error_message,\n repeat_number,\n response,\n request_kwargs,\n api_params,\n **kwargs\n ):\n return False", "async def _retry_get(url: str, retries: int, **kwargs):\r\n retries -= 1\r\n if retries >= 0:\r\n logger.warning(\r\n f\"Retrying request to {url}. Retries remaining: {retries}\")\r\n return await asyncio.create_task(\r\n self.get(url, retries, **kwargs))\r\n logger.error(\r\n f\"Max retries exceeded: {url}. URL can not be navigated.\")", "def _retry_request(self, request, timeout=2, attempts=3):\n import googleapiclient\n\n try:\n return request.execute()\n except BrokenPipeError as ex:\n if attempts > 0:\n time.sleep(timeout)\n return self._retry_request(request, timeout * 2, attempts - 1)\n raise ex\n except googleapiclient.errors.HttpError as ex:\n log_verbose_traceback(ex)\n raise ex\n except Exception as ex:\n log_verbose_traceback(ex)\n raise ex", "def _retry(self, result, method, url, params_dict, **kwargs):\n return result", "def _do_fail_retry(self, event):\n if self._retries > 0:\n self._retries -= 1\n self._state_machine.retry()\n else:\n self._state_machine.abort(result=event.result)", "def _do_fail_retry(self, event):\n if self._retries > 0:\n self._retries -= 1\n self._state_machine.retry()\n else:\n self._state_machine.abort(result=event.result)", "def retry(self, envelope):\n # type: (RetryPolicy, Envelope) -> None\n raise NotImplementedError()", "def retry(self):\n return self._retry", "async def async_step_reauth(self, entry_data: Mapping[str, Any]) -> FlowResult:\n return await self.async_step_reauth_confirm()", "async def async_step_reauth(self, entry_data: Mapping[str, Any]) -> FlowResult:\n return await self.async_step_reauth_confirm()", "def test_retry(self):\n self.response.raise_for_status.side_effect = \\\n [requests.HTTPError(), None]\n\n wsgi._retryable('get', 'http://some.thing')\n\n assert self.session.get.call_count == 2", "def _retry_occurred(self):", "def retry_on_bad_auth(func):\n @wraps(func)\n def retry_version(self, *args, **kwargs):\n while True:\n try:\n return func(self, *args, **kwargs)\n except trolly.ResourceUnavailable:\n sys.stderr.write('bad request (refresh board id)\\n')\n self._board_id = None\n self.save_key('board_id', None)\n except trolly.Unauthorised:\n sys.stderr.write('bad permissions (refresh token)\\n')\n self._client = None\n self._token = None\n self.save_key('token', None)\n return retry_version", "def retry_task(func):\n\n @wraps(func)\n def wrapper(task, *args, **kwargs):\n retries = task.request.retries\n exponential = 2 ** retries\n exponential_backoff = random.randint(exponential, exponential * 2)\n try:\n result = func(task, *args, **kwargs)\n except Exception as e:\n logger.error(\n f\"Retriying {task.request.id} after {exponential_backoff} seconds\"\n )\n raise task.retry(countdown=exponential_backoff, exc=e, max_retries=5)\n\n return result\n\n return wrapper", "def __request(self, method, resource, retry=True):\n headers = {\"x-access-token\": self._accessToken}\n result = self.__call(method, resource, headers=headers)\n\n if result:\n return result\n elif result.status_code == 401 and retry:\n self.__authenticate()\n return self.__request(method, resource, retry=False)\n else:\n raise requests.HTTPError(result)", "def retry(self, times):\n return Retry((requests.ConnectionError, requests.Timeout), times)", "def retry_job(\n self,\n ) -> Callable[[cloud_deploy.RetryJobRequest], cloud_deploy.RetryJobResponse]:\n # Generate a \"stub function\" on-the-fly which 
will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"retry_job\" not in self._stubs:\n self._stubs[\"retry_job\"] = self.grpc_channel.unary_unary(\n \"/google.cloud.deploy.v1.CloudDeploy/RetryJob\",\n request_serializer=cloud_deploy.RetryJobRequest.serialize,\n response_deserializer=cloud_deploy.RetryJobResponse.deserialize,\n )\n return self._stubs[\"retry_job\"]", "def _a_get_retry_object(self) -> AsyncRetrying:\n return AsyncRetrying(**self.retry_args)", "def cancel_retry(self):\n if self._cancel_retry is not None:\n self._cancel_retry.cancel()\n self._cancel_retry = None", "def cancel_retry(self):\n if self._cancel_retry is not None:\n self._cancel_retry.cancel()\n self._cancel_retry = None", "async def async_step_reauth(self, entry_data: Mapping[str, Any]) -> FlowResult:\n self._auth_data = dict(entry_data)\n return await self.async_step_reauth_validate(entry_data)", "def get_session_retry(self, retries=3, backoff_factor=0.2,\n status_forcelist=(404, 500, 502, 504),\n session=None):\n session = session or requests.Session()\n retry = Retry(total=retries, read=retries, connect=retries,\n backoff_factor=backoff_factor, status_forcelist=status_forcelist)\n adapter = HTTPAdapter(max_retries=retry)\n session.mount('http://', adapter)\n return session", "async def _wait_retry(self) -> None:\n # Sleep 2^tries + 0…tries*3 seconds between retries\n self.retry_task = asyncio.create_task(\n asyncio.sleep(2 ** min(9, self.tries) + random.randint(0, self.tries * 3))\n )\n await self.retry_task\n self.retry_task = None", "def retry_mechanism(self):\n # If future is done we can close the transport\n if self.on_response_received.done():\n self.transport.close()\n elif self._retries < self._max_retries:\n self._retries += 1\n logger.debug(f'Retry #{self._retries} of {self._max_retries}')\n self._send_request()\n else:\n logger.debug(f'Max number of retries ({self._max_retries}) reached, closing socket')\n self.on_response_received.set_exception(MaxRetriesException)\n self.transport.close()", "def auth_renew_session(self) -> None:\n self.__logger.debug('Eva.auth_renew_session called')\n return self.__http_client.auth_renew_session()", "def set_retry_timeout(self, retry_timeout):", "def retry_request(self, method, action, body=None,\r\n headers=None, params=None):\r\n max_attempts = self.retries + 1\r\n for i in range(max_attempts):\r\n try:\r\n return self.do_request(method, action, body=body,\r\n headers=headers, params=params)\r\n except exceptions.ConnectionFailed:\r\n # Exception has already been logged by do_request()\r\n if i < self.retries:\r\n _logger.debug(_('Retrying connection to Neutron service'))\r\n time.sleep(self.retry_interval)\r\n\r\n raise exceptions.ConnectionFailed(reason=_(\"Maximum attempts reached\"))", "def resumeJob(_id, client):\n return tryAgainJob(_id)", "async def async_step_reauth(self, entry_data: Mapping[str, Any]) -> FlowResult:\n self._reauth_entry = self.hass.config_entries.async_get_entry(\n self.context[\"entry_id\"]\n )\n return await self.async_step_reauth_confirm()", "def _Retry(func, *args, **kwargs):\n retries = _RETRIES\n while True:\n try:\n return func(*args, **kwargs)\n except Exception as e: # pylint: disable=broad-except\n retries -= 1\n if retries > 0:\n log.info('Exception {e} thrown in {func}. Retrying.'.format(\n e=e, func=func.__name__))\n time.sleep(1)\n else:\n raise e" ]
[ "0.6041712", "0.5856318", "0.5761647", "0.5758948", "0.5676299", "0.5676299", "0.5544573", "0.5448385", "0.5387163", "0.5387163", "0.5383666", "0.53787154", "0.53494453", "0.53354377", "0.52876604", "0.528456", "0.52109355", "0.5199061", "0.5176951", "0.5176951", "0.51569957", "0.5147232", "0.5137173", "0.5101735", "0.51014936", "0.504689", "0.5044108", "0.5030728", "0.50285125", "0.49637493" ]
0.7038709
0
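For the retry endpoint described above, a correspondingly small client sketch, with the same caveats: the URL, path, and HTTP method are assumed for illustration, and the mock handler simply echoes the auth_req_id back as JSON.

```python
# Hypothetical client call to the retry endpoint; path and method are assumptions.
import requests

BASE_URL = "https://example.invalid/auth"  # hypothetical host

def retry_async_signin(auth_req_id):
    """Ask the server to retry the pending async sign-in identified by auth_req_id."""
    resp = requests.post(f"{BASE_URL}/zenkeyasyncsignin/{auth_req_id}/retry")
    resp.raise_for_status()
    # The mock handler returns {"auth_req_id": ...} unchanged.
    return resp.json()["auth_req_id"]
```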
Cancel an authentication request This endpoint cancels the request made in the '/auth/zenkeyasyncsignin' endpoint, identified by auth_req_id, and returns only a status code: 200 if successful.
def async_token_cancel(auth_req_id): #pylint: disable=unused-argument return ""
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def nexmo_cancel(request):\n state = request.validated[\"querystring\"][\"state\"]\n\n # Require on-going session\n state_info = request.registry.cache.get(state)\n\n if not state_info:\n error_msg = \"The Nexmo session was not found, please re-authenticate.\"\n return http_error(\n httpexceptions.HTTPRequestTimeout(),\n errno=ERRORS.MISSING_AUTH_TOKEN,\n message=error_msg,\n )\n else:\n state_info = json.loads(state_info)\n\n params = {\n \"api_key\": nexmo_conf(request, \"api_key\"),\n \"api_secret\": nexmo_conf(request, \"api_secret\"),\n \"request_id\": state_info[\"request_id\"],\n \"cmd\": \"cancel\",\n }\n\n cancel_url = \"{}/verify/control/json\".format(\n nexmo_conf(request, \"api_endpoint\").rstrip(\"/\")\n )\n\n try:\n resp = requests.get(cancel_url, params=params)\n except requests.exceptions.ConnectionError:\n logger.exception(\n \"A connection error occured when trying to cancel the auth code\"\n )\n error_msg = \"The Nexmo API is not ready, please retry later.\"\n return http_error(\n httpexceptions.HTTPServiceUnavailable(),\n errno=ERRORS.BACKEND,\n message=error_msg,\n )\n\n try:\n resp.raise_for_status()\n except requests.exceptions.HTTPError:\n logger.exception(\"An error occured when trying to cancel the auth code\")\n error_msg = \"The Nexmo API is not ready, please retry later.\"\n return http_error(\n httpexceptions.HTTPServiceUnavailable(),\n errno=ERRORS.BACKEND,\n message=error_msg,\n )\n\n data = resp.json()\n\n if data[\"status\"] == \"19\":\n # Two cases\n # - Cancel too early, please retry later.\n # - Cancel too late, its gone.\n if data[\"error_text\"].endswith(\n \"Too many attempts to re-deliver have already been made.\"\n ):\n logger.info(\"Nexmo Code Cancelation failed. Too late: {}\".format(data))\n return http_error(\n httpexceptions.HTTPGone(),\n errno=ERRORS.BACKEND,\n message=\"Nexmo code cancelation failed. Too late.\",\n )\n else:\n logger.info(\"Nexmo Code Cancelation too early: {}\".format(data))\n return http_error(\n httpexceptions.HTTPBadRequest(),\n errno=ERRORS.BACKEND,\n message=\"Nexmo code cancelation failed. 
Too early.\",\n )\n\n if data[\"status\"] != \"0\":\n logger.info(\"Nexmo Code Cancelation Failed: {}\".format(data))\n error_details = {\n \"name\": \"code\",\n \"location\": \"querystring\",\n \"description\": \"Nexmo code cancelation failed.\",\n }\n raise_invalid(request, **error_details)\n\n # Make sure we cannot try twice with the same state\n request.registry.cache.delete(state)\n\n return {\"state\": state, \"status\": \"canceled\"}", "def cancel(self) -> None:\n if not self._state == AsyncPostRequest._RUNNING:\n raise Exception(\"Request not started.\")\n self._hasBeenCancelled = True\n self._resultFuture.cancel()", "def cancelRequest(self, json):\n uID = json.get('uID')\n print(RequestsDAO().getRequestByuID(uID))\n if not RequestsDAO().getRequestByuID(uID):\n return jsonify(Error=\"No request found\"), 404\n else:\n\n if uID:\n RequestsDAO().deleteRequest(uID)\n return jsonify(User=\"User deleted\"), 200\n else:\n return jsonify(Error=\"Unexpected attributes in update request\"), 400", "def async_token_retry(auth_req_id):\n return jsonify({'auth_req_id': auth_req_id})", "def cancel_run(run_id):\n # Get the access token first to raise an error immediately if no token is\n # present (to avoid unnecessarily instantiating the service API).\n token = ACCESS_TOKEN(request)\n # If the body contains a Json object verify that the object has the\n # mandatory element 'reason'\n reason = None\n if request.json:\n try:\n obj = util.validate_doc(\n request.json,\n mandatory=['reason']\n )\n reason = obj['reason']\n except ValueError as ex:\n raise err.InvalidRequestError(str(ex))\n from robflask.service import service\n with service(access_token=token) as api:\n # Authentication of the user from the expected api_token in the header\n # will fail if no token is given or if the user is not logged in.\n r = api.runs().cancel_run(\n run_id=run_id,\n reason=reason\n )\n return make_response(jsonify(r), 200)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, 
request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)", "def Cancel(self, request, global_params=None):\n config = self.GetMethodConfig('Cancel')\n return self._RunMethod(\n config, request, global_params=global_params)" ]
[ "0.62335056", "0.580511", "0.5616215", "0.5513265", "0.548557", "0.5480032", "0.5480032", "0.5480032", "0.5480032", "0.5480032", "0.5480032", "0.5480032", "0.5480032", "0.5480032", "0.5480032", "0.5480032", "0.5480032", "0.5480032", "0.5480032", "0.5480032", "0.5480032", "0.5480032", "0.5480032", "0.5480032", "0.5480032", "0.5480032", "0.5480032", "0.5480032", "0.5480032", "0.5480032" ]
0.6933732
0
Grant token request. This endpoint grants the token request made at the '/auth/zenkeyasyncsignin' endpoint, identified by auth_req_id, and has the same return value. The ZenKey carrier hits this endpoint
def async_token_grant(): required_params = ['auth_req_id', 'state', 'scope'] optional_params = ['access_token', 'expires_in', 'refresh_token', 'id_token', 'error', 'error_description', 'correlation_id'] validate_params(request, required_params, optional_params) # if this was not a mock we would save the granted token information to a db return ""
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def async_token_request():\n required_params = ['login_hint',\n 'client_id',\n 'scope',\n 'mccmnc',\n 'redirect_uri']\n optional_params = ['correlation_id']\n validated_params = validate_params(request, required_params, optional_params)\n\n # if this was not a mock we would request a token from zenkey\n\n # create mock auth req id\n auth_req_id = validated_params['login_hint'] + '_' + str(secrets.SystemRandom().randrange(100000))\n\n return jsonify({\n 'auth_req_id': auth_req_id,\n 'expires_in': 3600\n })", "def auth_access_token_request(self, auth_access_token_request):\n\n self._auth_access_token_request = auth_access_token_request", "def step_impl(context):\n fields = {\n 'grant_type': 'authorization_code',\n 'code': context.code,\n 'client_id': context.vendor_config['auth']['client_id'],\n 'redirect_uri': context.vendor_config['auth']['redirect_uri'],\n }\n\n context.response = token_request(fields,\n context.vendor_config['auth'],\n context.conformance)", "def step_impl(context):\n fields = {\n 'grant_type': 'authorization_code',\n 'code': context.code,\n 'client_id': context.vendor_config['versioned_auth']['client_id'],\n 'redirect_uri': context.vendor_config['versioned_auth']['redirect_uri'],\n }\n\n context.response = token_request(fields,\n context.vendor_config['versioned_auth'],\n context.conformance)", "def step_impl(context):\n fields = {\n 'grant_type': 'authorization_code',\n 'code': context.code,\n 'client_id': context.vendor_config['auth']['client_id'],\n 'redirect_uri': context.vendor_config['auth']['redirect_uri'],\n }\n\n fields.update(dict(context.table))\n\n context.response = token_request(fields,\n context.vendor_config['auth'],\n context.conformance)", "def async_token_retry(auth_req_id):\n return jsonify({'auth_req_id': auth_req_id})", "def async_token_result(auth_req_id):\n\n # create a new user based on auth request so that each auth request returns a different token\n new_user_params = {\n 'zenkey_sub': auth_req_id,\n 'name': 'Mock User',\n 'phone_number': '+15555555555',\n 'postal_code': '55555',\n 'email': '[email protected]',\n 'username': 'mockuser',\n 'password': 'mockuser'\n }\n new_user = UserModel.create_new_user(new_user_params)\n jwt_token = create_jwt(new_user,\n current_app.config['TOKEN_EXPIRATION_TIME'],\n current_app.config['BASE_URL'],\n current_app.config['SECRET_KEY'])\n\n return jsonify({\n 'auth_req_id': auth_req_id,\n 'token': jwt_token,\n 'token_type': 'bearer',\n # we omit the refresh token for brevity in this example codebase\n 'refresh_token': 'omitted',\n 'expires': current_app.config['TOKEN_EXPIRATION_TIME'].total_seconds()\n })", "def step_impl(context):\n fields = {\n 'grant_type': 'authorization_code',\n 'code': context.code,\n 'client_id': context.vendor_config['versioned_auth']['client_id'],\n 'redirect_uri': context.vendor_config['versioned_auth']['redirect_uri'],\n }\n\n fields.update(dict(context.table))\n\n context.response = token_request(fields,\n context.vendor_config['versioned_auth'],\n context.conformance)", "async def _token_request(self, data: dict) -> dict:\n session = async_get_clientsession(self.hass)\n\n data[\"client_id\"] = self.client_id\n\n if self.client_secret is not None:\n data[\"client_secret\"] = self.client_secret\n\n headers = {\n \"Authorization\": BasicAuth(self.client_id,\n self.client_secret).encode(),\n \"Content-Type\": \"application/x-www-form-urlencoded\",\n }\n\n resp = await session.post(self.token_url,\n headers=headers,\n data=data)\n resp.raise_for_status()\n return cast(dict, await 
resp.json())", "def _request_token(self):\n params = {\n 'grant_type': 'client_credentials',\n 'client_id': self.client_id,\n 'client_secret': self.client_secret\n }\n\n response = self._http_request(\n method='POST',\n headers={'Content-Type': 'application/x-www-form-urlencoded'},\n full_url=self.auth_url,\n data=params\n )\n access_token = response.get('access_token')\n auth_header = {'Authorization': f'Bearer {access_token}'}\n return auth_header", "def step_impl(context, request_type):\n fields = {\n 'grant_type': 'authorization_code',\n 'code': context.code,\n 'client_id': context.vendor_config['versioned_auth']['client_id'],\n 'redirect_uri': context.vendor_config['versioned_auth']['redirect_uri'],\n }\n\n context.response = token_request(fields,\n context.vendor_config['versioned_auth'],\n context.conformance,\n request_type)", "def grant_token(request):\n\n grant_token_svc = request.find_service(name=\"grant_token\")\n h_user = request.lti_user.h_user\n\n return {\"grant_token\": grant_token_svc.generate_token(h_user)}", "def step_impl(context, field_name):\n fields = {\n 'grant_type': 'authorization_code',\n 'code': context.code,\n 'client_id': context.vendor_config['versioned_auth']['client_id'],\n 'redirect_uri': context.vendor_config['versioned_auth']['redirect_uri'],\n }\n\n del fields[field_name]\n\n context.response = token_request(fields,\n context.vendor_config['versioned_auth'],\n context.conformance)", "def step_impl(context, field_name):\n fields = {\n 'grant_type': 'authorization_code',\n 'code': context.code,\n 'client_id': context.vendor_config['auth']['client_id'],\n 'redirect_uri': context.vendor_config['auth']['redirect_uri'],\n }\n\n del fields[field_name]\n\n context.response = token_request(fields,\n context.vendor_config['auth'],\n context.conformance)", "def token_endpoint(self, request=\"\", authn=\"\", dtype=\"urlencoded\", **kwargs):\n logger.debug(\"- token -\")\n logger.debug(\"token_request: %s\" % sanitize(request))\n\n areq = self.server.message_factory.get_request_type(\n \"token_endpoint\"\n )().deserialize(request, dtype)\n\n # Verify client authentication\n try:\n client_id = self.client_authn(self, areq, authn)\n except (FailedAuthentication, AuthnFailure) as err:\n logger.error(err)\n error = TokenErrorResponse(\n error=\"unauthorized_client\", error_description=\"%s\" % err\n )\n return Unauthorized(error.to_json(), content=\"application/json\")\n\n logger.debug(\"AccessTokenRequest: %s\" % sanitize(areq))\n\n # `code` is not mandatory for all requests\n if \"code\" in areq:\n try:\n _info = self.sdb[areq[\"code\"]]\n except KeyError:\n logger.error(\"Code not present in SessionDB\")\n error = TokenErrorResponse(\n error=\"unauthorized_client\", error_description=\"Invalid code.\"\n )\n return Unauthorized(error.to_json(), content=\"application/json\")\n\n resp = self.token_scope_check(areq, _info)\n if resp:\n return resp\n # If redirect_uri was in the initial authorization request verify that they match\n if (\n \"redirect_uri\" in _info\n and areq[\"redirect_uri\"] != _info[\"redirect_uri\"]\n ):\n logger.error(\"Redirect_uri mismatch\")\n error = TokenErrorResponse(\n error=\"unauthorized_client\",\n error_description=\"Redirect_uris do not match.\",\n )\n return Unauthorized(error.to_json(), content=\"application/json\")\n if \"state\" in areq:\n if _info[\"state\"] != areq[\"state\"]:\n logger.error(\"State value mismatch\")\n error = TokenErrorResponse(\n error=\"unauthorized_client\",\n error_description=\"State values do not 
match.\",\n )\n return Unauthorized(error.to_json(), content=\"application/json\")\n\n # Propagate the client_id further\n areq.setdefault(\"client_id\", client_id)\n grant_type = areq[\"grant_type\"]\n if grant_type == \"authorization_code\":\n return self.code_grant_type(areq)\n elif grant_type == \"refresh_token\":\n return self.refresh_token_grant_type(areq)\n elif grant_type == \"client_credentials\":\n return self.client_credentials_grant_type(areq)\n elif grant_type == \"password\":\n return self.password_grant_type(areq)\n else:\n raise UnSupported(\"grant_type: {}\".format(grant_type))", "def request_token(self, **kwargs):\n # type: (Any) -> Token\n token = self._request(\n self._client.fetch_token,\n self._token_endpoint,\n grant_type=self.GRANT_AUTHORIZATION_CODE,\n **kwargs\n )\n self.set_token(token)\n return token", "def auth_token(self):", "def apply_auth():\n\tclient = BaiduOpenApi()\n\tapi = client.device.code\n\tresp = client.device.code.get(response_type=\"device_code\", scope=\"netdisk\")\n\t# open grant page and wait for user confirm\n\twebbrowser.open_new_tab(r\"http://openapi.baidu.com/device?code=%s\"%resp[\"user_code\"])\n\t# yield to main\n\tyield\n\t# main will tell user to confirm and it will take a while\n\t# polling to wait server back\n\tpolling_tokens(resp[\"device_code\"], resp[\"interval\"], resp[\"expires_in\"])", "def step_impl(context):\n fields = {\n 'grant_type': 'refresh_token',\n 'refresh_token': context.oauth.refresh_token,\n 'scope': context.vendor_config['auth']['scope'],\n }\n\n context.response = token_request(fields,\n context.vendor_config['auth'],\n context.conformance)", "def token_request(post_data, auth_config, conformance):\n auth = None\n if auth_config.get('confidential_client'):\n auth = requests.auth.HTTPBasicAuth(auth_config['client_id'],\n auth_config['client_secret'])\n\n uris = fhir.get_oauth_uris(conformance)\n\n response = requests.post(uris['token'],\n data=post_data,\n allow_redirects=False,\n auth=auth,\n timeout=5)\n\n return response", "def __step2_get_oauth_request_token(self, oauth_id):\n\n c, r = http._post(\n self.auth_package.OAUTH+'auth/',\n data={\n 'action': 'accepted',\n 'oauth': oauth_id,\n 'login': self.auth_package.login,\n 'user_pwd': self.auth_package.password,\n 'account': 'r',\n 'credentials': 'r',\n\n },\n )\n data = r.read()\n c.close()\n\n if r.status == 302:\n location = r.getheader('location', '')\n if not location.startswith(self.auth_package.redirect_uri):\n raise Exception(\"Got an unexpected redirection to %s\"%location)\n query = urlparse.urlsplit(location).query\n query_dict = dict(urlparse.parse_qsl(query))\n if 'code' in query_dict:\n self._token = query_dict['code'] # Oauth Request Token\n else:\n raise Exception(\"Got unexpected http code %s (%s)\" % (r.status, r.reason))", "def _request_token(self):\n response = requests.post(\n \"%s/generateToken\" % self.root_uri.rstrip(\"/\"), {\n \"username\": self.username,\n \"password\": self.password,\n \"expiration\": '60',\n \"referer\": 'https://wsdot.maps.arcgis.com',\n \"f\": 'json'\n })\n\n token_info = response.json()\n if \"error\" in token_info:\n raise TokenError(token_info[\"error\"])\n self._token = token_info[\"token\"]\n self._expires = datetime.fromtimestamp(token_info[\"expires\"] / 1000)", "def post(self):\n current_user_id = get_jwt_identity()\n new_token = create_access_token(identity=current_user_id)\n response, status = {\n 'message': 'Access token was successfully refreshed',\n 'access_token': new_token\n }, 200\n return 
Response(dumps(response), status=status, mimetype='application/json')", "def refresh(self):\n self._request_token(grant_type='client_credentials')", "def step_impl(context):\n fields = {\n 'grant_type': 'refresh_token',\n 'refresh_token': context.oauth.refresh_token,\n 'scope': context.vendor_config['versioned_auth']['scope'],\n }\n\n context.response = token_request(fields,\n context.vendor_config['versioned_auth'],\n context.conformance)", "def set_access_token(self, auth_code=None):\n\n oauth_params = {\n 'client_id': self.client_id,\n 'client_secret': self.client_secret,\n 'code': auth_code,\n 'redirect_uri': self.redirect_uri\n }\n token_request = req.post(OAUTH_ENDPOINT, data=oauth_params)\n token_response = token_request.json()\n access_token = token_response['access_token']\n self.access_token = access_token", "def post(self):\n\n action = self.request.get('action')\n if not action:\n raise ErrorMessage(404, 'missing action (requested_action) params')\n\n self.require_action_permitted('grant')\n\n account = model.Account.get(self.request.get('key'))\n if not account:\n raise ErrorMessage(404, 'bad key given')\n\n #TODO(eyalf): define account.display_name() or something\n name = account.email\n if not action in account.requested_actions:\n #i18n: Error message\n raise ErrorMessage(404, _('No pending request for '\n '%(account_action)s by %(user)s')\n % (action, name))\n account.requested_actions.remove(action)\n grant = self.request.get('grant', 'deny')\n if grant == 'approve':\n account.actions.append(action)\n account.put()\n logging.info('%s request for %s was %s' % (account.email,\n action,\n grant))\n\n if self.params.embed:\n if grant == 'approve':\n self.write(\n #i18n: Application for the given permission action approved\n _('Request for becoming %(action)s was approved.') % action)\n else:\n self.write(\n #i18n: Application for the given permission action denied\n _('Request for becoming %(action)s was denied.') % action)\n else:\n raise Redirect(self.get_url('/grant_access'))", "def __call__(self, r):\n r.headers[\"x-aims-auth-token\"] = self._token\n return r", "def callback():\n code = request.args.get('code')\n result = http.post(token_uri, data = {\n 'grant_type': 'authorization_code',\n 'code': code,\n 'redirect_uri': redirect_uri,\n 'client_id': client_id,\n 'client_secret': client_secret\n })\n data = result.json()\n \n access_token = data['access_token']\n refresh_token = data['refresh_token']\n \n cache.set('access_token', access_token)\n cache.set('refresh_token', refresh_token)\n\n return redirect('/')", "def authorization_successful(req, resp):\n params = {\n \"client_id\": os.getenv('STRAVA_CLIENT_ID'),\n \"client_secret\": os.getenv('STRAVA_CLIENT_SECRET'),\n \"code\": req.params.get('code'),\n \"grant_type\": \"authorization_code\"\n }\n r = requests.post(\"https://www.strava.com/oauth/token\", params)\n logger.debug(r.text)\n resp.text = r.text" ]
[ "0.6658314", "0.63655144", "0.609516", "0.60386145", "0.6004609", "0.5970912", "0.5961136", "0.5922673", "0.59126514", "0.5911629", "0.5882935", "0.5847808", "0.5799271", "0.57977325", "0.56687057", "0.5584188", "0.55664384", "0.55645496", "0.5562488", "0.55596745", "0.55477047", "0.5507842", "0.5497168", "0.54701656", "0.5460427", "0.54490685", "0.5448808", "0.54449826", "0.54447615", "0.5421792" ]
0.68695843
0
The construction method of the page passes the page's implemented entityBlocks attribute, or the optional entityBlocks parameter, to the Template base class constructor.
def __init__(self, entityBlocks): Template.__init__(self, entityBlocks)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, entityBlocks=None):\n\t\tif entityBlocks:\n\t\t\tself.entityBlocks = entityBlocks\n\t\tTemplate.__init__(self, self.entityBlocks)", "def __init__(self,**kwargs):\n Element.__init__(self,**kwargs)\n self.setVars(['entity'],**kwargs)", "def __init__(self, template_content, section_type):\n self.helpers = Documentation()\n self.template_content = template_content\n self.section_type = section_type\n self.stype = self.section_type\n self.children = []", "def __init__(self, *args, **kwargs):\r\n if not self.model:\r\n raise ImproperlyConfigured(\"Model must be set before super(%s, self).__init__ is called\" % self.__class__.__name__)\r\n\r\n self.item_panel_template = [\r\n \"servee/wysiwyg/insert/%s/%s/_panel.html\" % (self.model._meta.app_label, self.model._meta.module_name),\r\n \"servee/wysiwyg/insert/%s/_panel.html\" % (self.model._meta.app_label),\r\n \"servee/wysiwyg/insert/_panelt.html\",\r\n ]\r\n self.item_display_template = [\r\n \"servee/wysiwyg/insert/%s/%s/_list.html\" % (self.model._meta.app_label, self.model._meta.module_name),\r\n \"servee/wysiwyg/insert/%s/_list.html\" % (self.model._meta.app_label),\r\n \"servee/wysiwyg/insert/_item_list.html\",\r\n ]\r\n self.item_detail_template = [\r\n \"servee/wysiwyg/insert/%s/%s/_detail.html\" % (self.model._meta.app_label, self.model._meta.module_name),\r\n \"servee/wysiwyg/insert/%s/_detail.html\" % (self.model._meta.app_label),\r\n \"servee/wysiwyg/insert/_item_detail.html\",\r\n ]\r\n self.item_list_template = [\r\n \"servee/wysiwyg/insert/%s/%s/_list.html\" % (self.model._meta.app_label, self.model._meta.module_name),\r\n \"servee/wysiwyg/insert/%s/_list.html\" % (self.model._meta.app_label),\r\n \"servee/wysiwyg/insert/_item_list.html\",\r\n ]\r\n self.item_render_template = [\r\n \"servee/wysiwyg/insert/%s/%s/_render.html\" % (self.model._meta.app_label, self.model._meta.module_name),\r\n \"servee/wysiwyg/insert/%s/_render.html\" % (self.model._meta.app_label),\r\n \"servee/wysiwyg/insert/_item_render.html\",\r\n ]\r\n self.item_add_template = [\r\n \"servee/wysiwyg/insert/%s/%s/_add.html\" % (self.model._meta.app_label, self.model._meta.module_name),\r\n \"servee/wysiwyg/insert/%s/_add.html\" % (self.model._meta.app_label),\r\n \"servee/wysiwyg/insert/_item_add.html\",\r\n ]\r\n\r\n super(ModelInsert, self).__init__(*args, **kwargs)", "def __init__(self,template_file, **kwargs):\r\n \r\n env = Environment(\r\n loader=PackageLoader('email_generator', 'templates'),\r\n autoescape=select_autoescape(['html', 'xml'])\r\n )\r\n template = env.get_template(template_file)\r\n self.body = template.render(**kwargs)", "def create_page(self):", "def __init__(self):\n self.block_stack = []\n\n # TODO do I still need this?\n resource_path = None\n if not resource_path:\n resource_path = os.path.join(\n os.path.split(__file__)[0], \"../pymarkdown/resources\"\n )\n InlineHelper.initialize(resource_path)", "def init_page_elements(self):\n pass", "def __init__(self):\n self.chain = [Block.genesis()]", "def test_create_page_with_whatyouneed_block(self):\n\n what_you_need_block = PageWhatYouNeedBlock.objects.create(\n **_whatyouneed_block_data)\n Page.objects.create(what_you_need_block=what_you_need_block,\n **_page_data)\n\n response = self.client.get(reverse('home'))\n self.assertEqual(response.status_code, 200)\n\n self.assertIn('title', response.context)\n self.assertIn('left_column_title', response.context)\n self.assertIn('is_touch', response.context)\n self.assertIn('is_colour', response.context)\n 
self.assertIn('middle_column_title', response.context)\n self.assertIn('middle_column_label', response.context)\n self.assertIn('middle_column_caption', response.context)\n self.assertIn('right_column_title', response.context)", "def build_nested_blocks(self):\n pass", "def create_definition(self, block_type, slug=None):\n raise NotImplementedError()", "def test_word_cloud_constructor(self):\r\n fragment = self.runtime.render(self.item_descriptor, 'student_view')\r\n\r\n expected_context = {\r\n 'ajax_url': self.item_descriptor.xmodule_runtime.ajax_url,\r\n 'element_class': self.item_descriptor.location.category,\r\n 'element_id': self.item_descriptor.location.html_id(),\r\n 'num_inputs': 5, # default value\r\n 'submitted': False # default value\r\n }\r\n self.assertEqual(fragment.content, self.runtime.render_template('word_cloud.html', expected_context))", "def create_base_image(self, builder, template, parameters):", "def __init__(self, *args, **kwargs):\n EasyBlock.__init__(self, *args, **kwargs)\n\n self.build_in_installdir = True", "def __init__(self, *args, **keywords):\n attribs = keywords if (len(args) <= 0) else list(args).pop()\n Entity.__init__(self, attribs)", "def initialize(context):\n ##code-section custom-init-top #fill in your manual code here\n ##/code-section custom-init-top\n\n # imports packages and types for registration\n import content\n\n\n # Initialize portal content\n all_content_types, all_constructors, all_ftis = process_types(\n listTypes(PROJECTNAME),\n PROJECTNAME)\n\n cmfutils.ContentInit(\n PROJECTNAME + ' Content',\n content_types = all_content_types,\n permission = DEFAULT_ADD_CONTENT_PERMISSION,\n extra_constructors = all_constructors,\n fti = all_ftis,\n ).initialize(context)\n\n # Give it some extra permissions to control them on a per class limit\n for i in range(0,len(all_content_types)):\n klassname=all_content_types[i].__name__\n if not klassname in ADD_CONTENT_PERMISSIONS:\n continue\n\n context.registerClass(meta_type = all_ftis[i]['meta_type'],\n constructors= (all_constructors[i],),\n permission = ADD_CONTENT_PERMISSIONS[klassname])\n\n ##code-section custom-init-bottom #fill in your manual code here\n ##/code-section custom-init-bottom", "def __init__(self, parent, blocks=None):\n self.parent = parent\n self.x = int(parent.block_width / 2) ## x coord of base block element\n self.y = parent.block_height\n\n ## Setup rotate variable\n self.rotate = 0\n\n if blocks:\n self.block_elements = blocks ## Relative coords\n else:\n self.block_elements = [(-1,0),(0, 0),(1,0),(2,0)] ## Relative coord", "def __init__(self, parent, connection):\n ConnectionScript.__init__(self, parent, connection)\n self.entity = Entity()", "def initialize(context):\n # Fill in modules that provide content implementations:\n from content import fundingfolder, fundingopportunity, announcement\n contentTypes, constructors, ftis = atapi.process_types(atapi.listTypes(config.PROJECTNAME), config.PROJECTNAME)\n for atype, constructor in zip(contentTypes, constructors):\n Products.CMFCore.utils.ContentInit(\n '%s: %s' % (config.PROJECTNAME, atype.portal_type),\n content_types=(atype,),\n permission=config.ADD_PERMISSIONS[atype.portal_type],\n extra_constructors=(constructor,)\n ).initialize(context)", "def __init__(self):\n\t\tself.field = \"scopeAndContent\"\n\t\tself.lang = \"eng\"\n\t\tself.normal = True", "def __init__(self, model_pool):\n super(Template, self).__init__(model_pool)\n self.nlp_model = None", "def _construct_block(self, block_info):\n layer_name = 
block_info[0]\n if layer_name=='Conv2d':\n in_channels, out_channels, kernel_size = block_info[1:]\n return nn.Conv2d(in_channels=in_channels,\n out_channels=out_channels,\n kernel_size=kernel_size)\n elif layer_name=='ReLU':\n return nn.ReLU(inplace=True)\n elif layer_name=='MaxPool2d':\n kernel_size, stride = block_info[1:]\n return nn.MaxPool2d(kernel_size=kernel_size,\n stride=stride)\n elif layer_name=='BatchNorm2d':\n num_features = block_info[1]\n return nn.BatchNorm2d(num_features=num_features)\n elif layer_name=='Linear':\n in_features, out_features = block_info[1:]\n return nn.Linear(in_features=in_features,\n out_features=out_features)\n else:\n raise Exception(\"_construct_block cannot construct block\")", "def doMakeEyeballTemplate(self):\n \"\"\"\n returnList = []\n templObjNameList = []\n templHandleList = []\n \"\"\"\n try:\n log.debug(\">>> doMakeLimbTemplate\")\n assert self.cls == 'TemplateFactory.go',\"Not a TemlateFactory.go instance!\"\n\n #Gather limb specific data and check\n #==============\n mi_helper = self._mi_module.helper\n if not mi_helper:\n raise StandardError,\"No helper found!\"\n\n b_irisControl = mi_helper.irisHelper\n b_pupilControl = mi_helper.pupilHelper\n\n mi_helper.parent = self._mi_module.templateNull\n except Exception,error:raise Exception,\"doMakeEyeballTemplate | {0}\".format(error)\n\n\n return True", "def __init__(self):\n self.unconfirmed_transactions = [] \n self.chain = []\n self.create_genesis_block()", "def construct_xblock(self, block_type, scope_ids, field_data=None, *args, **kwargs):\n return self.construct_xblock_from_class(\n cls=self.load_block_type(block_type),\n scope_ids=scope_ids,\n field_data=field_data,\n *args, **kwargs\n )", "def __init__(self, *args, **kwargs):\n kwargs.pop('widget_syntax')\n\n super(TemplateForm, self).__init__( *args, **kwargs)\n print self.fields", "def __init__(self, template_name, **kwargs):\n self.template_name = template_name\n self.model = dict(**kwargs)", "def initialize(self, inputs, base_block, n_repeats, *args, **kwargs):\n for repeat in range(n_repeats):\n if args:\n for i, item in enumerate(args):\n # Make block\n if isinstance(item, dict):\n block_constructor = item.pop('base_block', None) or base_block\n block_args = {'inputs': inputs, **dict(Config(kwargs) + Config(item))}\n block = block_constructor(**block_args)\n elif isinstance(item, nn.Module):\n block = item\n else:\n raise ValueError(f'Positional arguments of Block must be either dicts or nn.Modules, '\n f'got {type(item)} instead!')\n\n inputs = self.initialize_block(inputs, block, f'repeat{repeat}-args{i}')\n\n else:\n # Make block\n block = base_block(inputs=inputs, **kwargs)\n inputs = self.initialize_block(inputs, block, f'repeat{repeat}')", "def setUp(self):\n super().setUp()\n # to adjust the block to be tested, update block_name_to_be_tested before calling setup_course.\n self.block_name_to_be_tested = 'html_block'" ]
[ "0.8255997", "0.6097554", "0.574299", "0.56744987", "0.5653919", "0.5599532", "0.55128586", "0.5449456", "0.5448524", "0.54456073", "0.53858835", "0.53586984", "0.52999777", "0.5286158", "0.527467", "0.52528787", "0.5189372", "0.5186057", "0.5185921", "0.5180944", "0.51229626", "0.5118272", "0.5109273", "0.5092073", "0.5078207", "0.50756866", "0.50669307", "0.50580776", "0.5054754", "0.50493234" ]
0.8411368
0
Terminate the server process
def terminate(self): if self.proc: logging.info("Terminating Proxy Server...") self.proc.terminate() self.proc = None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def terminate(self):\n print('Terminating Revshell thread.')\n self.server.close()", "def stop():\n global server_handle\n server_handle.kill()\n server_handle = None", "def exit(self):\n self.tcp_server_exit_event.set()\n for _, process in self.name_to_process.items():\n process.terminate()", "def terminate(self):\n self._stop_proc(signal.SIGTERM)", "def terminate(self):\n self.sock.close()\n try:\n self.process.terminate()\n self.process.wait(timeout=self.STOP_TIMEOUT)\n except TimeoutExpired:\n self.process.kill()\n shutil.rmtree(self.rundir)", "def stop(self):\n self.shutdown_ = True\n if self.running():\n os.kill(self.server_pid_, signal.SIGTERM)", "def stop_server(request):\n def stop_callback():\n global process\n process.terminate()\n request.addfinalizer(stop_callback)", "def stop():\n server = current_server()\n server.stop()", "def terminate_server(self, port):\n proc = self.processes.pop(port, None)\n if proc is None:\n raise ValueError(f\"Server for port {port} does not exists.\"\n \"It might have been closed already.\"\n )\n proc.terminate()", "def Quit(self):\n t = threading.Thread(target=self.server.shutdown)\n t.start()", "def terminate(self):\n self._proc.terminate()", "def close(self):\n self._server.shutdown()\n self._server = None", "def stop(self):\n\n self._stop_server = True\n\n self.join()\n self.httpd.server_close()", "def stop() -> None:\n global _server\n if _server:\n try:\n _server.shutdown()\n except Exception:\n pass", "def webserver_stop():\n run(\"kill $(cat %s)\" % GUNICORN_PIDFILE)\n run(\"rm %s\" % GUNICORN_PIDFILE)", "def stop(self):\n self.logger.info('Shutting down SimpleHTTPServer')\n stop_cmd = \"pkill -9 -f '{0}'\".format(self.server_cmd)\n self._execute_command(stop_cmd)", "def terminate(self):\n while self._conns:\n conn = self._conns.pop()\n try:\n conn.send((self.EXIT, ()))\n except BrokenPipeError:\n pass\n conn.close()\n while self._processes:\n p = self._processes.pop()\n p.join(1)\n if p.exitcode is None:\n # Force termination if necessary\n p.terminate()\n p.join()\n self._running = False", "async def quit(self):\n await self.kill_server()\n await self.logout()", "def terminate(self):\n self.send_signal(signal.SIGTERM)", "def kill(self):\n if self.server:\n try:\n self.server.exit = True\n self.server.p.kill()\n self.server.p.wait()\n except OSError:\n pass\n self.dead = True", "async def kill_server(self):\n if await self._kill():\n await self.send('Server killed')", "def stop_server(self):\r\n # TODO-SDH Add way to stop the server from running.\r", "def shutdown(self):\n try:\n self._request(\"POST /shutdown\")\n time.sleep(0.300)\n except requests.exceptions.ConnectionError:\n pass\n if self._process and self._process.poll() is None:\n self._process.kill()\n if self._session:\n self._session.close()", "def stop():\n tidyup()\n shutdown_server()\n return \"Parando Servidor\"", "def stop():\n tidyup()\n shutdown_server()\n return \"Parando Servidor\"", "def stop():\n tidyup()\n shutdown_server()\n return \"Parando Servidor\"", "def terminate(self):\n self._worker.kill()", "def destroy_server(server):\n server.stop_and_destroy()", "def force_stop(self):\n\n # Stopping thread\n self.quit()\n\n # Killing all running processes\n ProcessManager(self.cf_process).close_all_child()\n ProcessManager(self.server_process).close_all_child()", "def Quit(self, timeout=5.0):\n assert self._process, 'server was not started'\n if self._process.poll() is None:\n try:\n urllib2.urlopen(self.url + QUIT_PATH)\n except urllib2.URLError:\n\n\n pass\n\n 
finish_time = time.time() + timeout\n while time.time() < finish_time and self._process.poll() is None:\n time.sleep(0.2)\n if self._process.returncode is None:\n logging.warning('api_server did not quit cleanly, killing')\n self._process.kill()" ]
[ "0.7887827", "0.7732045", "0.75751805", "0.74291086", "0.74241346", "0.7410972", "0.73573947", "0.7319716", "0.72791415", "0.7265624", "0.725529", "0.7247277", "0.7217128", "0.72014165", "0.71880674", "0.7184858", "0.71616644", "0.71488863", "0.7148135", "0.7137019", "0.7064839", "0.70593995", "0.70140004", "0.70125234", "0.70125234", "0.70125234", "0.70089906", "0.7006845", "0.6963901", "0.69482046" ]
0.77705073
1
Create an RPC server that uses a websocket that connects to a proxy.
def websocket_proxy_server(url, key=""): def create_on_message(conn): def _fsend(data): data = bytes(data) conn.write_message(data, binary=True) return len(data) on_message = rpc._CreateEventDrivenServer(_fsend, "WebSocketProxyServer") return on_message @gen.coroutine def _connect(key): conn = yield websocket.websocket_connect(url) on_message = create_on_message(conn) temp = _server_env() # Start connecton conn.write_message(struct.pack('@i', RPC_MAGIC), binary=True) key = "server:" + key conn.write_message(struct.pack('@i', len(key)), binary=True) conn.write_message(key.encode("utf-8"), binary=True) msg = yield conn.read_message() assert len(msg) >= 4 magic = struct.unpack('@i', msg[:4])[0] if magic == RPC_MAGIC + 1: raise RuntimeError("key: %s has already been used in proxy" % key) elif magic == RPC_MAGIC + 2: logging.info("RPCProxy do not have matching client key %s", key) elif magic != RPC_MAGIC: raise RuntimeError("%s is not RPC Proxy" % url) logging.info("Connection established") msg = msg[4:] if msg: on_message(bytearray(msg), 3) while True: try: msg = yield conn.read_message() if msg is None: break on_message(bytearray(msg), 3) except websocket.WebSocketClosedError as err: break logging.info("WebSocketProxyServer closed...") temp.remove() ioloop.IOLoop.current().stop() ioloop.IOLoop.current().spawn_callback(_connect, key) ioloop.IOLoop.current().start()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def client_server(request):\n def build(handler, host=\"localhost\", port=None, *, loop=None):\n loop = loop or asyncio.get_event_loop()\n port = port or get_next_port(host)\n\n server = serve(handler, host, port, klass=WebSocket, loop=loop)\n server = loop.run_until_complete(server)\n\n client = connect(\"ws://{}:{}\".format(host, port))\n client = loop.run_until_complete(client)\n return client, server\n\n return build", "def create(addr='127.0.0.1', port=0, options=None):\n if options is None:\n options = {}\n\n backend = MitmProxy(addr, port, options)\n\n t = threading.Thread(name='Selenium Wire Proxy Server', target=backend.serve_forever)\n t.daemon = not options.get('standalone')\n t.start()\n\n addr, port, *_ = backend.address()\n log.info('Created proxy listening on %s:%s', addr, port)\n\n return backend", "async def main():\n await serve_websocket(handle_server, SERVER, PORT, ssl_context=None)", "async def create_websocket_server(sock, filter=None): # pylint: disable=W0622\n ws = Websocket()\n await ws.start_server(sock, filter=filter)\n return ws", "def start(self, host: str = '127.0.0.1', port: int = 6543) -> None:\n\n def _go(handler):\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n\n start_server = websockets.serve(handler, host, port)\n\n asyncio.get_event_loop().run_until_complete(start_server)\n asyncio.get_event_loop().run_forever()\n\n if not self.running:\n # test if other instance is already running\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as test_sock:\n status = test_sock.connect_ex((host, port))\n\n # check for both Windows and Linux status codes\n if status in {10061, 111}: # nothing running\n server_thread = threading.Thread(\n target=_go, args=(ProxyServer._ws_handler,))\n server_thread.start()\n\n logger.info(f'Proxy server running on ws://{host}:{port}/')\n self.running = True\n else:\n logger.warning(\n f'Connection test to {host}:{port} returned status {status}, '\n 'proxy server not started')", "def setup_websocket(ws_url, service_account_file, audience, router_password, source_port, dest_ip, dest_port):\n def on_message(ws, message):\n \"\"\"Handle a message\"\"\"\n handle_message(ws, message, router_password, source_port, dest_ip, dest_port)\n\n def on_error(ws, error):\n \"\"\"Handle an error by exiting or closing if it is a KeyboardInterrupt (Ctrl+C)\"\"\"\n if type(error) is KeyboardInterrupt:\n logger.info('Cancel requested (Ctrl+C), closing connection.')\n ws.close()\n else:\n logger.error(\"The following error occurred:\\n{error}\".format(error=error))\n sys.exit(1)\n\n def on_close(ws):\n \"\"\"Handle the WebSocket close\"\"\"\n logger.info('WebSocket closed.')\n\n def on_open(ws):\n \"\"\"Handle the WebSocket opening\"\"\"\n logger.info('WebSocket open, sending authentication.')\n authenticate(ws, service_account_file, audience)\n ws.send(STATUS_COMMAND_FORMAT.format(status_payload=json.dumps(get_status(router_password, source_port, dest_ip, dest_port))))\n\n return websocket.WebSocketApp(ws_url,\n on_open=on_open,\n on_message=on_message,\n on_error=on_error,\n on_close=on_close)", "def start_server():\n host = 'localhost'\n port = 8080\n listener = socket.socket(socket.AF_INET)\n listener.bind((host, port))\n print 'Serving on {0}:{1}.'.format(host, port)\n listener.listen(0)\n while 1:\n connection, address = listener.accept()\n print 'Got connection from {}'.format(address)\n threading.Thread(\n target=Proxy, args=(connection, )).run()", "def Connect(server, port):\n addr = 'http://%s:%d' % (server, port)\n 
logging.debug('Connecting to RPC server at %s', addr)\n return jsonrpclib.ServerProxy(addr, allow_none=True)", "def make_server(connect_handler=None, message_handler=None, disconnect_handler=None):\n class Server(tornado.websocket.WebSocketHandler):\n def open(self):\n print('new connection')\n if connect_handler:\n return connect_handler(self)\n\n def on_message(self, message):\n if message_handler:\n return message_handler(json.loads(message), self)\n\n def on_close(self):\n print('connection closed')\n if disconnect_handler:\n return disconnect_handler(self)\n\n def check_origin(self, origin):\n return True\n return Server", "async def server_main(loop, proxy_config, server_config):\n\n controller = Controller(\n MessageProxy(proxy_config),\n hostname=server_config['listen']['addr'],\n port=server_config['listen']['port'],\n )\n controller.start()", "def Connect(server, port=common_lib.SERVER_PORT):\n addr = 'https://%s:%d' % (server, port)\n logging.debug('Connecting to RPC server at %s', addr)\n return jsonrpclib.ServerProxy(addr, allow_none=True)", "def _ws_connect(self):\n\n return websocket.websocket_connect(\n 'ws://localhost:{}{}'.format(self.get_http_port(), self.request)\n )", "def __init__(self,\n *,\n qrcode: bool = False,\n host: str = \"0.0.0.0\",\n port: int = 8000,\n logger: logging.Logger = logging.getLogger(\n 'mvt.phone_sensor'),\n log_level: int = logging.WARN,\n proxy_client_from: Optional[str] = None):\n\n self._ws: Optional[websockets.WebSocketServerProtocol] = None\n self._out: Queue[Union[websockets.Data, ClientDisconnect]] = Queue()\n self._waiting = False\n self._qrcode = qrcode\n self._proxy_client_from = proxy_client_from\n self.logger = logger\n self.logger.setLevel(log_level)\n self.client_connected = False\n self.loop = asyncio.new_event_loop()\n self._in: asyncio.Queue[str] = asyncio.Queue(loop=self.loop)\n self.stop_flag = self.loop.create_future()\n\n self.server_thread = Thread(target=self._start_server,\n kwargs={'host': host, 'port': port},\n daemon=True)\n self.server_thread.start()\n assert self._out.get() == 'ready', \"server failed to start\"", "def start_server(self):\n self.logger.info(\"Starting WebSocket server on port %d\" % self.port)\n http_server = Thread(target=tornado.ioloop.IOLoop.instance().start)\n http_server.start()", "def websocket(self) -> Websocket:\n self.__http_client.data_snapshot()\n host_uri = f'ws://{self.__http_client.host_ip}/api/v1/data/stream'\n subprotocols = [f'SessionToken_{self.__http_client.session_token}', \"object\"]\n return Websocket(host_uri, subprotocols, timeout=self.__http_client.request_timeout)", "def make_tcp_proxy(server_addr, dest_ddr):\n handler = partial(proxy_to, dest_addr)\n proxy_server = StreamServer(server_addr, handler)\n return proxy_server", "def start_websocket_server(self, addr, port):\n app = SLSApplication(self, default_host=addr)\n app.listen(port)\n log.info(f\"Serving SLS/Websocket on ({addr}, {port})\")\n tornado.ioloop.IOLoop.current().start()", "def main():\n global APP\n APP = make_app()\n APP.clients = [] # global list of all connected websocket clients\n APP.printer = Serial('/dev/ttyUSB0', baudrate=19200)\n APP.listen('1337', '0.0.0.0')\n log('Listening on http://0.0.0.0:1337')\n tornado.ioloop.IOLoop.current().start()", "def start(self):\n #url = '{}://{}:{}/'.format('http',\n # self.ip,\n # self.port)\n #self.service_info = ServiceInfo(\n # '_webthing._sub._http._tcp.local.',\n # '{}._http._tcp.local.'.format(self.name),\n # address=socket.inet_aton(self.ip),\n # 
port=self.port,\n # properties={\n # 'url': url,\n # },\n # server='{}.local.'.format(socket.gethostname()))\n #self.zeroconf = Zeroconf()\n #self.zeroconf.register_service(self.service_info)\n\n # If WebSocketS used and NOT running in thread, and WebServer IS\n # running in thread make shure WebServer has enough stack size to\n # handle also the WebSocket requests.\n log.info('Starting Web Server')\n self.server.Start(threaded=srv_run_in_thread, stackSize=8192)", "def get_websocket(host, port, route='/', ssl=False):\n client = MessageBusClient(host, port, route, ssl)\n client.run_in_thread()\n return client", "def websocket_servient():\n\n ws_port = find_free_port()\n ws_server = WebsocketServer(port=ws_port)\n\n servient = Servient(catalogue_port=None)\n servient.add_server(ws_server)\n\n @tornado.gen.coroutine\n def start():\n raise tornado.gen.Return((yield servient.start()))\n\n wot = tornado.ioloop.IOLoop.current().run_sync(start)\n\n property_name_01 = uuid.uuid4().hex\n property_name_02 = uuid.uuid4().hex\n action_name_01 = uuid.uuid4().hex\n event_name_01 = uuid.uuid4().hex\n\n td_dict = {\n \"id\": uuid.uuid4().urn,\n \"name\": uuid.uuid4().hex,\n \"properties\": {\n property_name_01: {\n \"observable\": True,\n \"type\": \"string\"\n },\n property_name_02: {\n \"observable\": True,\n \"type\": \"string\"\n }\n },\n \"actions\": {\n action_name_01: {\n \"input\": {\n \"type\": \"object\"\n },\n \"output\": {\n \"type\": \"string\"\n },\n }\n },\n \"events\": {\n event_name_01: {\n \"type\": \"string\"\n }\n },\n }\n\n td = ThingDescription(td_dict)\n\n exposed_thing = wot.produce(td.to_str())\n exposed_thing.expose()\n\n @tornado.gen.coroutine\n def action_handler(parameters):\n input_value = parameters.get(\"input\")\n arg_b = input_value.get(\"arg_b\") or uuid.uuid4().hex\n raise tornado.gen.Return(input_value.get(\"arg_a\") + arg_b)\n\n exposed_thing.set_action_handler(action_name_01, action_handler)\n\n yield servient\n\n @tornado.gen.coroutine\n def shutdown():\n yield servient.shutdown()\n\n tornado.ioloop.IOLoop.current().run_sync(shutdown)", "def __init__(self, target_addr: str, target_port: int, max_worker_threads: int = 2):\n super().__init__(max_worker_threads=max_worker_threads)\n self._addr: str = target_addr\n self._port: int = target_port\n self._websocket: WebSocketClientProtocol = None\n self._is_running: bool = False", "def new(configuration: Mapping[str, Any], loop: AbstractEventLoop) \\\n -> ProxyProtocol:\n return SocksProxy(loop)", "def serve_proxy_forever(local_port, remote_addr, remote_port):\n\n WebInterfaceHandlerLocal = get_WebInterfaceHandlerLocal(remote_addr, remote_port)\n\n def get_server_on_port(port, use_ssl=False):\n server = HTTPServer((\"\", port), WebInterfaceHandlerLocal)\n if use_ssl:\n # TODO this is just copy&paste from Woolnote and is non-functional in its current form; it's just there for the future in case someone wants this proxy expose it as https\n try:\n print(\"use_ssl=True, trying\")\n ssl_cert_path = os.path.join(config.PATH_DIR_FOR_SSL_CERT_PEM, config.FILE_CERT_PEM)\n ssl_key_path = os.path.join(config.PATH_DIR_FOR_SSL_KEY_PEM, config.FILE_KEY_PEM)\n server.socket = ssl.wrap_socket(server.socket, certfile=ssl_cert_path,\n keyfile=ssl_key_path, server_side=True,\n suppress_ragged_eofs=True)\n # TODO: for some reason, suppress_ragged_eofs is ignored\n except:\n print(\"use_ssl=True, FAILED!\")\n else:\n print(\"use_ssl=False\")\n print(\"returning server\")\n return server\n\n def serve_on_port(port, use_ssl=False):\n server = 
get_server_on_port(port, use_ssl)\n print(\"trying serve_forever\")\n server.serve_forever()\n\n server_http = get_server_on_port(local_port, False)\n\n def serve_forever(*servers):\n # https://stackoverflow.com/questions/60680/how-do-i-write-a-python-http-server-to-listen-on-multiple-ports\n import select\n while True:\n r, w, e = select.select(servers, [], [], 10)\n for server in servers:\n if server in r:\n server.handle_request()\n\n serve_forever(server_http)", "def entry_point(proxy_port_number):\n\n setup_sockets(proxy_port_number)\n print(\"*\" * 50)\n print(\"[entry_point] Implement me!\")\n print(\"*\" * 50)\n return None", "def start_proxy_handler(self) -> None:\n\n def proxy_handler(listener: socket.socket) -> None:\n sock = listener.accept()[0]\n with self.server_ctx.wrap_socket(sock, server_side=True) as client_sock:\n upstream_sock = socket.create_connection(\n (self.destination_server_host, self.destination_server_port)\n )\n self._read_write_loop(client_sock, upstream_sock)\n upstream_sock.close()\n client_sock.close()\n\n self._start_server(proxy_handler)", "def start(turn_handler):\n\n if os.environ.get('BOTBOX_SECRET'):\n print('Using env secret:', os.environ['BOTBOX_SECRET'])\n headers = {'Authorization': os.environ['BOTBOX_SECRET']}\n elif len(sys.argv) > 1:\n print('Using cli secret:', sys.argv[1])\n headers = {'Authorization': sys.argv[1]}\n else:\n print('Using no authentication')\n headers = []\n\n # get the URL for the server from an environment variable if it is set,\n # otherwise use the default localhost\n if os.environ.get('BOTBOX_SERVER'):\n url = (WS_SERVER_SCHEME + '://'\n + os.environ['BOTBOX_SERVER'] + ':' + WS_SERVER_PORT)\n else:\n url = WS_SERVER_SCHEME + '://' + WS_SERVER_URL + ':' + WS_SERVER_PORT\n\n print(\"Connecting to:\", url)\n\n ws = websocket.WebSocketApp(\n url,\n on_open = _on_open,\n on_message = lambda ws, msg: _on_message(ws, msg, turn_handler),\n on_error = _on_error,\n on_close = _on_close,\n header = headers\n )\n\n ws.run_forever()", "def start_server():\n server = WebsocketServer(9001, host='0.0.0.0')\n server.set_fn_message_received(message_received)\n server.set_fn_client_left(client_left)\n print(\"Started\")\n server.run_forever()", "def __init__(self, addr=(\"localhost\", 8000)):\n\n Thread.__init__(self)\n RPCBaseProxy.__init__(self)\n\n self.__instances = []\n\n self.__server = SimpleXMLRPCServer(addr=addr, logRequests=False, requestHandler=SimpleXMLRPCRequestHandler)\n self.__server.register_instance(self)", "def create_server(\n handle_event: EventCallback,\n host: str = \"0.0.0.0\",\n port: int = 0,\n ssl_context: Optional[SSLContext] = None,\n) -> Server:\n return Server(handle_event, host, port, ssl_context)" ]
[ "0.6948238", "0.6761193", "0.6707289", "0.66721493", "0.65216213", "0.64277166", "0.6424164", "0.63957137", "0.6342866", "0.63294774", "0.6319867", "0.62677985", "0.6209199", "0.61780703", "0.61010545", "0.609926", "0.6053257", "0.6047363", "0.6036524", "0.6031591", "0.6018185", "0.60020804", "0.5997754", "0.5992707", "0.5953333", "0.59464824", "0.5927569", "0.59157777", "0.5914288", "0.5909134" ]
0.74283665
0
Initializes the embeddings, depending on the embedding type.
def _initialize_embeddings(self): with tf.variable_scope(self.scope): init_temporal_s = np.sqrt( 6. / (self._config.nact_dict["num_s"] + self._config.ndim_emb + 1)) self.w_dt = tf.get_variable( name="w_dt", shape=[1, self._config.ndim_emb], initializer=tf.initializers.random_uniform( -init_temporal_s, init_temporal_s)) if self._config.embedding_type not in self._embedding_classes: raise ValueError( f"Unknown embedding type: {self._config.embedding_type}.") self.embedding = self._embedding_classes[self._config.embedding_type]( self._config, self._embed_dim_dict)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init(self, preload_embeddings):\n\t\tself.__find_metadata()\n\t\tself.__parse_embedding_metadata()\n\t\tself.__parse_model_metadata()\n\t\t# should we load all of the word embeddings into memory now?\n\t\tif preload_embeddings:\n\t\t\tlog.info(\"Preloading word embeddings ...\")\n\t\t\tfor embed_id in self.embedding_meta:\n\t\t\t\tself.get_embedding(embed_id)\t\n\t\t\tlog.info(\"Preloaded %d word embeddings\" % len(self.embedding_cache))", "def init_emb(self):\n # Initialize users and items' embeddings\n nn.init.xavier_uniform_(self.user_embedding.weight)\n nn.init.xavier_uniform_(self.item_embedding.weight)", "def set_embeddings(self):", "def randomly_init_embeddings(self, embed_dim):\n self.embed_dim = embed_dim\n self.embeddings = np.random.rand(self.size(), embed_dim)\n for term in [self.pad_term, self.unk_term, self.eos_term]:\n self.embeddings[self.get_id(term)] = np.zeros([self.embed_dim])", "def init_embedding(self):\n self.embedding.weight.data.uniform_(-1./self.num_embeddings, 1./self.num_embeddings)", "def __init__(self, num_words, embedding_size, use_cuda):\n super(StandardEmbedding, self).__init__()\n self.embedding_size = embedding_size\n self.num_hash_functions = 0\n self.embeddings = nn.Embedding(num_words, embedding_size)\n self.embeddings = self.embeddings.cuda() if use_cuda else self.embeddings", "def embeddings_layers_init(self):\n\n user_embeddings = tf.keras.layers.Embedding(\n self.n_users, self.user_dim, input_length=1)\n\n item_embeddings = tf.keras.layers.Embedding(\n self.n_items, self.item_dim, input_length=1)\n\n return user_embeddings, item_embeddings", "def init_embedding(embeddings):\n bias = np.sqrt(3.0 / embeddings.size(1))\n torch.nn.init.uniform_(embeddings, -bias, bias)", "def setUp(self):\n self._vocab = np.array([\"one\", \"two\", \"three\", \"four\",\n \"five\", \"six\", \"seven\", \"eight\", \"nine\"])\n self._embedding_dim = 2\n\n self._default_config = {\n \"vocab\": self._vocab,\n \"embedding_dim\": self._embedding_dim,\n \"position_encoding\": None\n }", "def setup_embeddings(self):\n with vs.variable_scope(\"embeddings\"):\n vec_embeddings = tf.get_variable(\"embeddings\", initializer=self.pretrained_embeddings, trainable=False)\n context_batch_embeddings = tf.nn.embedding_lookup(vec_embeddings, self.context_placeholder)\n question_batch_embeddings = tf.nn.embedding_lookup(vec_embeddings, self.question_placeholder)\n context_embeddings = tf.reshape(context_batch_embeddings,\n (-1, self.max_context_len, self.vocab_dim))\n question_embeddings = tf.reshape(question_batch_embeddings,\n (-1, self.max_question_len, self.vocab_dim))\n return context_embeddings, question_embeddings", "def __init__(self):\n\n # Load embeddings index\n self.embeddings = self.load()\n self.console = Console()", "def load_pretrained_embeddings(self, embeddings):\r\n self.embedding.weight = nn.Parameter(embeddings)", "def get_embeddings(self, entities, type='entity'):\n return None", "def load_pretrained_embeddings(self, embeddings):\n self.embedding.weight = nn.Parameter(embeddings)", "def init_embeddings(self, weight, words):\n # wrap in tensor\n if isinstance(weight, list):\n weight = torch.Tensor(weight).float()\n if isinstance(weight, np.ndarray):\n weight = torch.from_numpy(weight).float()\n # check embedding size\n if weight.size(1) != self.embedding_dim:\n raise ValueError(\"Mismatched embedding dim {} for model \"\n \"with dim {}\".format(weight.size(1),\n self.embedding_dim))\n\n self_idxs, other_idxs = [], []\n for other_idx, word in enumerate(words):\n 
try:\n self_idxs.append(self.d.s2i[word])\n other_idxs.append(other_idx)\n except KeyError:\n pass\n\n other_idxs = torch.LongTensor(other_idxs)\n self_idxs = torch.LongTensor(self_idxs)\n self.weight.data[self_idxs] = weight[other_idxs]", "def __init__(self, embeddings, char_embeddings=None,\n pos_embeddings=None, **kwargs):\n super(WordRepresentationLayer, self).__init__()\n self.embeddings = embeddings\n self.embedding_dim = embeddings.embedding_dim\n self.char_embeddings = char_embeddings\n self.train_char_embeddings = kwargs.get('train_char_embeddings',\n False)\n self.use_cuda = kwargs.get('cuda', True)\n\n if self.char_embeddings:\n self.char_merging_method = kwargs.get('char_merging_method', 'sum')\n char_hidden_dim = kwargs.get('char_hidden_dim', 50)\n bidirectional = kwargs.get('bidirectional', False)\n\n if self.char_merging_method == 'lstm':\n self.char_encoder = LSTMCharEncoder(\n char_embeddings,\n char_hidden_dim,\n bidirectional,\n train_char_embeddings=self.train_char_embeddings,\n cuda=self.use_cuda)\n\n self.embedding_dim += char_hidden_dim\n\n elif self.char_merging_method in ['mean', 'sum']:\n self.char_encoder = LinearCharEncoder(\n char_embeddings,\n train_char_embeddings=self.train_char_embeddings,\n char_merging_method=self.char_merging_method)\n\n self.embedding_dim += self.char_embeddings.embedding_dim\n else:\n raise NotImplementedError\n\n self.pos_embeddings = pos_embeddings\n if self.pos_embeddings:\n self.embedding_dim += self.pos_embeddings.embedding_dim", "def construct_embedding(self):\n i = 0\n self.load_dicts()\n embedding_shape = (max(self.word2idx.values()) + 1,\n self.embedding_size)\n self.embedding = np.zeros(embedding_shape)\n\n with open(self.config.word_vec_fi_glove, 'r') as fi:\n for line in fi:\n word_vec = line.split(\" \")[1:]\n self.embedding[i, :] = np.array(word_vec, dtype=np.float32)\n i += 1\n\n self.write_embedding()", "def build_word_embeddings(self):\n if self.mode == \"encode\":\n # Word embeddings are fed from an external vocabulary which has possibly\n # been expanded (see vocabulary_expansion.py).\n encode_emb1 = tf.placeholder(tf.float32, (\n None, None, self.config.word_embedding_dim), \"encode_emb1\")\n # No sequences to decode.\n encode_emb2 = tf.placeholder(tf.float32, (\n None, None, self.config.word_embedding_dim), \"encode_emb2\")\n elif self.mode == \"test\":\n encode_emb1 = tf.placeholder(tf.float32, (\n None, None, self.config.word_embedding_dim), \"encode_emb1\")\n # No sequences to decode.\n encode_emb2 = tf.placeholder(tf.float32, (\n None, None, self.config.word_embedding_dim), \"encode_emb2\")\n else:\n word_emb = tf.get_variable(\n name=\"word_embedding\",\n shape=[self.config.vocab_size, self.config.word_embedding_dim],\n initializer=self.uniform_initializer)\n\n encode_emb1 = tf.nn.embedding_lookup(word_emb, self.encode_ids1)\n encode_emb2 = tf.nn.embedding_lookup(word_emb, self.encode_ids2)\n\n\n self.encode_emb1 = encode_emb1\n self.encode_emb2 = encode_emb2", "def __init__(self):\n # Initialise class attributes (visibility ease)\n self.__corpus__ = None\n self.__pron_det_pos_words__ = None\n self.__triples_corpus__ = None\n self.__entities_in_doc__ = None\n self.__wvmodel__ = None\n \n # For purpose of parsing relation triplets later\n # Load pretrained embedding model\n #plog('Loading pretrained word embeddings. 
This will take some time to load...')\n #self.__wvmodel__ = api.load('fasttext-wiki-news-subwords-300')\n #plog('Pretrained word embeddings loaded!')", "def __init__(self,\n embedder: Embedder,\n words: Set[str] = None,\n init_cache_size: int = 1,\n max_cache_size: int = None,\n cuda: bool = False):\n self.max_cache_size = max_cache_size or sys.maxsize\n if init_cache_size > self.max_cache_size:\n raise ValueError('Initial cache size cannot be larger than maximum cache size')\n words = words or set()\n word_to_index = self._init_word_to_index(words)\n super(WordBatchEmbedder, self).__init__(embedder, word_to_index, init_cache_size=init_cache_size, cuda=cuda)\n self.cached_words = set(self.word_to_index.keys())\n\n self.num_hits = 0\n self.num_misses = 0\n self.num_batches = 0", "def __init__(self):\n # super(MultiEmbedding,self).__init__()\n HyperParameters.__init__(self)", "def init_weights(self):\n\n super().init_weights()\n\n init_type = None if self.init_cfg is None else self.init_cfg.get(\n 'type', None)\n if init_type != 'Pretrained' and self.with_tsa:\n for module in [\n self.fusion.feat_fusion, self.fusion.spatial_attn1,\n self.fusion.spatial_attn2, self.fusion.spatial_attn3,\n self.fusion.spatial_attn4, self.fusion.spatial_attn_l1,\n self.fusion.spatial_attn_l2, self.fusion.spatial_attn_l3,\n self.fusion.spatial_attn_add1\n ]:\n kaiming_init(\n module.conv,\n a=0.1,\n mode='fan_out',\n nonlinearity='leaky_relu',\n bias=0,\n distribution='uniform')", "def instantiate_weights(self):\n with tf.variable_scope(\"embedding_projection\"), tf.device('/cpu:0'): # embedding matrix\n self.Embedding = tf.get_variable(\"Embedding\", shape=[self.vocab_size, self.embed_size],\n initializer=self.initializer)\n # self.Embedding_label = tf.get_variable(\"Embedding_label\", shape=[self.num_classes, self.embed_size],\n # dtype=tf.float32) # ,initializer=self.initializer\n # self.W_projection = tf.get_variable(\"W_projection\", shape=[self.sequence_length * self.d_model, self.num_classes],\n # initializer=self.initializer) # [embed_size,label_size]\n # self.b_projection = tf.get_variable(\"b_projection\", shape=[self.num_classes])", "def _add_pre_trained_embedding(self):\n\n if self.embedding_type['type'] == 'glove':\n self.logging.info('use pre-trained glove word2vec')\n # a. load pre trained glove\n GLOVE_DIR = '../data/glove_pretrained/glove.6B'\n glove_suffix_name = 'glove.6B.' + str(self.embedding_size) + 'd.txt'\n import os\n import numpy as np\n\n embeddings_index = {}\n f = open(os.path.join(GLOVE_DIR, glove_suffix_name)) # 'glove.6B.100d.txt'))\n for line in f:\n values = line.split()\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n embeddings_index[word] = coefs\n f.close()\n\n self.logging.info('')\n self.logging.info('Found %s word vectors.' % len(embeddings_index))\n\n # b. compute embedding matrix\n embedding_matrix = np.zeros((len(self.word_index) + 1, self.embedding_size))\n cnt = 0\n for word, i in self.word_index.items():\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector # words not found in embedding index will be all-zeros.\n else:\n # self.logging.info('token in train missing in word2vec: ' + str(word))\n cnt += 1\n self.logging.info('total tokens missing: ' + str(cnt) + ' / ' + str(len(self.word_index)))\n\n # c. 
build embedding layer\n from keras.layers import Embedding\n embedding_layer = Embedding(len(self.word_index) + 1,\n self.embedding_size,\n weights=[embedding_matrix],\n input_length=self.maxlen,\n trainable=False)\n\n elif self.embedding_type['type'] == 'gensim':\n self.logging.info('use pre-trained gensim word2vec')\n\n import gzip\n import gensim\n from keras.layers import Embedding\n import numpy as np\n\n # fname = '../data/word2vec_pretrained/motors/d_300_k_712904_w_6_e_60_v_motors'\n # fname = '../data/word2vec_pretrained/fashion/d_300_k_1341062_w_6_e_70_v_fashion'\n\n self.logging.info('load word2vec path: ' + str(self.embedding_type['path']))\n model = gensim.models.Word2Vec.load(self.embedding_type['path'])\n pretrained_weights = model.wv.syn0\n vocab_size, vector_dim = pretrained_weights.shape\n\n method = 3\n if method == 1:\n self.logging.info('word2vec attempt to fit into embedding layer - middle complex')\n # convert the wv word vectors into a numpy matrix that is suitable for insertion\n # into our TensorFlow and Keras models\n\n embedding_matrix = np.zeros((len(model.wv.vocab), vector_dim))\n for i in range(len(model.wv.vocab)):\n embedding_vector = model.wv[model.wv.index2word[i]]\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector\n\n embedding_layer = Embedding(input_dim=embedding_matrix.shape[0],\n output_dim=embedding_matrix.shape[1],\n # input_length=self.maxlen,\n weights=[embedding_matrix],\n trainable=False)\n elif method == 2:\n self.logging.info('word2vec simple embedding matching - simple complex')\n embedding_layer = Embedding(input_dim=vocab_size,\n output_dim=vector_dim,\n input_length=self.maxlen,\n weights=[pretrained_weights],\n trainable=False)\n elif method == 3:\n\n self.logging.info('word2vec match using word_index from keras tokenizer - as used in glove match above')\n # b. compute embedding matrix\n\n # sd = 1 / np.sqrt(len(self.word_index) + 1)\n # embedding_matrix = np.random.normal(0, scale=sd, size=(len(self.word_index) + 1, self.embedding_size))\n\n embedding_matrix = np.zeros((len(self.word_index) + 1, self.embedding_size))\n cnt = 0\n for word, i in self.word_index.items():\n if word in model.wv:\n embedding_vector = model.wv[word]\n embedding_matrix[i] = embedding_vector\n else:\n # self.logging.info('token in train missing in word2vec: ' + str(word))\n cnt += 1\n self.logging.info('total tokens missing: ' + str(cnt))\n\n\n # c. 
build embedding layer\n from keras.layers import Embedding\n embedding_layer = Embedding(len(self.word_index) + 1,\n self.embedding_size,\n weights=[embedding_matrix],\n input_length=self.maxlen,\n trainable=False)\n else:\n raise ValueError('unknown method value')\n\n else:\n raise ValueError('unknown embedding type')\n self.logging.info('create glove pre-trained embedding: ' + str(self.embedding_size))\n return embedding_layer", "def init_word_embed(config):\n embedding_mat_val = np.load(config.wordembed_params)\n with tf.variable_scope('vc'):\n with tf.variable_scope('lstm', reuse=True):\n embedding_mat = tf.get_variable(\"embedding_mat\", [config.num_vocab, config.embed_dim])\n init_we = tf.assign(embedding_mat, embedding_mat_val)\n return [init_we]", "def set_embeddings(self, embeddings: torch.Tensor, fine_tune: bool = True) -> None:\n if embeddings is None:\n # initialize embedding layer with the uniform distribution\n self.embedding.weight.data.uniform_(-0.1, 0.1)\n else:\n # initialize embedding layer with pre-trained embeddings\n self.embedding.weight = nn.Parameter(embeddings, requires_grad=fine_tune)", "def init_embedding_weights(self, dictionary, embeddings_index, embedding_dim):\r\n pretrained_weight = np.empty([len(dictionary), embedding_dim], dtype=float)\r\n for i in range(len(dictionary)):\r\n if dictionary.idx2word[i] in embeddings_index:\r\n pretrained_weight[i] = embeddings_index[dictionary.idx2word[i]]\r\n else:\r\n pretrained_weight[i] = helper.initialize_out_of_vocab_words(embedding_dim)\r\n # pretrained_weight is a numpy matrix of shape (num_embeddings, embedding_dim)\r\n if isinstance(self.embedding, nn.Sequential):\r\n self.embedding[0].weight.data.copy_(torch.from_numpy(pretrained_weight))\r\n else:\r\n self.embedding.weight.data.copy_(torch.from_numpy(pretrained_weight))", "def make_embeddings(self):\n\t\tprint(\"Presetting embedding weights\")\n\t\t\t\n\t\tnp.random.seed(0)\n\t\tweights = np.random.uniform(low = -0.05, high = 0.05, size = (self.FREQCAP, self.EMB_SIZE))\n\t\t\n\t\tcounter = 0\n\n\t\twords = []\n\t\tweights_tmp = []\n\n\t\twith open(self.embeddingpath) as handle:\n\t\t\tfor i, line in enumerate(handle):\n\t\t\t\ttmp = line.strip()\n\t\t\t\tif len(tmp) > 0:\n\t\t\t\t\tsplit = tmp.split(\" \")\n\t\t\t\t\tif split[0] in self.worddict and len(split[1:]) == 300:\n\t\t\t\t\t\twords.append(split[0])\n\t\t\t\t\t\tweights_tmp.append([float(a) for a in split[1:]])\n\t\t\n\t\tweights_tmp = np.array(weights_tmp)\n\n\t\tfor word, column in zip(words, weights_tmp):\n\t\t\tif self.worddict[word] < self.FREQCAP:\n\t\t\t\tcounter += 1\n\t\t\t\tweights[self.worddict[word],:] = column\n\t\t\n\t\tprint(\"Set\", counter, \"of\", weights.shape[0], \"columns\")\n\t\t\n\t\tif self.EMB_SIZE < weights.shape[-1]:\n\t\t\tprint(\"Reducing dimensionality to\", self.EMB_SIZE)\n\t\t\tpca = PCA(self.EMB_SIZE)\n\t\t\tweights = pca.fit_transform(weights)\n\t\t\n\t\tself.embeddings = [weights]", "def add_embeddings(self):\n\n with tf.device('/cpu:0'):\n with tf.variable_scope('Embedding_Layer'):\n embeddings = tf.Variable(self.initial_embeddings,name = 'Embeddings')\n self.input_embeddings = tf.nn.embedding_lookup(embeddings, self.inputs_placeholder) #(N,S,D)\n self.question_embeddings = tf.nn.embedding_lookup(embeddings, self.questions_placeholder) #(N,S,D)", "def __init__(self, input_size, config):\r\n super(EmbeddingLayer, self).__init__()\r\n\r\n if config.emtraining:\r\n self.embedding = nn.Sequential(OrderedDict([\r\n ('embedding', nn.Embedding(input_size, 
config.emsize)),\r\n ('dropout', nn.Dropout(config.dropout))\r\n ]))\r\n else:\r\n self.embedding = nn.Embedding(input_size, config.emsize)\r\n self.embedding.weight.requires_grad = False" ]
[ "0.7415511", "0.7169483", "0.69956625", "0.67939156", "0.6730466", "0.67063075", "0.6464072", "0.641942", "0.63999146", "0.62024164", "0.61996436", "0.6125713", "0.6099087", "0.6088853", "0.607822", "0.60730416", "0.605379", "0.6051636", "0.6051552", "0.6016388", "0.5989231", "0.59752566", "0.5963205", "0.5961812", "0.59314924", "0.5912871", "0.59088683", "0.59022844", "0.5898091", "0.5875716" ]
0.7428394
0
Gets the total expected size of the embedding.
def get_total_embedding_size(self) -> Union[int, List[int]]:
  features = self._config.context_features + self._config.sequential_features
  feature_dims = [self._embed_dim_dict[feat] for feat in features]
  if self._config.embedding_combination_method == (
      types.EmbeddingCombinationMethod.SUM_ALL):
    # Sum all features
    assert model_utils.all_elements_equal(feature_dims), (
        "All embedding dimensions must be equal to combine with sum")
    return feature_dims[0]
  elif self._config.embedding_combination_method == (
      types.EmbeddingCombinationMethod.CONCATENATE):
    # Concatenate all features, including upscaled time (dimension = ndim_emb)
    return sum(feature_dims) + self._config.ndim_emb
  elif self._config.embedding_combination_method == (
      types.EmbeddingCombinationMethod.SUM_BY_SUFFIX):
    # Sum features with the same suffix, concatenate identity lookup features.
    feature_suffixes = [get_feature_suffix(feat) for feat in features]
    total_emb_size = 0
    for suffix in set(feature_suffixes):
      features_to_sum = []
      for feat in features:
        feat_suffix = get_feature_suffix(feat)
        if feat not in self._config.identity_lookup_features and (feat_suffix == suffix):
          features_to_sum.append(self._embed_dim_dict[feat])
      assert model_utils.all_elements_equal(features_to_sum), (
          "All embedding dimensions must be equal to combine with sum")
      total_emb_size += features_to_sum[0]
    for feat in self._config.identity_lookup_features:
      total_emb_size += self._embed_dim_dict[feat]
    return total_emb_size
  elif self._config.embedding_combination_method == (
      types.EmbeddingCombinationMethod.COMBINE_SNR_OUT):
    return _flatten_list(feature_dims) + [self._config.ndim_emb]
  else:
    raise ValueError("Expected embedding combination method in "
                     "types.EmbeddingCombinationMethod. Got %s" %
                     self._config.embedding_combination_method)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_size(self) -> int:\n total_size = 0\n for entry in self.__entries:\n total_size += entry.get_size()\n return total_size", "def get_final_emb_size(self):\n size = self.n_layers * 1 * 2 * self.hidden_size\n return size", "def _input_size(self):\n return self.embedding_size + self.hidden_size", "def size(self) -> int:\n\n return self.sizes.sum()", "def size(self):\n size = 0\n size += self.data.size * sys.getsizeof(self.data)\n return size / 1024.0 / 1024.0 / 1024.0", "def get_size(self):\n tmpsize = 0\n for variable in self.variables:\n tmpsize += variable.get_size()\n for subchunk in self.subchunks:\n tmpsize += subchunk.get_size()\n return tmpsize", "def size(self) -> pulumi.Output[float]:\n return pulumi.get(self, \"size\")", "def total_length(self):\n return self.length", "def vocab_size(self) -> int:\n\n return self._vocab_size", "def get_size(self):\n return len(self.get_payload()) + 4", "def totalsize(self):\n return sum([sz for sz in self.iterate()])", "def vocab_size(self):\n return len(self.vocab)", "def vocab_size(self):\n return self._vocab_size", "def get_size(self):\n tmpsize = 0\n for variable in self.variables:\n tmpsize += variable.get_size()\n for subchunk in self.subchunks:\n tmpsize += subchunk.get_size()\n self.size.value = tmpsize\n return self.size.value + self.ID.get_size() + self.size.get_size()", "def validation_size(self) -> int:\n return int(self.data_size * self.__validation_fraction)", "def get_test_size(self):\n return self.test_size", "def test_size(self) -> int:\n return int(self.data_size * self.__test_fraction)", "def total_length(self):\n return abs(self.length)", "def vocab_size(self) -> int:\n return len(self.vocabulary)", "def get_size(self) -> int:\n return self.__size", "def size(self):\n return self._length", "def vocabularySize(self):\n return len(self.vocab.keys())", "def size(self):\n return self.__length", "def get_vocab_size(self):\n return len(self.char_map) + 4", "def size(self):\n return numpy.prod(self.shape)", "def size(self):\r\n return self.__length", "def get_size(self):\n return self.get_params().shape[0]", "def size(self):\n return int(misc.intprod(self.shape))", "def size(self) -> int:\n return self._size", "def size(self) -> int:\n return self._size" ]
[ "0.72786283", "0.726968", "0.7224457", "0.71614337", "0.7145755", "0.7144727", "0.71412814", "0.71291584", "0.7115016", "0.71070266", "0.7092374", "0.7082026", "0.7068235", "0.7061945", "0.7058983", "0.7046681", "0.7035419", "0.7003102", "0.7002194", "0.698089", "0.69800216", "0.6975782", "0.6947785", "0.6933724", "0.68960416", "0.6877657", "0.6876067", "0.68751734", "0.6872035", "0.6872035" ]
0.72793114
0
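A quick numeric sketch of the size rules in get_total_embedding_size above; the feature names, widths, and ndim_emb value below are hypothetical stand-ins for the real config, not values taken from it:

# Hypothetical per-feature embedding widths standing in for self._embed_dim_dict.
feature_dims = {"page_id": 16, "action_type": 8, "device_s": 8}
ndim_emb = 4  # assumed width of the upscaled time embedding

# CONCATENATE: total input width = sum of feature widths + ndim_emb.
total_concat = sum(feature_dims.values()) + ndim_emb  # 16 + 8 + 8 + 4 = 36

# SUM_ALL: every width must match, and the total is that shared width.
equal_dims = [8, 8, 8]
assert len(set(equal_dims)) == 1, "all widths must be equal to sum"
total_sum_all = equal_dims[0]  # 8

print(total_concat, total_sum_all)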
Embeds data dictionary and creates inputs to temporal model.
def embed_data(
    self, data: Dict[str, tf.SparseTensor]
) -> Tuple[tf.Tensor, tf.Tensor]:
  batch_shape = tf.shape(data["t"])[:-1]
  flat_data = nest.map_structure(batches.flatten_batch, data)
  flat_data = nest.map_structure(batches.sparse_fill_empty_rows, flat_data)
  context_embeddings = (
      self.embedding.provide_embeddings_to_forward_fn(
          flat_data, feature_types=self._config.context_features))
  context_embeddings = nest.map_structure(
      batches.get_unflatten_batch_fn(batch_shape), context_embeddings)
  sequential_embeddings = (
      self.embedding.provide_embeddings_to_forward_fn(
          flat_data, feature_types=self._config.sequential_features))
  sequential_embeddings = nest.map_structure(
      batches.get_unflatten_batch_fn(batch_shape), sequential_embeddings)
  dt = tf.divide(tf.cast(data["dt"], dtype=tf.float32), 5400.)
  t = tf.divide(tf.cast(data["t"], dtype=tf.float32), 5400.)
  dt_log = tf.log(dt + 1.)
  embedding_dict = sequential_embeddings.copy()
  embedding_dict.update(context_embeddings)
  embedding_dict["dt_s"] = tf.matmul(dt_log, self.w_dt)
  combined_embedding = self._combine_embeddings_for_input(embedding_dict)
  inputs = combined_embedding
  if self._config.get("apply_bias", False):
    inputs = inputs + tf.get_variable(
        "_".join([self._config.embedding_type, "final_bias"]),
        shape=[self.get_total_embedding_size()],
        initializer=tf.zeros_initializer)
  time_vect = t
  return inputs, time_vect
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_embed_itmes(data):\n for k, v in data.items() :\n embed.add_embed_field(name=k, value=v)", "def emb_experiment():\n print(\"EMBEDDINGS EXPERIMENT\")\n\n # set the name of the experiment\n now = datetime.datetime.now()\n experiment_id = str(now.day) + \"_\" + str(now.month) + \"_\" + str(now.hour) + \".\" + str(now.minute)\n experiment_name = 'emb_' + str(experiment_id)\n\n # define if you want to use preprocessed data from file\n use_prep_data = False\n if use_prep_data:\n set_params(preproc_data_id='16_5_10.16.47')\n else:\n set_params(use_preproc_data=False)\n\n # define the changing parameter and its value\n changing_param_name = 'use_word_emb'\n changing_param_value = [0, 1]\n # {0:4, 1:100}, {0:3, 1:100}, {0:2, 1:100}, {0:1, 1:100}] #[{0:1, 1:1}, {0:15, 1:85}]\n\n # set constant parameters\n set_params(epochs=20)\n set_params(dropout=0.3)\n\n # save constant parameters to a new \"experiment_..\" file\n save_constant_parameters(experiment_name, changing_param_name)\n\n # run experiment for every parameter value\n for value in changing_param_value:\n process = psutil.Process(os.getpid())\n print(\"-----MEMORY before starting experiment ------\", int(process.memory_info().rss/(8*10**(3))), \"KB\")\n\n # update the parameter value\n set_params(use_word_emb = value)\n\n # update the model_id for this new model\n now = datetime.datetime.now()\n new_model_id = str(now.day) + \"_\" + str(now.month) + \"_\" + str(now.hour) + \".\" + str(now.minute) + \".\" + str(now.second)\n set_params(model_id=new_model_id)\n\n # evaluate the new model and save the results in the experiment file\n oneExperiment = Process(target=run_experiment, args=(experiment_name, new_model_id, changing_param_name, value,))\n oneExperiment.start()\n oneExperiment.join()\n\n if value == changing_param_value[0]:\n set_params(preproc_data_id=new_model_id)", "def encode_data(opt, model, data_loader, log_step=10, logging=print, contextual_model=True):\n batch_time = AverageMeter()\n val_logger = LogCollector()\n\n # switch to evaluate mode\n model.val_start(opt)\n\n end = time.time()\n\n # numpy array to keep all the embeddings\n clip_embs, cap_embs = [], []\n vid_embs, para_embs = [], []\n vid_contexts, para_contexts = [], []\n num_clips_total = []\n cur_vid_total = []\n for i, (clips, captions, videos, paragraphs, lengths_clip, lengths_cap, lengths_video, lengths_paragraph, num_clips, num_caps, ind, cur_vid) in enumerate(data_loader):\n # make sure val logger is used\n model.logger = val_logger\n num_clips_total.extend(num_clips)\n\n # compute the embeddings\n clip_emb, cap_emb = model.forward_emb(clips, captions, lengths_clip, lengths_cap)\n vid_context, para_context = model.forward_emb(videos, paragraphs, lengths_video, lengths_paragraph)\n if contextual_model:\n vid_emb, para_emb = model.structure_emb(clip_emb, cap_emb, num_clips, num_caps, vid_context, para_context)\n else:\n vid_emb, para_emb = model.structure_emb(clip_emb, cap_emb, num_clips, num_caps)\n\n\n clip_emb = F.normalize(clip_emb)\n cap_emb = F.normalize(cap_emb)\n vid_emb = F.normalize(vid_emb)\n para_emb = F.normalize(para_emb)\n vid_context = F.normalize(vid_context)\n para_context = F.normalize(para_context)\n\n\n # initialize the numpy arrays given the size of the embeddings\n clip_embs.extend(clip_emb.data.cpu())\n cap_embs.extend(cap_emb.data.cpu())\n vid_embs.extend(vid_emb.data.cpu())\n para_embs.extend(para_emb.data.cpu())\n vid_contexts.extend(vid_context.data.cpu())\n para_contexts.extend(para_context.data.cpu())\n 
cur_vid_total.extend(cur_vid)\n\n # measure accuracy and record loss\n model.forward_loss(vid_emb, para_emb, 'test')\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % log_step == 0:\n logging('Test: [{0}/{1}]\\t'\n '{e_log}\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n .format(\n i, len(data_loader), batch_time=batch_time,\n e_log=str(model.logger)))\n\n vid_embs = torch.stack(vid_embs, 0)\n para_embs = torch.stack(para_embs, 0)\n vid_embs = vid_embs.numpy()\n para_embs = para_embs.numpy()\n\n clip_embs = torch.stack(clip_embs, 0)\n cap_embs = torch.stack(cap_embs, 0)\n clip_embs = clip_embs.numpy()\n cap_embs = cap_embs.numpy()\n\n vid_contexts = torch.stack(vid_contexts, 0)\n para_contexts = torch.stack(para_contexts, 0)\n vid_contexts = vid_contexts.numpy()\n para_contexts = para_contexts.numpy()\n\n return vid_embs, para_embs, clip_embs, cap_embs, vid_contexts, para_contexts, num_clips_total, cur_vid_total", "def generate_and_render(\n self,\n num_trajectories: int,\n rng_key: jnp.ndarray,\n t0: utils.FloatArray,\n t_eval: utils.FloatArray,\n y0: Optional[phase_space.PhaseSpace] = None,\n params: Optional[utils.Params] = None,\n within_canvas_bounds: bool = True,\n **kwargs: Any\n ) -> Mapping[str, Any]:\n return self.generate_and_render_dt(\n num_trajectories=num_trajectories,\n rng_key=rng_key,\n t0=t0,\n dt=utils.t_eval_to_dt(t0, t_eval),\n y0=y0,\n params=params,\n within_canvas_bounds=within_canvas_bounds,\n **kwargs\n )", "def export_inputs(self):\n vocab_dict = load_vocab_dict(self.text_vocab_file_path)\n vocab_size = len(vocab_dict)\n label_vocab_dict = load_vocab_dict(self.label_vocab_file_paths[0])\n label_vocab_size = len(label_vocab_dict)\n self.config['data']['vocab_size'] = vocab_size\n self.config['data']['label_vocab_size'] = label_vocab_size\n\n input_sentence = tf.placeholder(\n shape=(None,), dtype=tf.string, name=\"input_sentence\")\n\n input_pipeline_func = self.get_input_pipeline(for_export=True)\n\n token_ids = input_pipeline_func(input_sentence)\n token_ids_len = tf.map_fn(lambda x: compute_sen_lens(x, padding_token=0),\n token_ids)\n\n export_data = {\n \"export_inputs\": {\n \"input_sentence\": input_sentence\n },\n \"model_inputs\": {\n \"input_enc_x\": token_ids,\n \"input_x_len\": token_ids_len\n }\n }\n\n return export_data", "def embedd_data(training_data_text, e_arr, e_dict):\n num_samples = len(training_data_text)\n embedded = np.zeros([num_samples, MAX_WORDS_IN_REVIEW, EMBEDDING_SIZE])\n for i in range(num_samples):\n review_mat = np.zeros([MAX_WORDS_IN_REVIEW, EMBEDDING_SIZE])\n # Iterate to either the end of the sentence of the max num of words, whichever is less\n for w in range(min(len(training_data_text[i]), MAX_WORDS_IN_REVIEW)):\n # assign embedding of that word or to the UNK token if that word isn't in the dict\n review_mat[w] = e_arr[e_dict.get(training_data_text[i][w], 0)]\n embedded[i] = review_mat\n return embedded", "def build_input_embed(self, n_input, t_input):\n n_embed_matrix = tf.Variable(tf.random_uniform(\n [self.num_ntoken, self.n_embed_dim], minval=-0.05, maxval=0.05), name='n_embed_matrix')\n t_embed_matrix = tf.Variable(tf.random_uniform(\n [self.num_ttoken, self.t_embed_dim], minval=-0.05, maxval=0.05), name='t_embed_matrix')\n n_input_embedding = tf.nn.embedding_lookup(n_embed_matrix, n_input)\n t_input_embedding = tf.nn.embedding_lookup(t_embed_matrix, t_input)\n return n_input_embedding, t_input_embedding", "def prepare_emb(self):\n with 
tf.variable_scope(\"PrepEmb\", reuse=tf.AUTO_REUSE):\n self.src_ten = tf.cast(tf.convert_to_tensor(self.src_ten), tf.float32)\n self.tgt_ten = tf.cast(tf.convert_to_tensor(self.tgt_ten), tf.float32)\n # Mapping\n self.src_ten = tf.matmul(self.src_ten, self.W)\n # Normalization\n self.src_ten = tf.nn.l2_normalize(self.src_ten, axis=1)\n self.tgt_ten = tf.nn.l2_normalize(self.tgt_ten, axis=1)", "def main():\n loader = MicrosoftDataloader()\n train,dev,test = loader.getData()\n sentences = []\n\n # Collect all the training sentences\n for i,row in pd.concat((train,test)).iterrows():\n if isinstance(row[\"sentence1\"], basestring) and isinstance(row[\"sentence2\"], basestring):\n sentences.append(row[\"sentence1\"])\n sentences.append(row[\"sentence2\"])\n\n # Get the mapping between sentences and their cotext vectors\n mapped = get_sentence_to_context_map(sentences)\n\n # At this stage we have a map between every sentence and its context vector\n # However the JSON file must contain sentences in the same order as in the MSR data file\n data = []\n for i,sentence in enumerate(sentences):\n embedding = mapped[sentence]\n data.append({'index':i, 'embedding':embedding, 'text':sentence})\n\n # Write the sentences and embeddings to JSON\n # The array index should corrospond to the sentence #\n print \"Saving embedded sentences to: {0}\".format(EMBED_FILE)\n with open(EMBED_FILE,'w') as outfile:\n json.dump(data,outfile,indent=2)", "def _generate_entities(data):\n\n i = 0\n while i < len(data):\n release_date = datetime.strptime(data[i].text, '%m/%d/%Y')\n release_date = release_date.strftime('%Y-%m-%d')\n name = data[i+1].text\n url = data[i+1].find_element_by_tag_name('a').get_attribute(\"/href\")\n\n href = data[i+2].find_element_by_tag_name('a').get_attribute(\"/href\")\n related = []\n if href:\n doc = BeautifulSoup(helpers.fetch_string(href), \"html.parser\")\n tds = doc.find_all(\"td\", class_='ms-vb')\n for td in tds:\n try:\n related.append(td.find('a')['href'])\n except AttributeError:\n pass\n \n related_documents = ' '.join(related) \n fields = [{\"name\": \"Release date\", \"value\": release_date},\n {\"tag\": \"url\", \"value\": url},\n {\"name\": \"Related documents\", \"value\": related_documents}]\n i += 3\n\n my_id = helpers.make_id(name)\n if len(my_id) > 99:\n my_id = my_id[:99]\n\n entity = {\n \"_meta\": {\n \"id\": my_id,\n \"entity_type\": \"company\"\n },\n \"fields\": fields,\n \"name\": name,\n }\n\n helpers.emit(entity)", "def build_inputs(\n timesteps,\n latlon_dense_units,\n concat_dense_units,\n latent_dim,\n vocab_sizes,\n noise=False,\n mask=False,\n):\n latlon_input, latlon_embed = build_inputs_latlon(timesteps, latlon_dense_units)\n inputs = [latlon_input]\n embeddings = [latlon_embed]\n for key, val in vocab_sizes.items():\n cat_input, cat_embed = build_inputs_cat(timesteps, val, key)\n inputs.append(cat_input)\n embeddings.append(cat_embed)\n concat_input = layers.Concatenate(axis=2)(embeddings)\n unstacked = layers.Lambda(lambda x: tf.unstack(x, axis=1))(concat_input)\n d = layers.Dense(\n units=concat_dense_units,\n activation=\"relu\",\n kernel_initializer=initializers.he_uniform(seed=1),\n name=\"emb_trajpoint\",\n )\n if noise:\n noise_input = layers.Input(shape=(latent_dim,), name=\"input_noise\")\n inputs.append(noise_input)\n dense_outputs = [d(layers.Concatenate(axis=1)([x, noise_input])) for x in unstacked]\n else:\n dense_outputs = [d(x) for x in unstacked]\n if mask:\n inputs.append(layers.Input(shape=(timesteps, 1), name=\"input_mask\"))\n emb_traj = 
layers.Lambda(lambda x: tf.stack(x, axis=1))(dense_outputs)\n return (inputs, emb_traj)", "def prepare_data(self):\n data = self._get_dataset(self.hparams.dataset_path)\n label_encoder = data[\"label_encoder\"]\n del data[\"label_encoder\"]\n\n click.secho(\"Building inputs and labels.\", fg=\"yellow\")\n datasets = {\n \"train\": defaultdict(list),\n \"valid\": defaultdict(list),\n \"test\": defaultdict(list),\n }\n for dataset_name, dataset in data.items():\n for sample in dataset:\n instance = self.build_input(\n self.tokenizer, sample[\"text\"], label_encoder, sample[\"label\"]\n )\n for input_name, input_array in instance.items():\n datasets[dataset_name][input_name].append(input_array)\n\n click.secho(\"Padding inputs and building tensors.\", fg=\"yellow\")\n tensor_datasets = {\"train\": [], \"valid\": [], \"test\": []}\n for dataset_name, dataset in datasets.items():\n dataset = self.pad_dataset(dataset, padding=self.tokenizer.pad_index)\n for input_name in MODEL_INPUTS:\n if input_name == \"labels\":\n tensor = torch.tensor(dataset[input_name], dtype=torch.float32)\n else:\n tensor = torch.tensor(dataset[input_name])\n tensor_datasets[dataset_name].append(tensor)\n\n self.train_dataset = TensorDataset(*tensor_datasets[\"train\"])\n self.valid_dataset = TensorDataset(*tensor_datasets[\"valid\"])\n self.test_dataset = TensorDataset(*tensor_datasets[\"test\"])\n click.secho(\n \"Train dataset (Batch, Candidates, Seq length): {}\".format(\n self.train_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )\n click.secho(\n \"Valid dataset (Batch, Candidates, Seq length): {}\".format(\n self.valid_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )\n click.secho(\n \"Test dataset (Batch, Candidates, Seq length): {}\".format(\n self.test_dataset.tensors[0].shape\n ),\n fg=\"yellow\",\n )", "def add_ae(self, model, dataset, latent_options, model_paths, pre_process=None):\n ae = autoencoder(self.app, model, dataset, latent_options, model_paths, pre_process)\n self.body_children.append(ae)", "def embedding(org_input):\n # Create the embedding list\n for f in range(Config.num_feature):\n num_cat_value = Config.schema[f]\n\n if num_cat_value == 1:\n pass\n elif num_cat_value > 1:\n embed_dict[f] = tf.get_variable(\n name=\"embed_\" + str(f),\n shape=[num_cat_value, Config.embed_size[f]],\n trainable=True)\n else:\n raise ValueError(\"Schema values should be positive integers!\")\n\n # Create embedded inputs\n f_size = np.sum(Config.embed_size)\n embedded_input = embed_events(org_input, f_size)\n\n return embedded_input", "def setupEmbeddings(self, path = \"awd_lm\"):\n try:\n data_lm = TextLMDataBunch.from_df(path, train_df=self.train, valid_df=self.valid,\\\n text_cols = \"text\", label_cols = \"label\")\n except:\n print(\"error creating LM\")\n return\n\n learn = language_model_learner(data_lm, arch=AWD_LSTM, drop_mult=.25)\n learn.fit_one_cycle(1, 1e-2)\n learn.save_encoder('ft_enc_1')\n\n learn.unfreeze()\n learn.fit_one_cycle(3, 1e-3)\n learn.save_encoder('ft_enc_1')\n\n learn.unfreeze()\n learn.fit_one_cycle(5, 5e-4)\n learn.save_encoder('ft_enc_1')\n\n print(\"feature encoding saved\")", "def embed(self, data, mime_type=\"text/plain\", encode_data_to_base64=True):\n if encode_data_to_base64:\n data = base64.standard_b64encode(data.encode()).decode()\n self.embeddings.append({\"data\": data, \"mime_type\": mime_type})", "def gen_ep_data(self,ntrials,trlen):\n self.resort_emat()\n ## instruction\n # for each trial, generate random instruction encoding sequence\n i_encoding_input = 
np.array([\n np.random.permutation(np.arange(1,self.nmaps+1)) \n for i in range(ntrials)\n ])\n i_test_input = np.zeros([ntrials,trlen])\n i_input = np.concatenate([\n i_encoding_input,i_test_input],\n 1).astype(int).reshape(-1) # (ntrials,trlen+)\n ## stimulus\n x_encoding_input = i_encoding_input\n x_test_input = np.random.randint(1,self.nmaps+1,[ntrials,trlen])\n x_input = np.concatenate([x_encoding_input,x_test_input],1)\n # print('X',x_input)\n ''' \n embed x_input: \n [ntrials,nmaps+trlen] -> s_input [ntrials*(nmaps+trlen),edim]\n explicit loop required for flatten and embedd x_input\n because if switchmaps=1, matrix is resorted between trials\n and therefore same stimulus token integers correspond to\n different stimulus embeddings on different trials\n '''\n s_input = -np.ones([ntrials,(self.nmaps+trlen),self.stimdim])\n for trialn,x_input_trial in enumerate(x_input): \n if self.switchmaps: self.resort_emat()\n s_input[trialn] = self.emat[x_input_trial]\n # format output\n i_input = tr.unsqueeze(tr.LongTensor(i_input),1)\n s_input = tr.unsqueeze(tr.Tensor(np.concatenate(s_input)),1)\n yseq = tr.unsqueeze(tr.LongTensor(x_input.reshape(-1)),1)\n return i_input,s_input,yseq", "def feed(self, data):\n try:\n feed = {}\n for i in range(len(self.inputs)):\n feed[self.inputs[i]] = data[i]\n return feed\n except Exception:\n pass", "def embed(query: str) -> dict:\n embedding = model.embed(query)\n return {\"embedding\": embedding, \"model\": model_name}", "def forward(self, data: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:\n # possibly pass dynamic and static inputs through embedding layers, then concatenate them\n x_h = self.hindcast_embedding_net(data)\n x_f = self.forecast_embedding_net(data)\n\n # run hindcast part of the lstm\n lstm_output_hindcast, (h_n_hindcast, c_n_hindcast) = self.hindcast_lstm(input=x_h)\n lstm_output_hindcast = lstm_output_hindcast.transpose(0, 1)\n output_hindcast = self.hindcast_head(self.dropout(lstm_output_hindcast))\n\n # reshape to [batch_size, seq, n_hiddens]\n h_n_hindcast = h_n_hindcast.transpose(0, 1)\n c_n_hindcast = c_n_hindcast.transpose(0, 1)\n\n # run forecast heads\n batch_size = x_f.shape[1]\n x_f = x_f.transpose(0, 1).contiguous()\n x = torch.cat([h_n_hindcast.squeeze(dim=1), x_f.view(batch_size, -1)], dim=-1)\n x = self.forecast_network(x)\n x = x.view(batch_size, self.cfg.forecast_seq_length, -1)\n output_forecast = self.forecast_head(self.dropout(x))\n\n # start an output dictionary\n pred = {key: torch.cat([output_hindcast[key], output_forecast[key]], dim=1) for key in output_hindcast}\n\n pred.update(\n {\n 'lstm_output_hindcast': lstm_output_hindcast,\n 'output_forecast': output_forecast,\n\n 'h_n_hindcast': h_n_hindcast,\n 'c_n_hindcast': c_n_hindcast,\n }\n )\n\n return pred", "def feed_inputs(self):\n feed_dict = collections.defaultdict(list)\n for i in range(self._pipe.batch_size):\n data = self.example_to_data(self._buffer.get())\n for k, v in data.items():\n feed_dict[k].append(v)\n for k, v in self.features.items():\n self._pipe.feed_input(self.features[k], feed_dict[k])", "def create_embedding(skills):\n corpus = list(skills[\"description\"].values)\n embedder = SentenceTransformer(config[\"sentence_transformer\"][\"model\"])\n embedding = embedder.encode(corpus, show_progress_bar=True)\n return embedding", "def build_model(self):\n num_layers, num_units, input_window, output_window, encoder_exog_size, decoder_exog_size, dropout_rate, l2_regu =\\\n self.num_layers, self.num_units, self.input_window, self.output_window, 
self.encoder_exog_size, self.decoder_exog_size, self.dropout_rate, self.l2_regu\n \n #Define embedding layers (item_id, event_name), in case the embedding layers are applied to both encoder and decoder.\n event_embed = Embedding(input_dim=31, output_dim=8, mask_zero=False, name='event_embed')\n \n #Define encoder model\n encoder_input = Input(shape=(input_window, 1)) #endog input for encoder\n encoder_exog_input = Input(shape=(input_window, encoder_exog_size))\n \n encoder_concat_input = Concatenate()([encoder_input, encoder_exog_input])\n \n encoder_lstm_res = {}\n for i in range(num_layers):\n encoder_lstm = LSTM(num_units[i], kernel_regularizer=l2_regu, recurrent_regularizer=l2_regu, dropout=dropout_rate, recurrent_dropout=0,\n return_sequences=True, return_state=True, name='encoder_lstm_{}'.format(i))\n if (i == 0):\n encoder_lstm_outputs, encoder_lstm_state_h, encoder_lstm_state_c = encoder_lstm(encoder_concat_input)\n else:\n encoder_lstm_outputs, encoder_lstm_state_h, encoder_lstm_state_c = encoder_lstm(encoder_lstm_res[(i-1, 'outputs')])\n\n encoder_lstm_res[(i, 'model')] = encoder_lstm\n encoder_lstm_res[(i, 'outputs')] = encoder_lstm_outputs\n encoder_lstm_res[(i, 'states')] = [encoder_lstm_state_h, encoder_lstm_state_c]\n\n #Define decoder model\n #endog input for decoder. It is always a vector of 0s, meaning that model is trained unconditionally without using any forecast information.\n decoder_input = Input(shape=(output_window, 1))\n decoder_exog_input = Input(shape=(output_window, decoder_exog_size))\n \n decoder_event_input = Input(shape=(output_window,))\n decoder_event_embed = event_embed(decoder_event_input)\n \n decoder_concat_input = Concatenate()([decoder_input, decoder_exog_input, decoder_event_embed])\n \n decoder_lstm_res = {}\n for i in range(num_layers):\n decoder_lstm = LSTM(num_units[i], kernel_regularizer=l2_regu, recurrent_regularizer=l2_regu, dropout=dropout_rate, recurrent_dropout=0,\n return_sequences=True, return_state=True, name='decoder_lstm_{}'.format(i))\n if (i == 0):\n decoder_lstm_outputs, _, _ = decoder_lstm(decoder_concat_input, initial_state=encoder_lstm_res[(i, 'states')])\n else:\n decoder_lstm_outputs, _, _ = decoder_lstm(decoder_lstm_res[(i-1, 'outputs')], initial_state=encoder_lstm_res[(i, 'states')])\n\n decoder_lstm_res[(i, 'model')] = decoder_lstm\n decoder_lstm_res[(i, 'outputs')] = decoder_lstm_outputs\n\n decoder_output = Dense(1, activation=None, kernel_regularizer=l2_regu, name='decoder_output')(decoder_lstm_outputs)\n\n #training mode of model\n model = Model(inputs = [encoder_input, encoder_exog_input, decoder_input, decoder_exog_input, decoder_event_input], outputs = decoder_output)\n adam = Adam(learning_rate=self.lr)\n model.compile(optimizer=adam, loss='mse')\n print(model.summary())\n \n self.model = model\n \n return(model)", "def export_embeddings(self):\n save_path = self.config.path_embeddings / self.model.model_name\n save_path.mkdir(parents=True, exist_ok=True)\n \n idx2ent = self.model.config.knowledge_graph.read_cache_data('idx2entity')\n idx2rel = self.model.config.knowledge_graph.read_cache_data('idx2relation')\n\n\n series_ent = pd.Series(idx2ent)\n series_rel = pd.Series(idx2rel)\n series_ent.to_pickle(save_path / \"ent_labels.pickle\")\n series_rel.to_pickle(save_path / \"rel_labels.pickle\")\n\n with open(str(save_path / \"ent_labels.tsv\"), 'w') as l_export_file:\n for label in idx2ent.values():\n l_export_file.write(label + \"\\n\")\n\n with open(str(save_path / \"rel_labels.tsv\"), 'w') as l_export_file:\n 
for label in idx2rel.values():\n l_export_file.write(label + \"\\n\")\n\n for parameter in self.model.parameter_list:\n all_ids = list(range(0, int(parameter.shape[0])))\n stored_name = parameter.name.split(':')[0]\n # import pdb; pdb.set_trace()\n\n if len(parameter.shape) == 2:\n all_embs = parameter.numpy()\n with open(str(save_path / (\"%s.tsv\" % stored_name)), 'w') as v_export_file:\n for idx in all_ids:\n v_export_file.write(\"\\t\".join([str(x) for x in all_embs[idx]]) + \"\\n\")\n\n df = pd.DataFrame(all_embs)\n df.to_pickle(save_path / (\"%s.pickle\" % stored_name))", "def generate(self, text, prev_mel):\n # forward pass through text embedding and get k and v\n kv = self.t_encoder(text)\n k = kv[:,:self.hp.d,:]\n v = kv[:,self.hp.d:,:]\n # forward pass through audio encoding and get Q\n q = self.a_encoder(prev_mel)\n \n # compute attention and use forcible incremental attention (section 4.2)\n a = (k.transpose(2,1)).matmul(q)/np.sqrt(self.hp.d)\n a = F.softmax(a, dim=1)\n \"\"\"\n # get argmax\n argmax = a[0].argmax(dim=0) # argmax on the N dimension\n # forcibly incremental attention\n preva = -1\n for i in range(a.shape[-1]):\n if argmax[i] < preva -1 or preva + 3 < argmax[i]:\n # force the ith column to be zero\n a[:,:,i] = 0\n # find correct position\n position = min(a.shape[1]-1, preva + 1)\n a[:,position,i] = 1.0\n # update preva\n preva = a[0,:,i].argmax()\"\"\"\n\n # finish computing y and a\n r = r = v.matmul(a)\n\n rprime = torch.cat((r, q), dim=1)\n ylogit = self.decoder(rprime)\n y = F.sigmoid(ylogit)\n return y, ylogit, a", "def build_inputs(self):\n if self.mode == \"encode\":\n # Word embeddings are fed from an external vocabulary which has possibly\n # been expanded (see vocabulary_expansion.py).\n encode_ids1 = None\n encode_ids2 = None\n encode_mask1 = tf.placeholder(tf.int8, (None, None), name=\"encode_mask1\")\n encode_mask2 = tf.placeholder(tf.int8, (None, None), name=\"encode_mask2\")\n label = None\n\n elif self.mode == \"test\":\n encode_ids1 = None\n encode_ids2 = None\n encode_mask1 = tf.placeholder(tf.int8, (None, None), name=\"encode_mask1\")\n encode_mask2 = tf.placeholder(tf.int8, (None, None), name=\"encode_mask2\")\n label = None\n \n else:\n # Prefetch serialized tf.Example protos.\n input_queue = input_ops.prefetch_input_data(\n self.reader,\n self.config.input_file_pattern,\n shuffle=self.config.shuffle_input_data,\n capacity=self.config.input_queue_capacity,\n num_reader_threads=self.config.num_input_reader_threads)\n\n # Deserialize a batch.\n serialized = input_queue.dequeue_many(self.config.batch_size)\n s1, s2, label = input_ops.parse_example_batch(\n serialized)\n\n encode_ids1 = s1.ids\n encode_ids2 = s2.ids\n\n encode_mask1 = s1.mask\n encode_mask2 = s2.mask\n \n\n\n self.encode_ids1 = encode_ids1\n self.encode_ids2 = encode_ids2\n\n self.encode_mask1 = encode_mask1\n self.encode_mask2 = encode_mask2\n\n self.label = label", "def bert_embed(data, bert_model, BATCH_SIZE = 16, MAX_LEN = 128):\n \n dataset = TensorDataset(\n data['input_ids'], data['attention_masks'], data['indices']\n )\n sampler = SequentialSampler(dataset)\n dataloader = DataLoader(dataset, sampler=sampler, batch_size=BATCH_SIZE)\n \n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n print('Running on ' + device.type)\n if device.type == 'cuda':\n bert_model.cuda() # put bert in training mode\n \n N = data['indices'].shape[0]\n X = np.zeros((N, 768))\n pos = 0\n for batch in dataloader:\n batch = tuple(t.to(device) for t in batch)\n b_input_ids, 
b_input_masks, b_indices = batch\n \n with torch.no_grad():\n embeddings = bert_model(\n b_input_ids.view(-1, MAX_LEN),\n b_input_masks.view(-1, MAX_LEN)\n )[2]\n # Take the mean of the last 4 hidden states\n embeddings = (embeddings[-4] + embeddings[-3] + embeddings[-2] + embeddings[-1])/4\n for j, label_ind in enumerate(b_indices.cpu().detach().numpy()):\n X[pos,:] = embeddings[j, int(label_ind), :].cpu().detach().numpy()\n pos+=1\n return X", "def build_data(self):\n from desiutil.io import combine_dicts\n # Loop on exposures\n odict = {}\n for qanight in self.qa_nights:\n for qaexp in qanight.qa_exps:\n # Get the exposure dict\n idict = write_qa_exposure('foo', qaexp, ret_dict=True)\n odict = combine_dicts(odict, idict)\n # Finish\n self.data = odict", "def add_embeddings(self):\n\n with tf.device('/cpu:0'):\n with tf.variable_scope('Embedding_Layer'):\n embeddings = tf.Variable(self.initial_embeddings,name = 'Embeddings')\n self.input_embeddings = tf.nn.embedding_lookup(embeddings, self.inputs_placeholder) #(N,S,D)\n self.question_embeddings = tf.nn.embedding_lookup(embeddings, self.questions_placeholder) #(N,S,D)", "def make_interactive(model: EpamModel, Xy_raw: pd.DataFrame):\n \n def inner(base_example: int = 0):\n \"\"\"Inner function that does the work.\"\"\"\n EX = Xy_raw.iloc[base_example]\n print(\"Base Example IDs:\")\n print((Xy_raw.index[[base_example]]).to_frame().reset_index(drop=True).iloc[0].rename(index=base_example))\n\n def get_widget(col: str):\n V = EX[col]\n if 'Tak' in Xy_raw[col].values.tolist():\n zz = [\"Tak\", \"Nie\", \"Nie wiadomo\"]\n return w.Select(options=zz, value=\"Nie wiadomo\" if (V not in zz) else V)\n\n d = Xy_raw[col].dtype\n if d == \"int64\":\n return w.IntSlider(min=Xy_raw[col].min(), max=Xy_raw[col].max(), value=V)\n elif d == \"float64\":\n return w.FloatText(value=V)#Xy_raw[col].mean())\n\n def _f(s):\n try:\n return float(s)\n except:\n return np.nan\n avg_val = Xy_raw[col].apply(_f).mean()\n if pd.isnull(V):\n V = str(avg_val)\n return w.Text(value=V)\n\n\n def make_pred(**vals):\n my_X_raw = pd.Series(vals).to_frame().T\n my_X_raw = my_X_raw.astype(Xy_raw.drop(columns=[basic_target]).dtypes)\n my_X = cleanup_df(my_X_raw)#.astype(X.dtypes)\n\n ifd = model.ifd_\n with model.build_model(my_X) as m:\n _ppc_oos = pm.fast_sample_posterior_predictive(ifd)\n ifd_oos = az.from_pymc3(posterior_predictive=_ppc_oos)\n ppc_oos = ifd_oos.posterior_predictive['y_pred']\n y_oos = ppc_oos.mean([\"chain\", \"draw\"]).to_series()\n\n # y_prob = model.predict_proba(my_X)\n print(\"Prediction:\", y_oos.iloc[0])\n\n widgets = {k: get_widget(k) for k in Xy_raw.drop(columns=[basic_target]).columns}\n \n w.interact_manual(make_pred, **widgets)\n return w.interact(\n inner, \n base_example=w.IntText(min=0, max=len(Xy_raw)-1, value=0, description=\"Base Case\")\n )" ]
[ "0.56800663", "0.55806065", "0.5433915", "0.5323284", "0.5315226", "0.5314329", "0.52291834", "0.5220039", "0.5213521", "0.5192599", "0.518687", "0.51661015", "0.51657164", "0.5155179", "0.5129116", "0.50947684", "0.50538045", "0.5042975", "0.50393456", "0.5035438", "0.4997962", "0.49960172", "0.4989464", "0.4982798", "0.49822718", "0.49730998", "0.49701768", "0.496972", "0.49647474", "0.49592927" ]
0.5893568
0
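A minimal sketch of the time preprocessing in embed_data above, using made-up gap values and NumPy instead of TensorFlow; 5400 seconds is the scaling constant from the function, and the log-scaled gap is the quantity that gets projected into the "dt_s" input:

import numpy as np

dt_seconds = np.array([[30.0], [600.0], [7200.0]])  # hypothetical inter-event gaps
dt = dt_seconds / 5400.0                            # same scaling as tf.divide(dt, 5400.)
dt_log = np.log(dt + 1.0)                           # same transform as tf.log(dt + 1.)
print(dt_log.round(4))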
Gets the regularization loss for embedding weights.
def get_embedding_regularization_loss(self) -> tf.Tensor:
  sparse_lookup_regularization = self._config.sparse_lookup_regularization
  sparse_lookup_regularization_weight = (
      self._config.sparse_lookup_regularization_weight)
  encoder_regularization = self._config.encoder_regularization
  encoder_regularization_weight = self._config.encoder_regularization_weight
  embedding_weights = [
      w for w in tf.trainable_variables()
      if (self._config.embedding_type.lower() in w.name.lower())]
  sparse_encoding_regularizer = model_utils.get_regularizer(
      sparse_lookup_regularization, sparse_lookup_regularization_weight)
  encoder_regularizer = model_utils.get_regularizer(
      encoder_regularization, encoder_regularization_weight)
  sparse_encoder_lookup_weights = [
      w for w in embedding_weights if "lookup" in w.name.lower()]
  encoder_weights = [
      w for w in embedding_weights if "lookup" not in w.name.lower()]
  if not sparse_encoding_regularizer:
    sparse_lookup_reg_penalty = tf.constant(0.)
  else:
    sparse_lookup_reg_penalty = slim.apply_regularization(
        sparse_encoding_regularizer, sparse_encoder_lookup_weights)
  if (self._config.embedding_type == types.EmbeddingType.DEEP_EMBEDDING and
      self._config.deep.encoder_type == types.EmbeddingEncoderType.SNR):
    encoders = self.embedding.get_encoders().values()
    encoders_regularization_penalty = sum(
        [encoder.get_regularization_penalty() for encoder in encoders])
    return sparse_lookup_reg_penalty + encoders_regularization_penalty
  if not encoder_regularizer or not encoder_weights:
    encoder_reg_penalty = tf.constant(0.)
  else:
    encoder_reg_penalty = slim.apply_regularization(encoder_regularizer,
                                                    encoder_weights)
  embedding_regularization_penalty = (
      sparse_lookup_reg_penalty + encoder_reg_penalty)
  return embedding_regularization_penalty
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def regularization_loss(embedding, lambda_coef, regularization_type='msq'):\n loss = tf.constant(0.0, dtype=tf.float32)\n if regularization_type == 'msq':\n loss = tf.reduce_mean(tf.square(tf.norm(embedding, axis=1))) * 0.5\n elif regularization_type == 'unit_length':\n loss = tf.reduce_mean(tf.abs(tf.square(tf.norm(embedding, axis=1)) - 1.0))\n else:\n raise ValueError('Regularization type is not known')\n return loss * lambda_coef", "def loss_weights(self):\n return None", "def compute_loss(self):\n self.test_logits = self.compute_logits()\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.data.test_labels, logits=self.test_logits)\n cross_entropy_loss = tf.reduce_mean(loss)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = cross_entropy_loss + self.weight_decay * regularization\n return loss", "def loss(self) -> KernelLoss:\n return self._loss", "def apply_regularization(self, w, loss, gradient, regularization, lambda_, m):\n if regularization == 'l2':\n loss += lambda_ / (2 * m) * np.squeeze(w.T.dot(w))\n gradient += lambda_ / m * w\n elif regularization == 'l1':\n loss += lambda_ / (2 * m) * np.sum(np.abs(w))\n gradient += lambda_ / m * np.sum((w >= 0) * 1 + (w < 0) * -1)\n return loss, gradient", "def get_loss(self):\n return self.loss / self.cnt", "def get_loss(self):\n raise NotImplementedError", "def loss(self):\n return la.norm(self.resids) / self.normX", "def _get_loss_weight(self) -> torch.Tensor:\n n_pos: torch.Tensor = 0.0\n n_neg: torch.Tensor = 0.0\n\n for _, ground_truth in self.train_loader:\n n_poss_curr = ground_truth.sum()\n n_pos += n_poss_curr\n n_neg += ground_truth.numel() - n_poss_curr\n\n eps = torch.finfo(n_pos.dtype).eps\n return n_neg / (n_pos + eps)", "def loss(self):\n return self._get(\"loss\")", "def loss(self):\n return self._loss", "def compute_loss(self):\n self.prototypes = self.compute_prototypes()\n self.test_logits = self.compute_logits()\n loss = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.episode.test_labels, logits=self.test_logits)\n cross_entropy_loss = tf.reduce_mean(loss)\n regularization = tf.reduce_sum(\n tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n loss = cross_entropy_loss + self.weight_decay * regularization\n return loss", "def loss_op(self):\n return self.loss", "def regularizer(self):\n \n # L2 regularization for the fully connected parameters.\n regularizers = (tf.nn.l2_loss(self.weights.wd1) + tf.nn.l2_loss(self.weights.bd1) + \n tf.nn.l2_loss(self.weights.wout) + tf.nn.l2_loss(self.weights.bout))\n return regularizers", "def _loss(W):\r\n M = X @ W\r\n if loss_type == 'l2':\r\n R = X - M\r\n loss = 0.5 / X.shape[0] * (R ** 2).sum()\r\n G_loss = - 1.0 / X.shape[0] * X.T @ R\r\n elif loss_type == 'logistic':\r\n loss = 1.0 / X.shape[0] * (np.logaddexp(0, M) - X * M).sum()\r\n G_loss = 1.0 / X.shape[0] * X.T @ (sigmoid(M) - X)\r\n elif loss_type == 'poisson':\r\n S = np.exp(M)\r\n loss = 1.0 / X.shape[0] * (S - X * M).sum()\r\n G_loss = 1.0 / X.shape[0] * X.T @ (S - X)\r\n else:\r\n raise ValueError('unknown loss type')\r\n return loss, G_loss", "def get_reg(self):\n loss = 0\n for name, m in self.net.named_children():\n if name.startswith('wave'):\n loss += m[0].GainLayer.get_reg()\n elif name.startswith('conv'):\n loss += 0.5 * self.wd * torch.sum(m[0].weight**2)\n loss += 0.5 * self.wd * torch.sum(self.fc1.weight**2)\n return loss", "def loss(self, X, Y, lmd):\n P, _ = self.forward(X)\n loss = np.mean(-np.log(np.einsum('ij,ji->i', 
Y.T, P)))\n\n reg = 0 # Regularization term\n for w in self.W:\n reg += np.sum(np.square(w))\n\n reg *= lmd\n\n cost = loss + reg\n\n return cost", "def get_loss(self, x, weights=1.0):\n input_dtype = x.dtype\n x = self.cast(x, mstype.float32)\n weights = self.cast(weights, mstype.float32)\n x = self.mul(weights, x)\n if self.reduce and self.average:\n x = self.reduce_mean(x, self.get_axis(x))\n if self.reduce and not self.average:\n x = self.reduce_sum(x, self.get_axis(x))\n x = self.cast(x, input_dtype)\n return x", "def _calc_smooth_loss(self, loss, len_examples, regularizer_type=None):\n if regularizer_type == 'L2':\n # Add regulatization term to loss\n\n return 1./len_examples * loss\n else:\n return 1./len_examples * loss", "def loss(self):\n if not self.run:\n self._run()\n return self.model_loss", "def _weighted_loss(loss, weight):\n with ops.name_scope(None, \"weighted_loss\", (loss, weight)) as name:\n return math_ops.multiply(\n array_ops.reshape(\n loss, shape=(-1,)),\n array_ops.reshape(\n weight, shape=(-1,)),\n name=name)", "def _get_loss(self):\n raise NotImplementedError", "def unnormalized_loss(self):\n return 0.5 * la.norm(self.resids) ** 2", "def _get_model_loss(self, scores_pos, scores_neg):\n\n if self.regularizer is not None:\n return self.loss.apply(scores_pos, scores_neg) + \\\n self.regularizer.apply([self.ent_emb, self.rel_emb])\n else:\n return self.loss.apply(scores_pos, scores_neg)", "def gradient_penalty_loss(y_true, y_pred, averaged_samples, wgan_gp_weight):\n # first get the gradients:\n # assuming: - that y_pred has dimensions (batch_size, 1)\n # - averaged_samples has dimensions (batch_size, nbr_features)\n # gradients afterwards has dimension (batch_size, nbr_features), basically\n # a list of nbr_features-dimensional gradient vectors\n gradients = K.gradients(y_pred, averaged_samples)[0]\n # compute the euclidean norm by squaring ...\n gradients_sqr = K.square(gradients)\n # ... summing over the rows ...\n gradients_sqr_sum = K.sum(gradients_sqr,\n axis=np.arange(1, len(gradients_sqr.shape)))\n # ... and sqrt\n gradient_l2_norm = K.sqrt(gradients_sqr_sum)\n # compute lambda * (1 - ||grad||)^2 still for each single sample\n gradient_penalty = wgan_gp_weight * K.square(1 - gradient_l2_norm)\n # return the mean as loss over all the batch samples\n return K.mean(gradient_penalty)", "def ridge_loss(w: FloatTensor, x: FloatTensor, y: FloatTensor, lmb: float) -> float:\n return ols_loss(w, x, y, 0.0) + lmb * w.pow(2).sum()", "def gradient_penalty_loss(y_true, y_pred, averaged_samples, gradient_penalty_weight):\n gradients = K.gradients(K.sum(y_pred), averaged_samples)\n gradient_l2_norm = K.sqrt(K.sum(K.square(gradients)))\n gradient_penalty = gradient_penalty_weight * K.square(1 - gradient_l2_norm)\n return gradient_penalty", "def l2_regularization_penalty(self):\n return self.l2 * (np.linalg.norm(self.weights)**2)", "def compute_loss(y, tx, w):\n e = y - tx.dot(w)\n return calculate_mse(e)", "def get_loss(self, output, target, target_weight):\n losses = dict()\n assert not isinstance(self.loss, nn.Sequential)\n assert target.dim() == 3 and target_weight.dim() == 3\n losses['reg_loss'] = self.loss(output, target, target_weight)\n return losses" ]
[ "0.6920596", "0.65842164", "0.6542781", "0.6534144", "0.6424401", "0.63727957", "0.6351934", "0.63452446", "0.63420784", "0.6311684", "0.63035953", "0.6266828", "0.6257979", "0.62567496", "0.624748", "0.6185578", "0.6178956", "0.6168068", "0.6154505", "0.614846", "0.61013037", "0.60985553", "0.60814863", "0.6071226", "0.6061937", "0.60429066", "0.6026084", "0.60182685", "0.60087115", "0.59976447" ]
0.7138567
0
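A simplified sketch of the weight split in get_embedding_regularization_loss above, with NumPy arrays standing in for TensorFlow variables; the variable names and regularization weights here are assumptions for illustration only:

import numpy as np

weights = {
    "deep_embedding/page_id_lookup": np.ones((100, 8)),      # sparse lookup table
    "deep_embedding/encoder/dense/kernel": np.ones((8, 8)),  # encoder weight
}

def l2_penalty(w, scale):
    # L2 penalty: scale * sum of squared entries.
    return scale * float(np.sum(w ** 2))

sparse_lookup_weight, encoder_weight = 1e-5, 1e-4
lookup_penalty = sum(l2_penalty(w, sparse_lookup_weight)
                     for name, w in weights.items() if "lookup" in name.lower())
encoder_penalty = sum(l2_penalty(w, encoder_weight)
                      for name, w in weights.items() if "lookup" not in name.lower())
print(lookup_penalty + encoder_penalty)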
Combines embeddings into one input for the model. The embeddings can be combined in different ways and this function encapsulates that logic and returns an input vector based on the combination method that is specified.
def _combine_embeddings_for_input(
    self, embedding_dict: Dict[str, int]) -> tf.Tensor:
  if self._config.embedding_combination_method == (
      types.EmbeddingCombinationMethod.SUM_ALL):
    return sum(embedding_dict.values())
  elif self._config.embedding_combination_method == (
      types.EmbeddingCombinationMethod.CONCATENATE):
    return tf.concat(list(embedding_dict.values()), axis=-1)
  elif self._config.embedding_combination_method == (
      types.EmbeddingCombinationMethod.SUM_BY_SUFFIX):
    feature_suffixes = [
        get_feature_suffix(feat) for feat in embedding_dict.keys()
    ]
    combined_embedding = None
    for suffix in feature_suffixes:
      embeddings_to_sum = []
      for feat, emb in embedding_dict.items():
        feat_suffix = get_feature_suffix(feat)
        if feat not in self._config.identity_lookup_features and (feat_suffix == suffix):
          embeddings_to_sum.append(emb)
      if combined_embedding is None:
        combined_embedding = [sum(embeddings_to_sum)]
      else:
        combined_embedding += [sum(embeddings_to_sum)]
    combined_embedding += [
        embedding_dict[feat] for feat in self._config.identity_lookup_features
    ]
    return tf.concat(combined_embedding, axis=1)
  elif self._config.embedding_combination_method == (
      types.EmbeddingCombinationMethod.COMBINE_SNR_OUT):
    return deep_encoders.compute_combined_snr_embedding(
        embedding_dict=embedding_dict)
  else:
    raise ValueError("Embedding combination method "
                     f"{self._config.embedding_combination_method} "
                     "not recognized.")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def call(self, x, *args, **kwargs):\n with tf.name_scope(\"embedding\"):\n # fills out of bound values with padding symbol\n out_bound_mask = tf.cast(x > (self.vocab_size - 1), dtype=tf.int32)\n x *= 1 - out_bound_mask\n x += out_bound_mask * tf.cast(self.pad_sym, dtype=tf.int32)\n\n embeddings = tf.gather(self.shared_weights, x)\n if self.embed_scale:\n # Scale embedding by the sqrt of the hidden size\n embeddings *= self.hidden_size ** 0.5\n\n if self.mask_paddings:\n # Create binary array of size [batch_size, length]\n # where 1 = padding, 0 = not padding\n padding = get_padding(x, padding_value=self.pad_sym)\n\n # Set all padding embedding values to 0\n # embeddings *= tf.expand_dims(1 - padding, -1)\n embeddings *= tf.cast(tf.expand_dims(1.0 - padding, -1), dtype=embeddings.dtype)\n return embeddings", "def mergeEmbeddings(embeddings: Iterable[Iterable[float]],\r\n weights: Iterable[float]=None,\r\n method: FunctionType=np.sum,\r\n **kwargs) -> np.array:\r\n if len(embeddings) == 0:\r\n raise ValueError(\"embeddings input empty!\")\r\n if weights is not None:\r\n if type(weights) != np.array:\r\n weights = np.array(weights)\r\n if len(weights) != len(embeddings):\r\n raise ValueError(\r\n \"Incorrect # of weights! {0} weights, {1} embeddings\".format(\r\n len(weights), len(embeddings)))\r\n if method is None:\r\n method = np.sum\r\n # sane default behavior is \"sum\"\r\n if not kwargs and method == np.sum:\r\n if weights is not None:\r\n embeddings = embeddings * weights[:, np.newaxis]\r\n # This should not delete the actual value, \r\n # only the local name (unit tested)\r\n weights = None\r\n kwargs['axis'] = 0\r\n try:\r\n if weights is None:\r\n return method(embeddings, **kwargs)\r\n return method(embeddings, weights, **kwargs)\r\n except TypeError as te:\r\n print((\"\\n\\nError calling defined method.\\n \"\r\n + \"method called: {0}\\n\").format(method),\r\n \"\\n\\nNOTE: This can happen if you are passing weights \"\r\n \"in a function that doesn't take them as the second argument!\\n\"\r\n \"Function signature was:\\n\\t {0}\".format(inspect.signature(method)),\r\n (\"\\nArgs passed were:\"\r\n + \"\\n\\tembeddings: {0}\"\r\n + \"\\n\\tweights: {1}\"\r\n + \"\\n\\tkwargs: {2}\").format(\r\n embeddings, weights, kwargs))\r\n raise(te)", "def omnibus_embedding_pairwise(\n graphs: List[NxGraphType],\n dimensions: int = 100,\n elbow_cut: Optional[int] = None,\n svd_solver_algorithm: SvdAlgorithmType = \"randomized\",\n svd_solver_iterations: int = 5,\n svd_seed: Optional[int] = None,\n weight_attribute: str = \"weight\",\n use_laplacian: bool = False,\n) -> List[Tuple[Embeddings, Embeddings]]:\n check_argument(len(graphs) > 1, \"more than one graph is required\")\n\n check_argument(dimensions >= 1, \"dimensions must be positive\")\n\n check_argument(elbow_cut is None or elbow_cut >= 1, \"elbow_cut must be positive\")\n\n check_argument(\n svd_solver_algorithm in __SVD_SOLVER_TYPES,\n f\"svd_solver_algorithm must be one of the values in {','.join(__SVD_SOLVER_TYPES)}\",\n )\n\n check_argument(svd_solver_iterations >= 1, \"svd_solver_iterations must be positive\")\n\n check_argument(\n svd_seed is None or 0 <= svd_seed <= 2**32 - 1,\n \"svd_seed must be a nonnegative, 32-bit integer\",\n )\n\n used_weight_attribute = _graphs_precondition_checks(graphs, weight_attribute)\n perform_augment_diagonal = not use_laplacian\n\n graph_embeddings = []\n\n # create a graph that contains all nodes and edges across the entire corpus\n union_graph = graphs[0].copy()\n for graph in 
graphs[1:]:\n union_graph.add_edges_from(graph.edges())\n\n union_graph_lcc: Union[\n nx.Graph, nx.Digraph, nx.OrderedGraph, nx.OrderedDiGraph\n ] = largest_connected_component(union_graph)\n union_graph_lcc_nodes: Set[Any] = set(list(union_graph_lcc.nodes()))\n\n union_node_ids = np.array(list(union_graph_lcc_nodes))\n\n previous_graph = graphs[0].copy()\n\n for graph in graphs[1:]:\n current_graph = graph.copy()\n\n # assure both graphs contain the exact same node set\n # by removing nodes or adding isolates as needed\n _sync_nodes(previous_graph, union_graph_lcc_nodes)\n _sync_nodes(current_graph, union_graph_lcc_nodes)\n\n # remove self loops, run pass to ranks and diagonal augmentation\n previous_graph_augmented = _augment_graph(\n previous_graph,\n union_graph_lcc_nodes,\n used_weight_attribute,\n perform_augment_diagonal=perform_augment_diagonal,\n )\n current_graph_augmented = _augment_graph(\n current_graph,\n union_graph_lcc_nodes,\n used_weight_attribute,\n perform_augment_diagonal=perform_augment_diagonal,\n )\n\n model = OmnibusEmbed(\n n_components=dimensions,\n n_elbows=None, # we will do elbow cuts\n algorithm=svd_solver_algorithm,\n n_iter=svd_solver_iterations,\n check_lcc=False,\n diag_aug=False,\n concat=False,\n svd_seed=svd_seed,\n lse=use_laplacian,\n )\n\n previous_embedding, current_embedding = model.fit_transform(\n graphs=[previous_graph_augmented, current_graph_augmented]\n )\n\n previous_embedding_cut = _elbow_cut_if_needed(\n elbow_cut, graph.is_directed(), model.singular_values_, previous_embedding\n )\n\n current_embedding_cut = _elbow_cut_if_needed(\n elbow_cut, graph.is_directed(), model.singular_values_, current_embedding\n )\n\n graph_embeddings.append(\n (\n Embeddings(union_node_ids, previous_embedding_cut),\n Embeddings(union_node_ids, current_embedding_cut),\n )\n )\n\n return graph_embeddings", "def _pairwise_dot_product(self, src_embeds, vocab_embeds, cosine=False):\n if cosine:\n src_embeds = F.normalize(src_embeds, dim=-1, p=2)\n vocab_embeds = F.normalize(vocab_embeds, dim=-1, p=2)\n # dot product\n dot_product = torch.einsum(\"bij,kj->bik\", (src_embeds, vocab_embeds))\n return dot_product", "def combined_emb(self) -> Tensor:\n return self._combined_emb", "def incorporate_embeddings(self, x):\n all_embedded_data = []\n for embedding_layer_ix, embedding_var in enumerate(self.columns_of_data_to_be_embedded):\n data = x[:, :, embedding_var]\n embedded_data = self.embedding_layers[embedding_layer_ix](data)\n all_embedded_data.append(embedded_data)\n if len(all_embedded_data) > 1: all_embedded_data = Concatenate(axis=2)(all_embedded_data)\n else: all_embedded_data = all_embedded_data[0]\n non_embedded_columns = [col for col in range(x.shape[2]) if col not in self.columns_of_data_to_be_embedded]\n if len(non_embedded_columns) > 0:\n x = tf.gather(x, non_embedded_columns, axis=2)\n x = Concatenate(axis=2)([tf.dtypes.cast(x, float), all_embedded_data])\n else: x = all_embedded_data\n return x", "def build_encoder_bi(tparams, options):\n\t# word embedding (source)\n\tembedding = tensor.tensor3('embedding', dtype='float32')\n\tembeddingr = embedding[::-1]\n\tx_mask = tensor.matrix('x_mask', dtype='float32')\n\txr_mask = x_mask[::-1]\n\n\t# encoder\n\tproj = get_layer(options['encoder'])[1](tparams, embedding, options,\n\t\t\t\t\t\t\t\t\t\t\tprefix='encoder',\n\t\t\t\t\t\t\t\t\t\t\tmask=x_mask)\n\tprojr = get_layer(options['encoder'])[1](tparams, embeddingr, options,\n\t\t\t\t\t\t\t\t\t\t\t prefix='encoder_r',\n\t\t\t\t\t\t\t\t\t\t\t 
mask=xr_mask)\n\n\tctx = tensor.concatenate([proj[0][-1], projr[0][-1]], axis=1)\n\n\treturn embedding, x_mask, ctx", "def call(self, inputs):\n unpacked_inputs = tf_utils.unpack_inputs(inputs)\n word_embeddings = unpacked_inputs[0]\n token_type_ids = unpacked_inputs[1]\n input_shape = tf_utils.get_shape_list(word_embeddings, expected_rank=3)\n batch_size = input_shape[0]\n seq_length = input_shape[1]\n width = input_shape[2]\n\n output = word_embeddings\n if self.use_type_embeddings:\n flat_token_type_ids = tf.reshape(token_type_ids, [-1])\n token_type_embeddings = tf.gather(self.type_embeddings,\n flat_token_type_ids)\n token_type_embeddings = tf.reshape(token_type_embeddings,\n [batch_size, seq_length, width])\n output += token_type_embeddings\n\n if self.use_position_embeddings:\n position_embeddings = tf.expand_dims(\n tf.slice(self.position_embeddings, [0, 0], [seq_length, width]),\n axis=0)\n\n output += position_embeddings\n\n output = self.output_layer_norm(output)\n output = self.output_dropout(output)\n\n return output", "def call(self, inputs):\n unpacked_inputs = tf_utils.unpack_inputs(inputs)\n word_embeddings = unpacked_inputs[0]\n token_type_ids = unpacked_inputs[1]\n input_shape = tf_utils.get_shape_list(word_embeddings, expected_rank=3)\n batch_size = input_shape[0]\n seq_length = input_shape[1]\n width = input_shape[2]\n\n output = word_embeddings\n if self.use_type_embeddings:\n flat_token_type_ids = tf.reshape(token_type_ids, [-1])\n token_type_embeddings = tf.gather(self.type_embeddings,\n flat_token_type_ids)\n token_type_embeddings = tf.reshape(token_type_embeddings,\n [batch_size, seq_length, width])\n output += token_type_embeddings\n\n if self.use_position_embeddings:\n position_embeddings = tf.expand_dims(\n tf.slice(self.position_embeddings, [0, 0], [seq_length, width]),\n axis=0)\n\n output += position_embeddings\n\n output = self.output_layer_norm(output)\n output = self.output_dropout(output)\n\n return output", "def from_embeds_pairs(embeds, mask, kernel_size):\n # Mask out words out of sequence len\n embeds = (embeds * mask).unsqueeze(2)\n\n # Expand kernel_size times to [batch, seq_len, kernel_size, network_size]\n from_embeds = embeds.expand(embeds.size(0), embeds.size(1), kernel_size, embeds.size(3))\n\n return from_embeds", "def _get_embedding(self, data):\n embedding_list = [super()._get_embedding(data)]\n context = data['context']\n for i in range(context.shape[1]):\n embedding_list.append(getattr(self, f'context{i}')(context[:, i:i+1]))\n return torch.cat(embedding_list, dim=1)", "def _embed_result(self, embedding):\n # project original embedding\n project_weight = self.project.weight # (o, c)\n project_embedding = embedding.permute(0, 2, 1).unsqueeze(-1) \\\n * project_weight.permute(1, 0) # (n, e, c, 1) * (c, o) -> (n, e, c, o)\n project_embedding = project_embedding.permute(0, 3, 2, 1) # (n, o, c, e)\n # interaction\n square_of_sum = torch.sum(project_embedding, dim=2) ** 2\n sum_of_square = torch.sum(project_embedding ** 2, dim=2)\n embed_result = 0.5 * (square_of_sum - sum_of_square).sum(dim=2)\n return embed_result", "def _embeddings(self, xs):\n n_feats, batch_size, seq_len = xs.size()\n\n assert n_feats == self.n_feats\n\n res = [emb(x) for emb, x in zip(self.embeddings, xs)]\n x = torch.cat(res, 2)\n\n return x", "def embed_sent(self, x: Any) -> expression_seqs.ExpressionSequence:\n # single mode\n if not batchers.is_batched(x):\n expr = expression_seqs.ExpressionSequence(expr_list=[self.embed(word) for word in x])\n # minibatch mode\n elif 
type(self) == LookupEmbedder:\n embeddings = []\n for word_i in range(x.sent_len()):\n batch = batchers.mark_as_batch([single_sent[word_i] for single_sent in x])\n embeddings.append(self.embed(batch))\n expr = expression_seqs.ExpressionSequence(expr_list=embeddings, mask=x.mask)\n else:\n assert type(x[0]) == sent.SegmentedSentence, \"Need to use CharFromWordTextReader for non standard embeddings.\"\n embeddings = []\n all_embeddings = []\n for sentence in x:\n embedding = []\n for i in range(sentence.len_unpadded()):\n embed_word = self.embed(sentence.words[i])\n embedding.append(embed_word)\n all_embeddings.append(embed_word)\n embeddings.append(embedding)\n # Useful when using dy.autobatch\n dy.forward(all_embeddings)\n all_embeddings.clear()\n # Pad the results\n expr = batchers.pad_embedding(embeddings)\n\n return expr", "def hybrid_forward(self, F, words, wordsmask, subwords, subwordsmask):\n #pylint: disable=arguments-differ\n wordsmask = F.expand_dims(wordsmask, axis=-1)\n embeddings = F.broadcast_mul(self.embedding(words), wordsmask)\n subword_embeddings = self.subword_embedding(subwords, subwordsmask)\n return embeddings + subword_embeddings", "def _generate_embeddings(self, config): \n tr_parts = []\n te_parts = []\n all_columns = []\n for comp in self.components:\n tr_tmp, te_tmp, cols = comp.generate(config)\n if cols != None:\n print(tr_tmp.shape,te_tmp.shape)\n tr_parts.append(tr_tmp)\n te_parts.append(te_tmp)\n all_columns += cols\n X_train = np.concatenate(tr_parts, axis=1)\n X_test = np.concatenate(te_parts, axis=1)\n print(\"Concatenated size:\", X_train.shape, X_test.shape)\n self.feature_columns = all_columns\n return X_train, X_test", "def _get_embedding_layer(self, input_data, doc_input_data):\n opts = self._options\n word_embedding = tf.Variable(tf.random_uniform((self.vocab_size, opts.embed_dim), -1.0, 1.0))\n embed = []\n\n temp = tf.zeros([opts.batch_size, opts.embed_dim])\n embed_d = []\n for n in range(opts.sentence_sample):\n temp = tf.add(temp, tf.nn.embedding_lookup(word_embedding, doc_input_data[:, n]))\n embed_d.append(temp)\n\n if opts.concat == 'True':\n combined_embed_vector_length = opts.embed_dim * opts.window_size + opts.embed_dim\n for j in range(opts.window_size):\n embed_w = tf.nn.embedding_lookup(word_embedding, input_data[:, j])\n embed.append(embed_w)\n embed.append(embed_d)\n else:\n combined_embed_vector_length = opts.embed_dim\n embed_w = tf.zeros([opts.batch_size, opts.embed_dim])\n for j in range(opts.window_size):\n embed_w += tf.nn.embedding_lookup(word_embedding, input_data[:, j])\n embed_w += embed_d\n embed.append(embed_w)\n\n return tf.concat(embed, 1), word_embedding, combined_embed_vector_length", "def build_embeddings(opt, word_dict, for_encoder='src'):\n if for_encoder=='src':\n embedding_dim = opt.src_word_vec_size #512\n elif for_encoder=='tgt':\n embedding_dim = opt.tgt_word_vec_size\n elif for_encoder=='structure':\n embedding_dim = 64\n\n word_padding_idx = word_dict.stoi[Constants.PAD_WORD]\n num_word_embeddings = len(word_dict)\n \n if for_encoder=='src' or for_encoder=='tgt':\n\n return Embeddings(word_vec_size=embedding_dim,\n position_encoding=opt.position_encoding,\n dropout=opt.dropout,\n word_padding_idx=word_padding_idx,\n word_vocab_size=num_word_embeddings,\n sparse=opt.optim == \"sparseadam\")\n elif for_encoder=='structure':\n return Embeddings(word_vec_size=embedding_dim,\n position_encoding=False,\n dropout=opt.dropout,\n word_padding_idx=word_padding_idx,\n word_vocab_size=num_word_embeddings,\n 
sparse=opt.optim == \"sparseadam\")", "def get_product_input(self):\n result = []\n result.append(self.one_hot_translate_product(self.curr_productfea, self.depthlist))\n result.append(tf.nn.embedding_lookup(self.product_embeddings, self.curr_productid))\n result.append(tf.nn.embedding_lookup(self.aisle_embeddings, self.curr_aisleid))\n result.append(tf.nn.embedding_lookup(self.department_embeddings, self.curr_departmentid))\n result.append(self.one_hot_translate(self.curr_productidx, self.max_productlen+1))\n product_input = tf.concat(result, axis=-1)\n return product_input", "def _add_embedding_layer(model_1, model_2):\n result_layer = torch.nn.Embedding(\n model_1.num_embeddings, model_1.embedding_dim + model_2.embedding_dim\n )\n result_layer.weight = torch.nn.Parameter(\n torch.cat((model_1.weight.data, model_2.weight.data), dim=1)\n )\n return result_layer", "def get_embeddings(self, in_data):\n context, da = in_data\n if self.fixed_divide:\n da_emb = super(PersonageContextDAEmbeddingSeq2SeqExtract, self).get_embeddings(da, pad=True)\n else:\n da_emb = super(PersonageContextDAEmbeddingSeq2SeqExtract, self).get_embeddings(da, pad=False)\n\n # Shubhangi: what this step essentially does is it replaces the context words by their token, with UNK as default.\n # again , we don't need this since our context data is essentially vectors therefore commenting this out\n # similary we don't need context embedding , that's exactly what context is already .\n\n # context_emb = []\n context_emb = [float(parameter[0]) for parameter in context]\n\n # for tok in context[-max_context_len:]:\n # context_emb.append(self.dict_token.get(tok, self.UNK_TOKEN))\n\n # Shubhangi: padding is needed because each context sentence could be of different length ,\n # we don't need to include context in padding as we're going to have a fixed size\n # (max_context_len - len(context)) = 0\n\n\n # padding = [self.UNK_TOKEN] * (max_context_len - len(context))\n\n # Shubhangi: padding might be harmless for now therefore not removing ,\n # essentially what this is doing is concatenating the arrays and sending\n if self.use_div_token:\n return context_emb + [self.DIV_TOKEN] + da_emb\n # return padding + context_emb + [self.DIV_TOKEN] + da_emb\n # return padding + context_emb + da_emb\n return context_emb + da_emb", "def forward(self, term_embeddings):\n dot_out = self.weight(term_embeddings).squeeze(2)\n return f.softmax(dot_out, 1)", "def hybrid_forward(self, F, words, wordsmask):\n #pylint: disable=arguments-differ\n wordsmask = F.expand_dims(wordsmask, axis=-1)\n return F.broadcast_mul(self.embedding(words), wordsmask)", "def call(self, inputs, **kwargs):\n\n # unpack all the requires model inputs, some might be empty tensors:\n [queries, values, queries_mask, values_mask, ids, permutation,\n absolute_positions, relative_positions, pointer_labels, \n logits_labels, partial_pos, pointer_probs, log_probs,\n object_detections, object_features, object_boxes] = inputs\n\n y = self.detection_embedding(object_detections, **kwargs)\n values = self.dense(tf.concat([\n object_features, object_boxes, y], 2), **kwargs)\n a = position_encoding(tf.shape(queries)[1], self.hidden_size)\n b = self.word_embedding(queries, **kwargs)\n if self.mode == 'decoder':\n b = tf.matmul(absolute_positions, b)\n if self.decoder_pos_emb:\n b = a + b \n elif self.mode == 'pt' and self.decoder_pos_emb:\n # we do need positional encoding for Permutation Transformer\n b = a + b\n \n return [b, values, queries_mask, values_mask, ids, permutation,\n 
absolute_positions, relative_positions,\n pointer_labels, logits_labels, \n partial_pos, pointer_probs, log_probs,\n object_detections, object_features, object_boxes]", "def dot_product(v, w):\n return sum(v_i * w_i for v_i, w_i in zip(v, w))", "def build_encoder(tparams, options):\n\t# word embedding (source)\n\tembedding = tensor.tensor3('embedding', dtype='float32')\n\tx_mask = tensor.matrix('x_mask', dtype='float32')\n\n\t# encoder\n\tproj = get_layer(options['encoder'])[1](tparams, embedding, options,\n\t\t\t\t\t\t\t\t\t\t\tprefix='encoder',\n\t\t\t\t\t\t\t\t\t\t\tmask=x_mask)\n\tctx = proj[0][-1]\n\n\treturn embedding, x_mask, ctx", "def add(v: Vector, w: Vector) -> Vector:\n assert len(v) == len(w), 'both vectors must have the same length'\n\n return [v_item + w_item for v_item, w_item in zip(v, w)]", "def vector_add(v, w):\n return [v_i + w_i for v_i, w_i in zip(v,w)]", "def embed(self, loader, model):\n print(\" ** Embedding words\")\n\n words = loader.words\n vectors = [model.get_word_vector(word) for word in words]\n\n return [(w, *v) for w, v in zip(words, vectors)]", "def vector_add(v, w):\n return [v_i + w_i for v_i, w_i in zip(v, w)]" ]
[ "0.5955232", "0.58174616", "0.58171684", "0.57225627", "0.56999433", "0.5671518", "0.5625221", "0.55981785", "0.55981785", "0.5576363", "0.5552107", "0.55190223", "0.545595", "0.5426745", "0.53774136", "0.5334926", "0.5326682", "0.5315223", "0.5301722", "0.5273913", "0.52737385", "0.52514243", "0.524287", "0.52410555", "0.5201827", "0.5201339", "0.520023", "0.5191314", "0.51907563", "0.5183631" ]
0.7343154
0
Gets the suffix of a feature from its name.
def get_feature_suffix(feature_name: str) -> str: if "_" not in feature_name: return "" return feature_name.split("_")[-1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def suffix(self):\n return self[\"suffix\"]", "def suffix(self):\n return self[\"suffix\"]", "def suffix(self) -> typing.Optional[str]:\n return self._values.get('suffix')", "def suffix ( self ) :\n return self.__suffix", "def suffix ( self ) :\n return self.__suffix", "def suffix(self):\n return self._suffix", "def suffix(self):\n return self._suffix", "def suffix(self):\n return self._suffix", "def get_suffix(self,prefix):\n return random.choice(self[prefix])", "def suffix(value: str, sep: str = \":\") -> str:\n return split(value, sep)[1]", "def name(self):\n suffix = ''\n if 'suffix' in self.context['strategy'] \\\n and self.context['strategy']['suffix'] is not None \\\n and len(self.context['strategy']['suffix']) > 0:\n suffix = \"_\" + self.context['strategy']['suffix']\n\n return self.get_name(self.context, suffix)", "def replace_suffix (name, new_suffix):\n assert isinstance(name, basestring)\n assert isinstance(new_suffix, basestring)\n split = os.path.splitext (name)\n return split [0] + new_suffix", "def get_module_dict_key_from_name(name: str, feature_name_suffix: str = FEATURE_NAME_SUFFIX) -> str:\n key = name.replace(\".\", \"__ludwig_punct_period__\")\n return key + feature_name_suffix", "def get_module_dict_key_from_name(name: str, feature_name_suffix: str=FEATURE_NAME_SUFFIX) ->str:\n key = name.replace('.', '__ludwig_punct_period__')\n return key + feature_name_suffix", "def get_suffix_ml_model():\n suffix = ''\n \n # consider if the model uses tail or not\n if gml.USE_TAIL: \n suffix += '_use_tail'\n else: \n suffix += '_no_tail'\n\n # consider the way of picking target variable for the model\n if gml.WAY_MODEL_TGT == 'absolute':\n suffix += '_absolute'\n elif gml.WAY_MODEL_TGT == 'relative':\n suffix += '_relative'\n else: \n exit('error on the function that gets suffix')\n\n return suffix", "def get_name(self, suffix_number: int = 0) -> str:\n if suffix_number:\n return f\"{self.name}_{suffix_number}\"\n return cast(str, self.name)", "def removesuffix(self, x) -> String:\n pass", "def suffix(self) -> Optional[URISuffix]:\n return self._suffix", "def suffix():\r\n\r\n return _random.choice(\r\n [\r\n 'Sr.', 'Jr.', 'II', 'III', 'IV', 'V'\r\n ]\r\n )", "def getSuffixPattern(self):\n return self.getOrDefault(\"suffixPattern\")", "def add_suffix(name: str, suffix: str):\n return f'{name}_{suffix}'", "def get_suffix(self):\n if self.device:\n return self.device\n else:\n return '%s%d' % (self.devletters(), self.get_index() + 1)", "def feature(root, suffix):\r\n if suffix == '$':\r\n return ('$', suffix)\r\n return (root[-1], suffix[0])", "def get_suffix(cls, raw_disable: RawDisable) -> str:\n variations = raw_disable.parent_test.variations\n\n maybe_variation_node = raw_disable.node.find(f'.//{cls.VARIATION_TAG}')\n if maybe_variation_node is None:\n return ''\n\n variation = maybe_variation_node.text\n if variation not in variations:\n raise DisableNodeProcessingException(f'could not find {variation!r} in defined variations; skipping node')\n\n idx = variations.index(variation)\n suffix = f'_{idx}'\n return suffix", "def _last_name(self, full_name):\n name_partition = full_name.partition(u',')\n no_suffix = name_partition[0].strip()\n suffix = name_partition[2].strip()\n name_parts = no_suffix.split()\n part_count = len(name_parts)\n if part_count == 1 or part_count == 2:\n return name_parts[-1], suffix\n else:\n assert part_count > 2\n if name_parts[-2].islower():\n return u' '.join(name_parts[-2:]), suffix\n else:\n return name_parts[-1], suffix", "def 
getSuffixesForWord(self, word):\n suffixes = self.word_suffixes.get(word, False)\n if suffixes is not False:\n return suffixes\n suffixes = []\n if word.isalpha():\n boundary = min(5, len(word))\n for i in range(1, boundary):\n suffixes.append(word[-i:])\n suffixes = tuple(suffixes)\n self.word_suffixes[word] = suffixes\n return suffixes", "def _metric_name(self, suffix):\r\n return '{}.{}'.format(self.METRIC_NAME, suffix)", "def is_suffix(suffix: str, word: str):\n return word.endswith(suffix)", "def get_name(strategy_context, suffix=''):\n return '{0}_{1}_{2}{3}'.format(strategy_context['strategy']['exo_name'],\n Swarm.get_direction(strategy_context)[1],\n strategy_context['strategy']['class'].name,\n suffix)", "def name_woext(self):\n return os.path.splitext(self._job)[0]" ]
[ "0.71289575", "0.71289575", "0.69416517", "0.6781878", "0.6781878", "0.67778254", "0.67778254", "0.67778254", "0.65710884", "0.6557411", "0.64998925", "0.6475674", "0.63860905", "0.63544905", "0.63543004", "0.62719065", "0.6252688", "0.6237147", "0.62303", "0.6210002", "0.6209157", "0.62060016", "0.61980784", "0.6170429", "0.6122284", "0.61080474", "0.60746974", "0.6073302", "0.6063546", "0.6032331" ]
0.87202376
0
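
A quick illustration of the positive document in the record above (not part of the dataset record itself; the feature names are made up for the example). It reproduces the same split-on-underscore rule and checks it on two sample inputs:

def get_feature_suffix(feature_name: str) -> str:
    # Same logic as the retrieved document: everything after the last underscore.
    if "_" not in feature_name:
        return ""
    return feature_name.split("_")[-1]

assert get_feature_suffix("age") == ""                       # no underscore -> empty suffix
assert get_feature_suffix("income_log_scaled") == "scaled"   # only the last token is returned
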
In the CoAP client read method, different exceptions can arise from the DTLS stack. Depending on the type of exception, a continuation might not be possible, or a logging might be desirable. With this callback both needs can be satisfied.
def _cb_ignore_read_exception(self, exception, client): return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _cb_ignore_read_exception(self, exception, client):\n return False", "def test_wantReadError(self):\n ctx = Context(SSLv23_METHOD)\n conn = Connection(ctx, None)\n with pytest.raises(WantReadError):\n conn.bio_read(1024)", "def _cb_ignore_listen_exception(self, exception, server):\n if isinstance(exception, ssl.SSLError):\n # A client which couldn’t verify the server tried to connect, continue but log the event\n if exception.errqueue[-1][0] == ssl.ERR_TLSV1_ALERT_UNKNOWN_CA:\n #print(\"Ignoring ERR_TLSV1_ALERT_UNKNOWN_CA from client %s\" %\n # (’unknown’ if not hasattr(exception, ’peer’) else str(exception.peer)))\n return True\n # ... and more ...\n return False", "async def read_or_exc(reader, n, timeout = None):\n\n\ttemp = await asyncio.gather(*[asyncio.wait_for(reader.read(n), timeout = timeout)], return_exceptions=True)\n\tif isinstance(temp[0], bytes):\n\t\treturn temp[0]\n\telse:\n\t\traise temp[0]", "def failure(self, cb: CircuitBreaker, exc: BaseException) -> None:", "def inReadEvent(self):\r\n try:\r\n self._checkAssert()\r\n if self.handshaker:\r\n self._doHandshakeOp()\r\n elif self.closer:\r\n self._doCloseOp()\r\n elif self.reader:\r\n self._doReadOp()\r\n elif self.writer:\r\n self._doWriteOp()\r\n else:\r\n self.reader = self.tlsConnection.readAsync(16384)\r\n self._doReadOp()\r\n except:\r\n self._clear()\r\n raise", "def exception(self):\n\n try:\n if self.conn1.poll():\n # There is something to read.\n\n # We get and save the exception.\n self._exception_receiver = self.conn1.recv()\n except EOFError:\n pass\n\n self.conn2.close()\n\n return self._exception_receiver", "def whenException(self, channel, call):", "def test_alpn_callback_exception(self):\n select_args = []\n\n def select(conn, options):\n select_args.append((conn, options))\n raise TypeError()\n\n client_context = Context(SSLv23_METHOD)\n client_context.set_alpn_protos([b\"http/1.1\", b\"spdy/2\"])\n\n server_context = Context(SSLv23_METHOD)\n server_context.set_alpn_select_callback(select)\n\n # Necessary to actually accept the connection\n server_context.use_privatekey(\n load_privatekey(FILETYPE_PEM, server_key_pem)\n )\n server_context.use_certificate(\n load_certificate(FILETYPE_PEM, server_cert_pem)\n )\n\n # Do a little connection to trigger the logic\n server = Connection(server_context, None)\n server.set_accept_state()\n\n client = Connection(client_context, None)\n client.set_connect_state()\n\n with pytest.raises(TypeError):\n interact_in_memory(server, client)\n assert select_args == [(server, [b\"http/1.1\", b\"spdy/2\"])]", "def test_exceptions_in_client_bubble_up(self):\n\n class SentinelException(Exception):\n pass\n\n def server_callback(*args):\n return self.sample_ocsp_data\n\n def client_callback(*args):\n raise SentinelException()\n\n client = self._client_connection(callback=client_callback, data=None)\n server = self._server_connection(callback=server_callback, data=None)\n\n with pytest.raises(SentinelException):\n handshake_in_memory(client, server)", "async def readexactly_or_exc(reader, n, timeout = None):\n\ttemp = await asyncio.gather(*[asyncio.wait_for(reader.readexactly(n), timeout = timeout)], return_exceptions=True)\n\tif isinstance(temp[0], bytes):\n\t\treturn temp[0]\n\telse:\n\t\traise temp[0]", "def _read_no_check(self):\n on_data_fut = _async.Future(self._loop)\n def on_data():\n on_data_fut.set_result(None)\n self._loop.add_reader(self._channel.fileno(), on_data)\n\n try:\n yield From(on_data_fut)\n finally:\n 
self._loop.remove_reader(self._channel.fileno())\n\n d = self._channel.recv(_MAX_READ_AMOUNT).decode()\n if d == '':\n logger.debug(\"{}: Read returned {!r}\".format(self, d))\n raise TransportNotConnected\n\n logger.debug(\"{}: Read {!r}\".format(self, d))\n if d.endswith(\"\\r\\n\"):\n d = d[:-2] + \"\\n\"\n d = d.encode('ascii')\n\n raise Return(d)", "def _outgoing_read_cb(self, peer_id, tcp_handle, data, error):\n\n outgoing = self._outgoing[peer_id]\n\n if error is not None:\n callbacks = outgoing.callbacks.values()\n self._unwind_outgoing(outgoing)\n del self._outgoing[peer_id]\n\n for cb in callbacks:\n cb(PEER_DISCONNECT, None)\n return\n\n outgoing.unpacker.feed(data)\n for payload in outgoing.unpacker:\n req_id, status, message = payload\n callback = outgoing.callbacks.get(req_id)\n\n if callback is not None:\n del outgoing.callbacks[req_id]\n callback(status, message)", "def _cb_ignore_write_exception(self, exception, client):\n return False", "def callback_exception(*args, **kwargs):\n raise DemoCallbackException()", "async def readuntil_or_exc(reader, separator = b'\\n', timeout = None):\n\n\ttemp = await asyncio.gather(*[asyncio.wait_for(reader.readuntil(separator), timeout = timeout)], return_exceptions=True)\n\tif isinstance(temp[0], bytes):\n\t\treturn temp[0]\n\telse:\n\t\traise temp[0]", "def _cb_ignore_write_exception(self, exception, client):\n return False", "def func_on_exception(*args, **keys):\n try:\n yield\n except Exception as exc:\n reraise = func(*args + (\":\", str(exc)), **keys)\n if not CRDS_EXCEPTION_TRAP:\n # In python-2, distinction between raise and \"raise something\". raise doesn't\n # wreck the traceback, raising a new improved exception does.\n raise\n # Augmented, the traceback is trashed from here down but the message is better when caught higher up.\n elif reraise:\n exc_class = keys.pop(\"exception_class\", exc.__class__)\n keys[\"end\"] = \"\"\n raise exc_class(format(*args + (\":\", str(exc)), **keys)) from exc\n else:\n pass # snuff the exception, func() probably issued a log message.", "def connection_lost(self, exc):\n if self._closing:\n return\n self._closing = True\n\n # inform yielding readers about closed connection\n if exc is None:\n logger.info(\"Connection closed for %s\", self)\n self.reader.feed_eof()\n else:\n logger.info(\"Connection lost for %s: %s\", self, exc)\n self.reader.set_exception(exc)\n\n # cancel protocol tasks, namely on-connect negotiations\n for task in self._tasks:\n task.cancel()\n\n # close transport (may already be closed), set _waiter_closed and\n # cancel Future _waiter_connected.\n self._transport.close()\n self._waiter_connected.cancel()\n if self.shell is None and self._waiter_closed is not None:\n # raise deprecation warning, _waiter_closed should not be used!\n self._waiter_closed.set_result(weakref.proxy(self))\n\n # break circular references.\n self._transport = None", "def handle_read(self):\n if self.established:\n return self._handle_read()\n self._handshake()", "def retryable_reads_supported(self):\n ...", "def _handle_read(self):\n pass", "def test_read_unexpected_error(self, data, requests_mock, capsys):\n requests_mock.get(data_url, exc=ConnectionError)\n with pytest.raises(ConnectionError):\n r = operations.read(data_url)\n assert 'Unexpected error when connecting to' in capsys.readouterr().out", "def __async_read_callback(self, data, err) -> None:\n if err != 0:\n logging.info('async_read (1): disconnected')\n self.close()\n elif not data:\n logging.info('async_read (2): disconnected')\n 
self.close()\n elif self.__is_active:\n # Push incoming data through Telnet Option Parser.\n self.receive_buffer.clear()\n for byte in data:\n # Add parsed text data\n return_byte = self.__telnet_parser.iac_sniffer(bytes([byte]))\n if return_byte is not None:\n # logging.info('byte received: {byte}'.format(byte=return_byte))\n # bytes_parsed = bytes_parsed + return_byte\n self.receive_buffer.append(return_byte)\n\n # Data other than Telnet Options, then send back to client. or push through system!!\n if len(self.receive_buffer) > 0:\n # This should now be pushed through for\n # Input on the STATE instead of echoed back!\n logging.info(\"Echo %s\", self.receive_buffer)\n self.async_write(b''.join(self.receive_buffer))\n\n # Ready for next set of incoming data\n self.wait_for_async_data()", "def _callback_error_handler(exception, exc_value, tb):\n # From cffi docs: \"First check if traceback is not None (it is None e.g.\n # if the whole function ran successfully but there was an error converting\n # the value returned: this occurs after the call).\"\n if tb is not None:\n status = tb.tb_frame.f_locals['status']\n msg = str_to_bytes(''.join(traceback.format_exception(\n exception, exc_value, tb)))\n lib.mongocrypt_status_set(\n status, lib.MONGOCRYPT_STATUS_ERROR_CLIENT, 1, msg, -1)\n\n return False", "def libvirt_retry(self, op):\n end_time = time.time() + 30.0\n ignore = [\n # libvirt connection closed for some reason, just retry\n \"Unable to read from monitor: Connection reset by peer\",\n # lxc container starting often fails as they're started\n # simultaneously with the same device names, use a unique\n # name to work around it.\n # http://www.redhat.com/archives/libvir-list/2013-August/msg01475.html\n \"RTNETLINK answers: File exists\",\n ]\n while True:\n try:\n return op()\n except libvirt.libvirtError as error:\n if not any(ignorable in str(error) for ignorable in ignore):\n # some other error, raise immediately\n raise\n\n time_left = max(end_time - time.time(), 0)\n if not time_left:\n # timeout\n raise\n\n self.log.warning(\"got possibly transient error '%s' from libvirt, retrying for %.1fs...\",\n error, time_left)\n time.sleep(1.0)", "def __call__(self, exception: Optional[Type[Exception]] = None) -> Generator[_HandshakeContext, None, None]:\n try:\n self.ready()\n yield self._context\n except BaseException as err:\n if exception is not None and isinstance(err, exception):\n self._context.error = err\n else:\n raise\n finally:\n self.done()", "def context_errored(self, cls, example, exception):", "def __call__(self, connection):\r\n if not self.checkResumedSession and connection.resumed:\r\n return\r\n\r\n if self.x509Fingerprint:\r\n if connection._client:\r\n chain = connection.session.serverCertChain\r\n else:\r\n chain = connection.session.clientCertChain\r\n\r\n if self.x509Fingerprint:\r\n if isinstance(chain, X509CertChain):\r\n if self.x509Fingerprint:\r\n if chain.getFingerprint() != self.x509Fingerprint:\r\n raise TLSFingerprintError(\\\r\n \"X.509 fingerprint mismatch: %s, %s\" % \\\r\n (chain.getFingerprint(), self.x509Fingerprint))\r\n elif chain:\r\n raise TLSAuthenticationTypeError()\r\n else:\r\n raise TLSNoAuthenticationError()", "def _try_receive(connection):\n result = connection.recv()\n if result == -2:\n # An exception has occurred on the other end\n e, tb_str = connection.recv()\n # The other end does not send an actual traceback object because these are\n # not picklable, but a string representation.\n logger.debug(\"%s\", tb_str)\n for child in 
multiprocessing.active_children():\n child.terminate()\n raise e\n return result" ]
[ "0.66514987", "0.5624444", "0.55150366", "0.54504734", "0.53648037", "0.5351862", "0.5241772", "0.5215485", "0.5212549", "0.51477045", "0.514248", "0.51263565", "0.5021402", "0.49990284", "0.49652463", "0.49529836", "0.49523494", "0.4938335", "0.49164072", "0.49018252", "0.48992628", "0.48938054", "0.4879673", "0.4850755", "0.48485112", "0.48249036", "0.48123476", "0.48100612", "0.47766262", "0.4772769" ]
0.66081977
1
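
A sketch of how a callback like _cb_ignore_read_exception from the record above is typically consumed. All names here are hypothetical and this is not the API of any particular CoAP/DTLS library: the point is only that the read loop logs the DTLS error and then either continues or re-raises depending on the callback's boolean return value.

import logging

def dtls_read(sock, client, ignore_read_exception, log=logging.getLogger("coap")):
    # Hypothetical wrapper: sock.recv stands in for the real DTLS read call.
    while True:
        try:
            return sock.recv(4096)
        except Exception as exc:              # errors surfaced by the DTLS stack
            log.warning("DTLS read failed: %s", exc)
            if not ignore_read_exception(exc, client):
                raise                          # continuation not possible -> propagate
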
In the CoAP client write method, different exceptions can arise from the DTLS stack. Depending on the type of exception, a continuation might not be possible, or a logging might be desirable. With this callback both needs can be satisfied.
def _cb_ignore_write_exception(self, exception, client): return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _cb_ignore_write_exception(self, exception, client):\n return False", "def failure(self, cb: CircuitBreaker, exc: BaseException) -> None:", "def _callback_error_handler(exception, exc_value, tb):\n # From cffi docs: \"First check if traceback is not None (it is None e.g.\n # if the whole function ran successfully but there was an error converting\n # the value returned: this occurs after the call).\"\n if tb is not None:\n status = tb.tb_frame.f_locals['status']\n msg = str_to_bytes(''.join(traceback.format_exception(\n exception, exc_value, tb)))\n lib.mongocrypt_status_set(\n status, lib.MONGOCRYPT_STATUS_ERROR_CLIENT, 1, msg, -1)\n\n return False", "def __async_write_callback(self, err) -> None:\n if err != 0:\n logging.info('async_write: disconnected')\n self.close()\n # elif self.__is_active:\n # Data was writen to socket. just handle errors if any.\n # logging.info('async_write: OK')", "async def __aexit__(self, err_type, err_value, err_t):\n if err_type and err_type != asyncio.CancelledError:\n self.logger.exception(\"Exception in outbound transport\")\n await self.stop()", "def whenException(self, channel, call):", "def bye_on_error_cb(participant):\n def outer_wrapper(fn):\n def wrapper(*args, **kwargs):\n try:\n fn(*args, **kwargs)\n except Exception:\n logging.error(\n \"Hit unhandled exception. Cleaning up then rethrowing.\"\n )\n\n # This is nasty.\n participant._bye()('')\n\n raise\n\n return wrapper\n\n return outer_wrapper", "def callback_exception(*args, **kwargs):\n raise DemoCallbackException()", "def test_exceptions_in_client_bubble_up(self):\n\n class SentinelException(Exception):\n pass\n\n def server_callback(*args):\n return self.sample_ocsp_data\n\n def client_callback(*args):\n raise SentinelException()\n\n client = self._client_connection(callback=client_callback, data=None)\n server = self._server_connection(callback=server_callback, data=None)\n\n with pytest.raises(SentinelException):\n handshake_in_memory(client, server)", "def test_wantWriteError(self):\n client_socket, server_socket = socket_pair()\n # Fill up the client's send buffer so Connection won't be able to write\n # anything. Start by sending larger chunks (Windows Socket I/O is slow)\n # and continue by writing a single byte at a time so we can be sure we\n # completely fill the buffer. 
Even though the socket API is allowed to\n # signal a short write via its return value it seems this doesn't\n # always happen on all platforms (FreeBSD and OS X particular) for the\n # very last bit of available buffer space.\n for msg in [b\"x\" * 65536, b\"x\"]:\n for i in range(1024 * 1024 * 64):\n try:\n client_socket.send(msg)\n except error as e:\n if e.errno == EWOULDBLOCK:\n break\n raise # pragma: no cover\n else: # pragma: no cover\n pytest.fail(\n \"Failed to fill socket buffer, cannot test BIO want write\"\n )\n\n ctx = Context(SSLv23_METHOD)\n conn = Connection(ctx, client_socket)\n # Client's speak first, so make it an SSL client\n conn.set_connect_state()\n with pytest.raises(WantWriteError):\n conn.do_handshake()", "def on_ssl_wrap_error(self, sock, addr, exception):\n log.exception(exception)\n try:\n sock.close()\n except socket.error:\n pass", "def writer_wrapper_3(coroutine):\n coroutine.send(None) # prime the coro\n while True:\n try:\n try:\n x = (yield)\n except SpamException as e: # This catches the SpamException\n coroutine.throw(e)\n else:\n coroutine.send(x)\n except StopIteration:\n pass", "def _cb_ignore_listen_exception(self, exception, server):\n if isinstance(exception, ssl.SSLError):\n # A client which couldn’t verify the server tried to connect, continue but log the event\n if exception.errqueue[-1][0] == ssl.ERR_TLSV1_ALERT_UNKNOWN_CA:\n #print(\"Ignoring ERR_TLSV1_ALERT_UNKNOWN_CA from client %s\" %\n # (’unknown’ if not hasattr(exception, ’peer’) else str(exception.peer)))\n return True\n # ... and more ...\n return False", "def _cb_ignore_read_exception(self, exception, client):\n return False", "def _cb_ignore_read_exception(self, exception, client):\n return False", "def retryable_writes_supported(self):\n ...", "def test_exceptions_in_server_bubble_up(self):\n\n class SentinelException(Exception):\n pass\n\n def server_callback(*args):\n raise SentinelException()\n\n def client_callback(*args): # pragma: nocover\n pytest.fail(\"Should not be called\")\n\n client = self._client_connection(callback=client_callback, data=None)\n server = self._server_connection(callback=server_callback, data=None)\n\n with pytest.raises(SentinelException):\n handshake_in_memory(client, server)", "def exception(self, *args, **kwargs):", "def test_alpn_callback_exception(self):\n select_args = []\n\n def select(conn, options):\n select_args.append((conn, options))\n raise TypeError()\n\n client_context = Context(SSLv23_METHOD)\n client_context.set_alpn_protos([b\"http/1.1\", b\"spdy/2\"])\n\n server_context = Context(SSLv23_METHOD)\n server_context.set_alpn_select_callback(select)\n\n # Necessary to actually accept the connection\n server_context.use_privatekey(\n load_privatekey(FILETYPE_PEM, server_key_pem)\n )\n server_context.use_certificate(\n load_certificate(FILETYPE_PEM, server_cert_pem)\n )\n\n # Do a little connection to trigger the logic\n server = Connection(server_context, None)\n server.set_accept_state()\n\n client = Connection(client_context, None)\n client.set_connect_state()\n\n with pytest.raises(TypeError):\n interact_in_memory(server, client)\n assert select_args == [(server, [b\"http/1.1\", b\"spdy/2\"])]", "def cmdfinalization_hook_passthrough_exception(\n self, data: cmd2.plugin.CommandFinalizationData\n ) -> cmd2.plugin.CommandFinalizationData:\n self.called_cmdfinalization += 1\n wrapped_ex = OSError(\"Pass me up\")\n raise exceptions.PassThroughException(wrapped_ex=wrapped_ex)", "def context_errored(self, cls, example, exception):", "def 
handle_error(self, p_ctx, others, error, start_response):\n\n if p_ctx.transport.resp_code is None:\n p_ctx.transport.resp_code = \\\n p_ctx.out_protocol.fault_to_http_response_code(error)\n\n self.get_out_string(p_ctx)\n p_ctx.out_string = [b''.join(p_ctx.out_string)]\n\n p_ctx.transport.resp_headers['Content-Length'] = \\\n str(len(p_ctx.out_string[0]))\n self.event_manager.fire_event('wsgi_exception', p_ctx)\n\n start_response(p_ctx.transport.resp_code,\n _gen_http_headers(p_ctx.transport.resp_headers))\n\n try:\n process_contexts(self, others, p_ctx, error=error)\n except Exception as e:\n # Report but ignore any exceptions from auxiliary methods.\n logger.exception(e)\n\n return itertools.chain(p_ctx.out_string, self.__finalize(p_ctx))", "def failsafe(return_arg_num_on_failure):\n def deco(func):\n def safe(*args):\n try:\n return func(*args)\n except:\n import traceback, sys \n sys.stderr.write(\"ERROR: Exception during callback \")\n try:\n sys.stderr.write(\"%s\\n\" % (map(str, args)))\n except:\n pass\n traceback.print_exc()\n return args[return_arg_num_on_failure]\n return safe\n return deco", "def on_failure(self, exc: BaseException) -> None:", "def wrap_exception(service, binary):\n def inner(f):\n def wrapped(self, context, *args, **kw):\n # Don't store self or context in the payload, it now seems to\n # contain confidential information.\n try:\n return f(self, context, *args, **kw)\n except Exception as exc:\n with excutils.save_and_reraise_exception():\n call_dict = _get_call_dict(f, self, context, *args, **kw)\n function_name = f.__name__\n\n _emit_legacy_exception_notification(\n context, exc, service, function_name, call_dict)\n _emit_versioned_exception_notification(\n context, exc, binary)\n return functools.wraps(f)(wrapped)\n return inner", "def handle_exceptions(future, chat):\n\n if not hasattr(future, 'add_done_callback'):\n log.error('Could not attach callback. Exceptions will be missed.')\n return\n\n def cb(future):\n \"\"\"Custom callback which is chat aware.\"\"\"\n try:\n future.result()\n except:\n chat.reply('Script had an error.')\n # FIXME: send to log, not stdout\n traceback.print_exc(file=sys.stdout)\n\n # Tornado functionality to add a custom callback\n future.add_done_callback(cb)", "def _error_handling(self,e,func):\n print(self.type, \" sufferred exception in \" , func , \":\" , e)", "def on_exception(self):\n\n def decorator(coro):\n self._hooks.append((\"exception\", coro))\n return coro\n\n return decorator", "def test_scp_callback_exception(self):\n self.scp = DummyVerificationSCP()\n def on_c_echo(context, info):\n raise ValueError\n self.scp.ae.on_c_echo = on_c_echo\n self.scp.start()\n\n ae = AE()\n ae.add_requested_context(VerificationSOPClass)\n assoc = ae.associate('localhost', 11112)\n assert assoc.is_established\n rsp = assoc.send_c_echo()\n assert rsp.Status == 0x0000\n assoc.release()\n self.scp.stop()", "def certificateError(\n self, listener: Optional[Callable[[Dict[str, Any]], Any]] = None\n ) -> Any:\n event_name = \"Security.certificateError\"\n if listener is None:\n future = self.client.loop.create_future()\n\n def _listener(event: Optional[Dict] = None) -> None:\n future.set_result(event)\n\n self.client.once(event_name, _listener)\n\n return future\n\n self.client.on(event_name, listener)\n return lambda: self.client.remove_listener(event_name, listener)" ]
[ "0.6399046", "0.56971824", "0.5395722", "0.5373552", "0.52277315", "0.5194488", "0.51759434", "0.5153259", "0.51442254", "0.5128307", "0.51009566", "0.50440973", "0.50042504", "0.4984776", "0.49645448", "0.49513006", "0.49485222", "0.49382952", "0.48677444", "0.48533612", "0.48465526", "0.48347375", "0.4814164", "0.4804895", "0.47747394", "0.4755196", "0.47416073", "0.4721992", "0.47213063", "0.470141" ]
0.63898355
1
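
The write side mirrors the read side. Again a hypothetical sketch rather than a specific library API: when the callback returns True the failed datagram is dropped after logging and the client keeps running, otherwise the DTLS error propagates to the caller.

import logging

def dtls_send(sock, payload, client, ignore_write_exception,
              log=logging.getLogger("coap")):
    # Hypothetical wrapper: sock.send stands in for the real DTLS write call.
    try:
        sock.send(payload)
        return True
    except Exception as exc:
        log.warning("DTLS write failed: %s", exc)
        if ignore_write_exception(exc, client):
            return False                      # datagram dropped, client continues
        raise
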
Set current sensors values+10% as the comparison baseline for trigger
def set_sensor_baseline(self, sensor_data=None): cont = self.continuous(sensor_data) for idx,val in enumerate(cont): self.thresholds[idx][0] = val*1.1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _getBaselineThresh(self):\n print('Calculating 10% baseline')\n self.baseline = obrienBaseline.obrienBaseline(\n self.d['dos1rate'], timeWidth=5.0, \n cadence=0.1)\n self.peak_std = ( (self.d['dos1rate'][self.peakInd]/10 - \n self.baseline[self.peakInd]/10)/ \n np.sqrt(self.d['dos1rate'][self.peakInd]/10))\n return", "def valuechange():\n\n tempmin.setMaximum(tempmax.value())\n tempmax.setMinimum(tempmin.value())\n hummin.setMaximum(hummax.value())\n hummax.setMinimum(hummin.value())\n\n self.variables.default_values_dict[\"settings\"][\n \"current_tempmin\"\n ] = tempmin.value()\n self.variables.default_values_dict[\"settings\"][\n \"current_tempmax\"\n ] = tempmax.value()\n self.variables.default_values_dict[\"settings\"][\n \"current_hummin\"\n ] = hummin.value()\n self.variables.default_values_dict[\"settings\"][\n \"current_hummax\"\n ] = hummax.value()\n\n max = build_command(\n self.variables.devices_dict[\"temphum_controller\"],\n (\"set_hummax\", hummax.value()),\n )\n min = build_command(\n self.variables.devices_dict[\"temphum_controller\"],\n (\"set_hummin\", hummin.value()),\n )\n\n self.variables.vcw.write(\n self.variables.devices_dict[\"temphum_controller\"], max\n )\n self.variables.vcw.write(\n self.variables.devices_dict[\"temphum_controller\"], min\n )", "def calibrate_sensors(self):\n for j in range(0, 10):\n self.read_sensors()\n for i in range(0, self.NUM_SENSORS):\n if self.calibratedMax[i] < self.sensorValues[i]:\n self.calibratedMax[i] = self.sensorValues[i]\n if self.calibratedMin[i] > self.sensorValues[i] and self.sensorValues[i] > 30:\n self.calibratedMin[i] = self.sensorValues[i]", "def auto(self):\n self.set_thermostat = 1 if self.desired_values[0] > self.data[0] else 0\n self.set_humidifier = 1 if self.desired_values[1] > self.data[1] else 0\n self.set_sprinklers = 1 if self.desired_values[2] > self.data[2] else 0\n self.set_ventilation = 1 if (self.desired_values[3] > self.data[3] or self.desired_values[4] < self.data[4]) else 0", "def rec_default(self):\n self.average_triggers.setText('(-50,1)')", "def update_relative_weight(self):\n self.relative_weight = 1\n # Add up all of the historical cpu datapoints (higher CPU = more weight)\n for i in self.cpu_datapoints:\n self.relative_weight += i\n # Multiply by the status value (so VMs with red alarm have most weight)\n self.relative_weight *= (self.heartbeat_status * 10)", "def test_up_using_trendline(self):\n assert setup.setup_component(\n self.opp,\n \"binary_sensor\",\n {\n \"binary_sensor\": {\n \"platform\": \"trend\",\n \"sensors\": {\n \"test_trend_sensor\": {\n \"entity_id\": \"sensor.test_state\",\n \"sample_duration\": 10000,\n \"min_gradient\": 1,\n \"max_samples\": 25,\n }\n },\n }\n },\n )\n self.opp.block_till_done()\n\n now = dt_util.utcnow()\n for val in [10, 0, 20, 30]:\n with patch(\"openpeerpower.util.dt.utcnow\", return_value=now):\n self.opp.states.set(\"sensor.test_state\", val)\n self.opp.block_till_done()\n now += timedelta(seconds=2)\n\n state = self.opp.states.get(\"binary_sensor.test_trend_sensor\")\n assert state.state == \"on\"\n\n # have to change state value, otherwise sample will lost\n for val in [0, 30, 1, 0]:\n with patch(\"openpeerpower.util.dt.utcnow\", return_value=now):\n self.opp.states.set(\"sensor.test_state\", val)\n self.opp.block_till_done()\n now += timedelta(seconds=2)\n\n state = self.opp.states.get(\"binary_sensor.test_trend_sensor\")\n assert state.state == \"off\"", "def update(self):\n if self.temperature != None and self.humidity != None:\n 
self.sensor.set_environmental_data(self.humidity, self.temperature)\n# Trim away error values.\n new_eco2 = self.sensor.eco2\n if new_eco2 < 65535:\n self.eco2 = new_eco2\n self.tvoc = self.sensor.tvoc", "def __slider_anomaly_threshold_value_changed(self):\n val = self.slider_anomaly_threshold.value() / 10000\n self.label_anomaly_threshold.setText(\"Anomaly threshold: %.4f\" % round(val, 5))\n self.vis.change_anomaly_thresh(val)\n self.vis.show_anomalies(self.checkbox_anomalies.isChecked())\n self.anomaly_threshold = val", "def test_change_lower(self):\n instance = self.traits_instance\n instance.low = -4.0\n instance.value = -2\n self.assertAlmostEqual(instance.value, -2)", "def read_core_vbat(self) -> float:", "def update(self):\n self.value = self.sensor.update()", "def update(self, current, values=None):\n values = values or []\n for k, v in values:\n if k not in self._values_order:\n self._values_order.append(k)\n if k not in self.stateful_metrics:\n if k not in self._values:\n self._values[k] = [v * (current - self._seen_so_far),\n current - self._seen_so_far]\n else:\n self._values[k][0] += v * (current - self._seen_so_far)\n self._values[k][1] += (current - self._seen_so_far)\n else:\n self._values[k] = v\n self._seen_so_far = current\n\n now = time.time()\n info = ' - %.0fs' % (now - self._start)\n if self.verbose == 1:\n if (now - self._last_update < self.interval and\n self.target is not None and current < self.target):\n return\n\n prev_total_width = self._total_width\n if self._dynamic_display:\n sys.stdout.write('\\b' * prev_total_width)\n sys.stdout.write('\\r')\n else:\n sys.stdout.write('\\n')\n\n if self.target is not None:\n numdigits = int(np.floor(np.log10(self.target))) + 1\n barstr = '%%%dd/%d [' % (numdigits, self.target)\n bar = barstr % current\n prog = float(current) / self.target\n prog_width = int(self.width * prog)\n if prog_width > 0:\n bar += ('=' * (prog_width - 1))\n if current < self.target:\n bar += '>'\n else:\n bar += '='\n bar += ('.' 
* (self.width - prog_width))\n bar += ']'\n else:\n bar = '%7d/Unknown' % current\n\n self._total_width = len(bar)\n sys.stdout.write(bar)\n\n if current:\n time_per_unit = (now - self._start) / current\n else:\n time_per_unit = 0\n # if self.target is not None and current < self.target:\n if self.max_iters is None or self.iters < self.max_iters:\n eta = time_per_unit * (self.target - current)\n if eta > 3600:\n eta_format = '%d:%02d:%02d' % (eta // 3600,\n (eta % 3600) // 60,\n eta % 60)\n elif eta > 60:\n eta_format = '%d:%02d' % (eta // 60, eta % 60)\n else:\n eta_format = '%ds' % eta\n\n info = ' - ETA: %s' % eta_format\n else:\n if time_per_unit >= 1:\n info += ' %.0fs/step' % time_per_unit\n elif time_per_unit >= 1e-3:\n info += ' %.0fms/step' % (time_per_unit * 1e3)\n else:\n info += ' %.0fus/step' % (time_per_unit * 1e6)\n\n for k in self._values_order:\n info += ' - %s:' % k\n if isinstance(self._values[k], list):\n avg = np.mean(self._values[k][0] / max(1, self._values[k][1]))\n if abs(avg) > 1e-3:\n info += ' %.4f' % avg\n else:\n info += ' %.4e' % avg\n else:\n if 'lr' in k:\n info += ' %.3e' % self._values[k]\n else:\n info += ' %s' % self._values[k]\n\n self._total_width += len(info)\n if prev_total_width > self._total_width:\n info += (' ' * (prev_total_width - self._total_width))\n\n if self.target is not None and current >= self.target:\n info += '\\n'\n\n sys.stdout.write(info)\n sys.stdout.flush()\n\n elif self.verbose == 2:\n if self.target is None or current >= self.target:\n for k in self._values_order:\n info += ' - %s:' % k\n avg = np.mean(self._values[k][0] / max(1, self._values[k][1]))\n if avg > 1e-3:\n info += ' %.4f' % avg\n else:\n info += ' %.4e' % avg\n info += '\\n'\n\n sys.stdout.write(info)\n sys.stdout.flush()\n\n self._last_update = now", "def update(self):\n changed = False\n\n value = AnalogInput.read(self) #\n\n delta = abs(value - self.oldValue)\n\n if delta > self.threshold:\n changed = True\n #oldValue is updated only if changed = True\n self.oldValue = value\n # Value is rescaled and changed in to an int\n # rescale() is not used for this, to avoid one extra call\n # Value is rounded to int using int(). 
It is faster than round(), \n # though loses accuracy, as it is always rounded down.\n value = int(value * (self.maximum-self.minimum) + self.minimum)\n return value, changed, self.name\n else:\n return None, changed, self.name", "def on_cam_base_pitch_hSlider_valueChanged(self, value):\n self.cam_base_pitch_ledit.setText(str(100 + value))", "def calc_stat_values(self):", "def _battery_cb(self, msg):\n # self.battery_voltages[msg.header.seq %\n # len(self.battery_voltages)] = msg.voltage\n self.battery_voltages[msg.header.seq % len(\n self.battery_voltages)] = msg.percentage * 100.\n # delta = self.INIT_VOLTAGE - self.MINIMUM_VOLTAGE\n # self.low_battery = (np.mean(self.battery_voltages) <=\n # (self.MINIMUM_VOLTAGE +\n # (0.1 * delta))) and (self._current_wp != 0)\n self.low_battery = (np.mean(self.battery_voltages) <=\n self.MINIMUM_VOLTAGE * 1.5) and (self._current_wp\n != 0)", "def calibrate_hall_sensors(self, hysteresis=20):\n self.send_packet('\\x24' + str([hysteresis])) # CHECK", "def _baseline_value(self):\n t = self['primary']\n return np.median(t.data[:int(10e-3/t.dt)])", "def compare(self):\n self.PotTax_increase = self.PotTax_intervention - self.PotTax_reference\n self.PotTax_percentage = (\n (self.PotTax_increase / self.PotTax_reference) * 100)\n \"\"\"\n # this sets the PotTax_percentage to actual percentages.\n self.PotTax_percentage['TFI'] = pd.Series(\n [\"{0:.2f}%\".format(val * 100) for val in\n self.PotTax_percentage['TFI']],\n index = self.PotTax_percentage.index)\n \"\"\"\n return", "def updateThresholdValues (self, DoubleSlider, Node, ThMax ):\n DoubleSlider.Slider.minimum = 0\n DoubleSlider.SpinBoxL.setRange(0,ThMax)\n DoubleSlider.Slider.maximum = ThMax\n DoubleSlider.SpinBoxR.setRange(0,ThMax)\n if ThMax!=0:\n DisplayNode = Node.GetScalarVolumeDisplayNode()\n LowerThreshold = DisplayNode.GetLowerThreshold()\n UpperThreshold = DisplayNode.GetUpperThreshold()\n DoubleSlider.Slider.minimumValue = LowerThreshold\n DoubleSlider.Slider.maximumValue = UpperThreshold \n DoubleSlider.SpinBoxL.blockSignals(True)\n DoubleSlider.SpinBoxR.blockSignals(True)\n DoubleSlider.SpinBoxL.value = LowerThreshold\n DoubleSlider.SpinBoxR.value = UpperThreshold\n DoubleSlider.SpinBoxL.blockSignals(False)\n DoubleSlider.SpinBoxR.blockSignals(False)", "def update_percent(self):", "def on_cam_base_yaw_hSlider_valueChanged(self, value):\n self.cam_base_yaw_ledit.setText(str(50 + value))", "def test_change_brightness_back_to_10():", "def update(self, current, values=None):\n values = values or []\n for k, v in values:\n if k not in self._values_order:\n self._values_order.append(k)\n if k not in self.stateful_metrics:\n if k not in self._values:\n self._values[k] = [\n v * (current - self._seen_so_far),\n current - self._seen_so_far,\n ]\n else:\n self._values[k][0] += v * (current - self._seen_so_far)\n self._values[k][1] += current - self._seen_so_far\n else:\n # Stateful metrics output a numeric value. 
This representation\n # means \"take an average from a single value\" but keeps the\n # numeric formatting.\n self._values[k] = [v, 1]\n self._seen_so_far = current\n\n now = time.time()\n info = \" - %.0fs\" % (now - self._start)\n if self.verbose == 1:\n if (\n now - self._last_update < self.interval\n and self.target is not None\n and current < self.target\n ):\n return\n\n prev_total_width = self._total_width\n if self._dynamic_display:\n sys.stdout.write(\"\\b\" * prev_total_width)\n sys.stdout.write(\"\\r\")\n else:\n sys.stdout.write(\"\\n\")\n\n if self.target is not None:\n numdigits = int(np.log10(self.target)) + 1\n bar = (\"%\" + str(numdigits) + \"d/%d [\") % (current, self.target)\n prog = float(current) / self.target\n prog_width = int(self.width * prog)\n if prog_width > 0:\n bar += \"=\" * (prog_width - 1)\n if current < self.target:\n bar += \">\"\n else:\n bar += \"=\"\n bar += \".\" * (self.width - prog_width)\n bar += \"]\"\n else:\n bar = \"%7d/Unknown\" % current\n\n self._total_width = len(bar)\n sys.stdout.write(bar)\n\n if current:\n time_per_unit = (now - self._start) / current\n else:\n time_per_unit = 0\n if self.target is not None and current < self.target:\n eta = time_per_unit * (self.target - current)\n if eta > 3600:\n eta_format = \"%d:%02d:%02d\" % (\n eta // 3600,\n (eta % 3600) // 60,\n eta % 60,\n )\n elif eta > 60:\n eta_format = \"%d:%02d\" % (eta // 60, eta % 60)\n else:\n eta_format = \"%ds\" % eta\n\n info = \" - ETA: %s\" % eta_format\n else:\n if time_per_unit >= 1 or time_per_unit == 0:\n info += \" %.0fs/%s\" % (time_per_unit, self.unit_name)\n elif time_per_unit >= 1e-3:\n info += \" %.0fms/%s\" % (time_per_unit * 1e3, self.unit_name)\n else:\n info += \" %.0fus/%s\" % (time_per_unit * 1e6, self.unit_name)\n\n for k in self._values_order:\n info += \" - %s:\" % k\n if isinstance(self._values[k], list):\n avg = np.mean(self._values[k][0] / max(1, self._values[k][1]))\n if abs(avg) > 1e-3:\n info += \" %.4f\" % avg\n else:\n info += \" %.4e\" % avg\n else:\n info += \" %s\" % self._values[k]\n\n self._total_width += len(info)\n if prev_total_width > self._total_width:\n info += \" \" * (prev_total_width - self._total_width)\n\n if self.target is not None and current >= self.target:\n info += \"\\n\"\n\n sys.stdout.write(info)\n sys.stdout.flush()\n\n elif self.verbose == 2:\n if self.target is not None and current >= self.target:\n numdigits = int(np.log10(self.target)) + 1\n count = (\"%\" + str(numdigits) + \"d/%d\") % (current, self.target)\n info = count + info\n for k in self._values_order:\n info += \" - %s:\" % k\n avg = np.mean(self._values[k][0] / max(1, self._values[k][1]))\n if avg > 1e-3:\n info += \" %.4f\" % avg\n else:\n info += \" %.4e\" % avg\n info += \"\\n\"\n\n sys.stdout.write(info)\n sys.stdout.flush()\n\n self._last_update = now", "def test_down_using_trendline(self):\n assert setup.setup_component(\n self.opp,\n \"binary_sensor\",\n {\n \"binary_sensor\": {\n \"platform\": \"trend\",\n \"sensors\": {\n \"test_trend_sensor\": {\n \"entity_id\": \"sensor.test_state\",\n \"sample_duration\": 10000,\n \"min_gradient\": 1,\n \"max_samples\": 25,\n \"invert\": \"Yes\",\n }\n },\n }\n },\n )\n self.opp.block_till_done()\n\n now = dt_util.utcnow()\n for val in [30, 20, 30, 10]:\n with patch(\"openpeerpower.util.dt.utcnow\", return_value=now):\n self.opp.states.set(\"sensor.test_state\", val)\n self.opp.block_till_done()\n now += timedelta(seconds=2)\n\n state = self.opp.states.get(\"binary_sensor.test_trend_sensor\")\n assert 
state.state == \"on\"\n\n for val in [30, 0, 45, 50]:\n with patch(\"openpeerpower.util.dt.utcnow\", return_value=now):\n self.opp.states.set(\"sensor.test_state\", val)\n self.opp.block_till_done()\n now += timedelta(seconds=2)\n\n state = self.opp.states.get(\"binary_sensor.test_trend_sensor\")\n assert state.state == \"off\"", "def calculatePercentChange(self, oldValue, newValue):\n return (((newValue - oldValue)/oldValue)*100)", "def update_temp(self):\n\t\tcurrent_temp = self.thin.temperature\n\t\toutside_temp = self.outside.temperature\n\t\tself.thin.temperature = current_temp + 0.01*self.config.valve_coef*self.thin._actuation_value - self.config.out_temp_coef*(current_temp - outside_temp)", "def __init__(self):\n super().__init__()\n self.TERRAIN_VARIANCE = 0.0", "def getRawThrottle():\n val = leftDriverStick.getY()\n if val != 0.0:\n val *= -1.0\n return val" ]
[ "0.6339183", "0.62632155", "0.6128972", "0.5886148", "0.5862159", "0.58352584", "0.57543653", "0.5740762", "0.56626266", "0.5647609", "0.56101316", "0.55523014", "0.5543149", "0.5527939", "0.55186856", "0.5512248", "0.55031335", "0.5467484", "0.5460671", "0.5455734", "0.54495096", "0.5418579", "0.5380533", "0.5378996", "0.53784746", "0.5360961", "0.53588396", "0.53480625", "0.5311486", "0.5311258" ]
0.73394936
0
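
A small end-to-end illustration of the baseline rule in the record above. The SensorMonitor class and the readings are invented for the example; only the val*1.1 rule comes from the retrieved document.

class SensorMonitor:
    def __init__(self, n_sensors):
        # [threshold, last_state] per sensor; only the threshold slot matters here.
        self.thresholds = [[0.0, None] for _ in range(n_sensors)]

    def continuous(self, sensor_data=None):
        # Stand-in for reading the sensors; returns the current values.
        return sensor_data if sensor_data is not None else [0.0] * len(self.thresholds)

    def set_sensor_baseline(self, sensor_data=None):
        # Same rule as the retrieved document: baseline = current value + 10%.
        for idx, val in enumerate(self.continuous(sensor_data)):
            self.thresholds[idx][0] = val * 1.1

monitor = SensorMonitor(2)
monitor.set_sensor_baseline([20.0, 5.0])
print(monitor.thresholds)   # roughly [[22.0, None], [5.5, None]] -- triggers fire above these
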
Test that you are able to retrieve a list of all users ranked by win percentage
def test_get_user_rankings(self): user = User(name=u'no win', email=u'[email protected]') user.put() userone = User(name=u'one win', email=u'[email protected]', total_played=1, wins=1) userone.put() usertwo = User(name=u'two wins', email=u'[email protected]', total_played=2, wins=1) usertwo.put() user_rankings = users.get_user_rankings() self.assertEquals(len(user_rankings), 2) self.assertEquals(user_rankings.pop().key, usertwo.key) self.assertEquals(user_rankings.pop().key, userone.key)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_user_rankings(self, request):\n users = User.query(User.total_games > 0).fetch()\n users = sorted(users, key=lambda x: x.win_percentage, reverse=True)\n return UserForms(items=[user.to_form() for user in users])", "def testHighscore(self):\n \n game = Game.objects.get(title='game0')\n gameplayeds = game.gameplayed_set\n ply_group = Group.objects.get(name='Player')\n for i in range(4):\n user = ply_group.user_set.get(username='ply{}'.format(i))\n gameplayed = gameplayeds.get(user=user)\n gameplayed.gameScore = i\n gameplayed.save()\n \n response = self.client.get(\n reverse('api:game-buyers', args=['v1', 'game0']),\n {'order_by': 'gameScore'},\n format='json'\n )\n self.assertEquals(response.status_code, 200)\n content = self.parser.parse(BytesIO(response.content))\n for i in range(4):\n self.assertEquals(content['results'][i]['user'], 'ply{}'.format(i))\n \n response = self.client.get(\n reverse('api:game-buyers', args=['v1', 'game0']),\n {'order_by': '-gameScore'},\n format='json'\n )\n self.assertEquals(response.status_code, 200)\n content = self.parser.parse(BytesIO(response.content))\n for i in range(4):\n self.assertEquals(content['results'][i]['user'], 'ply{}'.format(3 - i))", "def get_user_rankings(self, request):\n difficulty = validateGameDifficultyValue(request, True)\n users = User.query().fetch()\n items = []\n total_games = 0\n wins = 0\n for user in users:\n scores = Score.query()\n # Filter by game difficulty and only wons that resulted in a win\n scores = scores.filter(Score.user == user.key, \\\n Score.difficulty == difficulty)\n scores.fetch()\n\n for score in scores:\n total_games += 1\n if score.won:\n wins += 1\n if total_games > 0:\n items.append(\n RankingForm(user_name=user.name,\n difficulty=getattr(GameDifficulty, difficulty),\n win_percentage=round(float(wins)/total_games*100, 2),\n wins=wins)\n )\n total_games = 0\n wins = 0\n return RankingForms(items=items)", "def test_get_total_users_get(self):\n pass", "def test_ranking_achievements(self):\n client = Client()\n user = create_user('passwordmichu', 'michu')\n client.login(username='michu', password='passwordmichu')\n coll = create_collection('Coleccion de cartas')\n problem = create_select_problem(coll, 'Problema')\n create_an_achievement_of_each(coll)\n submit_select_url = reverse('judge:submit', args=[problem.pk])\n client.post(submit_select_url, {'code': problem.solution}, follow=True)\n ranking_url = reverse('judge:result', args=[coll.pk])\n group_a = create_group('1A')\n group_a.user_set.add(user)\n response = client.get(ranking_url, follow=True)\n self.assertIn('x5', response.content.decode('utf-8'))", "def get_fb_ind_rankings(self):\n\n ranks = []\n self._logger.debug(\"Getting foosball individual rankings\")\n\n try:\n self.check_if_db_connected()\n cursor = self._db_conn.cursor()\n cursor.execute(\"SELECT player_id, first_name, last_name, \\\nnickname FROM player\")\n players = cursor.fetchall()\n\n for player_id, first_name, last_name, nickname in players:\n cursor.execute(\"SELECT fb_offense_rating, fb_defense_rating FROM \\\nplayer WHERE player_id = {0}\".format(player_id))\n offense_rating, defense_rating = cursor.fetchall()[0]\n\n cursor.execute(\"SELECT mu, sigma FROM rating WHERE rating_id \\\n= {0}\".format(offense_rating))\n mu, sigma = cursor.fetchall()[0]\n\n offense_rank = float(mu) - (3 * float(sigma))\n cursor.execute(\"SELECT mu, sigma FROM rating WHERE rating_id \\\n= {0}\".format(defense_rating))\n mu, sigma = cursor.fetchall()[0]\n\n defense_rank = float(mu) - (3 * 
float(sigma))\n\n cursor.execute(\"SELECT COUNT(result_id) FROM fb_result WHERE \\\noffense_winner = {0}\".format(player_id))\n offense_win_count = cursor.fetchone()[0]\n\n cursor.execute(\"SELECT COUNT(result_id) FROM fb_result WHERE \\\ndefense_winner = {0}\".format(player_id))\n defense_win_count = cursor.fetchone()[0]\n\n cursor.execute(\"SELECT COUNT(result_id) FROM fb_result WHERE \\\noffense_loser = {0}\".format(player_id))\n offense_lose_count = cursor.fetchone()[0]\n\n cursor.execute(\"SELECT COUNT(result_id) FROM fb_result WHERE \\\ndefense_loser = {0}\".format(player_id))\n defense_lose_count = cursor.fetchone()[0]\n\n intermediate_rank = (first_name, last_name, nickname,\n 'Offense', round(offense_rank, 4), offense_win_count,\n offense_lose_count)\n ranks.append(intermediate_rank)\n del intermediate_rank\n intermediate_rank = (first_name, last_name, nickname,\n 'Defense', round(defense_rank, 4), defense_win_count,\n defense_lose_count)\n ranks.append(intermediate_rank)\n del intermediate_rank\n\n except MySQLdb.OperationalError:\n self._logger.error(\"MySQL operational error occured\")\n traceback.print_exc()\n raise exceptions.DBConnectionError(\"Cannot connect to MySQL server\")\n\n except MySQLdb.ProgrammingError:\n self._logger.error(\"MySQL programming error\")\n traceback.print_exc()\n raise exceptions.DBSyntaxError(\"MySQL syntax error\")\n\n else:\n return ranks", "def test_get_roles_rank(self):\n contrib_as = self.make_assignment(\n self.project, self.user_bob, self.role_contributor\n )\n guest_as = self.make_assignment(\n self.project, self.user_carol, self.role_guest\n )\n roles = self.project.get_roles()\n self.assertIn(contrib_as, roles)\n self.assertIn(guest_as, roles)\n roles = self.project.get_roles(max_rank=30)\n self.assertEqual(roles, [contrib_as, self.owner_as_cat])\n roles = self.project.get_roles(max_rank=40)\n self.assertEqual(roles, [contrib_as, guest_as, self.owner_as_cat])\n roles = self.project.get_roles(min_rank=30)\n self.assertEqual(roles, [contrib_as, guest_as])\n roles = self.project.get_roles(min_rank=40)\n self.assertEqual(roles, [guest_as])\n roles = self.project.get_roles(min_rank=30, max_rank=30)\n self.assertEqual(roles, [contrib_as])", "def set_ranking_users():\n data = select_data_source()\n order_id = data['id']\n from_user = data['from']\n to_user = data['to']\n ranking = float(data['ranking']) # TODO.\n \n if session._id != from_user and session._id != to_user : return permission_denied_return\n \n db = database.getdb()\n \n if not float.is_integer(ranking) or ranking <= 0 or 10 < ranking :\n return ranking_invalid_value_return\n \n ### Check if order exists.\n \n cmd = 'select * from orders where id==\"{0}\"'.format(order_id)\n order_info = db.execute(cmd).fetchall()\n if len(order_info) == 0 :\n return ranking_not_exist_return\n \n ### Check if user is valid.\n cmd = 'select owner, customer from orders where id==\"{0}\"'.format(order_id)\n owner, customer = db.execute(cmd).fetchall()[0]\n \n ### Check and setup ranked info in orders.\n \n if from_user == owner and to_user == customer :\n cmd = 'select owner_ranked from orders where id==\"{0}\"'.format(order_id)\n is_ranked = db.execute(cmd).fetchall()[0][0]\n if is_ranked != 0 :\n return ranking_already_ranked_return\n cmd = 'update orders set owner_ranked=1'\n db.execute(cmd)\n db.commit()\n elif from_user == customer and to_user == owner :\n cmd = 'select customer_ranked from orders where id==\"{0}\"'.format(order_id)\n is_ranked = db.execute(cmd).fetchall()[0][0]\n if is_ranked != 
0 :\n return ranking_already_ranked_return\n cmd = 'update orders set customer_ranked=1'\n db.execute(cmd)\n db.commit()\n else :\n return ranking_not_relative_return\n \n ### Update rank to to_user.\n \n cmd = 'select rank, rank_time from users where mail==\"{0}\"'.format(to_user)\n rank, rank_time = db.execute(cmd).fetchall()[0]\n \n rank = (rank * rank_time + ranking) / (rank_time + 1)\n rank_time += 1\n \n cmd = 'update users set rank={0}, rank_time={1} where mail=\"{2}\"'.format(rank, rank_time, to_user)\n db.execute(cmd)\n db.commit()", "def test_user_stats(self):\r\n res = self.testapp.get(u'/api/v1/stats/users',\r\n status=200)\r\n data = json.loads(res.body)\r\n self.assertTrue(\r\n 'count' in data,\r\n \"Should have user count: \" + str(data))\r\n self.assertTrue(\r\n 'activations' in data,\r\n \"Should have pending user activations: \" + str(data))\r\n self.assertTrue(\r\n 'with_bookmarks' in data,\r\n \"Should have count of users with bookmarks: \" + str(data))", "def calculate_scores():\n all_people = models.Leaderboard.query.order_by(\n models.Leaderboard.score.desc()).all()\n print(all_people)\n users = []\n scores = []\n for person in all_people:\n users.append(person.username)\n scores.append(person.score)\n return users, scores", "def get_user_rankings(self, req):\n return msgs.UserRanks(ranks=[user.getRank() for user in models.User.query().fetch()])", "def get_fb_team_rankings(self):\n\n ranks = []\n self._logger.debug(\"Getting foosball team rankings\")\n\n try:\n self.check_if_db_connected()\n cursor = self._db_conn.cursor()\n cursor.execute(\"SELECT team_id, team_name FROM team\")\n teams = cursor.fetchall()\n\n for team_id, team_name in teams:\n cursor.execute(\"SELECT fb_team_rating FROM \\\nteam WHERE team_id = {0}\".format(team_id))\n team_rating = cursor.fetchall()[0]\n\n cursor.execute(\"SELECT mu, sigma FROM rating WHERE rating_id \\\n= {0}\".format(team_rating[0]))\n mu, sigma = cursor.fetchall()[0]\n\n team_rank = float(mu) - (3 * float(sigma))\n\n # get player_ids\n cursor.execute(\"SELECT player from player_team_xref \\\nWHERE team = {0}\".format(team_id))\n players = cursor.fetchall()\n player_one = players[0]\n player_two = players[1]\n\n cursor.execute(\"SELECT first_name FROM player WHERE \\\nplayer_id = {0}\".format(player_one[0]))\n player_one_name = cursor.fetchone()[0]\n\n cursor.execute(\"SELECT first_name FROM player WHERE \\\nplayer_id = {0}\".format(player_two[0]))\n player_two_name = cursor.fetchone()[0]\n\n cursor.execute(\"SELECT COUNT(result_id) FROM fb_result WHERE \\\n(offense_winner = {0} AND defense_winner = {1}) OR (offense_winner = {1} \\\nAND defense_winner = {0})\".format(player_one[0], player_two[0]))\n team_win_count = cursor.fetchone()[0]\n\n cursor.execute(\"SELECT COUNT(result_id) FROM fb_result WHERE \\\n(offense_loser = {0} AND defense_loser = {1}) OR (offense_loser = {1} \\\nAND defense_loser = {0})\".format(player_one[0], player_two[0]))\n team_loss_count = cursor.fetchone()[0]\n\n intermediate_rank = (team_name, round(team_rank, 4),\n team_win_count, team_loss_count, player_one_name,\n player_two_name)\n ranks.append(intermediate_rank)\n del intermediate_rank\n\n except MySQLdb.OperationalError:\n self._logger.error(\"MySQL operational error occured\")\n traceback.print_exc()\n raise exceptions.DBConnectionError(\"Cannot connect to MySQL server\")\n\n except MySQLdb.ProgrammingError:\n self._logger.error(\"MySQL programming error\")\n traceback.print_exc()\n raise exceptions.DBSyntaxError(\"MySQL syntax error\")\n\n else:\n 
return ranks", "def test_overall_report_banner_users():\n assert (overall_data['banner_report']['data'][0][0] == 'Users')\n for num in overall_data['banner_report']['data'][0][1:]:\n assert (num == 90)", "def get_user_rankings(self, request):\n users = User.query().order(-User.win_ratio)\n return RankForms(items=[user.rank_form() for user in users])", "def test_get_ranked_user_commit_data(self):\n data = self.leaderboard.get_ranked_user_commit_data()\n print(data)", "def get_winpercent(self, game: 'Game' = None, player: 'Player' = None):\n if game and player:\n pass\n elif game:\n play_count = self.play_set.filter(game=game).count()\n win_count = self.play_set.filter(winner=self, game=game).count()\n return win_count / play_count * 100\n elif player:\n pass\n # play_count = self.play_set.filter(players__in=player).count()\n # win_count = self.play_set.filter(\n # winner=self, player=player).count()\n # return win_count / play_count * 100\n else:\n return self.winpercent", "def test_multiple_players_return_own_results(self):\r\n observed = highscore.highscore(self.fixture_player1, self.fixture_score_high1)\r\n self.assertAlmostEquals(observed, self.fixture_score_high1)\r\n observed = highscore.highscore(self.fixture_player2, self.fixture_score_high2)\r\n self.assertAlmostEquals(observed, self.fixture_score_high2)", "def test_get_ranking(self):\n card = Card.objects.create(suit=Card.CLUB, rank=\"jack\")\n self.assertEqual(card.get_ranking(), 11)", "def test_task_count_user_total(self):\r\n tasks.count_total_each_user()\r\n\r\n stats = StatBookmark.query.all()\r\n\r\n expected = {\r\n 'admin': 0,\r\n self.username: 4,\r\n self.new_username: 3,\r\n }\r\n\r\n for stat in stats:\r\n user_key = stat.attrib.split('_')\r\n username = user_key[2]\r\n self.assertTrue(username in expected)\r\n self.assertEqual(expected[username], stat.data)", "def test_percentage_lost_weight(self):\n user_created = self.create_user()\n percentage_return = self.new_calculation.percentage_lost_weight(user_created)\n\n self.assertEqual(percentage_return, 10)\n self.assertEqual(type(percentage_return), int)", "def cal_hit_ratio(self):\n full, top_k = self._subjects, self._top_k\n top_k = full[full['rank']<=top_k]\n score = 0.0\n # golden items hit in the top_K items\n score_1 = sum([(len(d[(d['item'].isin(self._test_items[d['user'].iloc[0]]))& (d['ratings']==1.0)])) for i,d in top_k.groupby('user')])\n score_2 = sum([(len(d[(d['item'].isin(self._test_items[d['user'].iloc[0]]))& (d['ratings']==0.0)])) for i,d in top_k.groupby('user')])\n score = score_1 - score_2\n return score/full['user'].nunique()", "def test_something(self):\n\n allure.dynamic.title(\"Testing compute_ranks\")\n allure.dynamic.severity(allure.severity_level.NORMAL)\n allure.dynamic.description_html('<h3>Codewars badge:</h3>'\n '<img src=\"https://www.codewars.com/users/myFirstCode'\n '/badges/large\">'\n '<h3>Test Description:</h3>'\n \"<p>Test the function taht organizes a sports league in a \"\n \"round-robin-system. Each team meets all other teams. \"\n \"In your league a win gives a team 2 points, a draw gives \"\n \"both teams 1 point. After some games you have to compute \"\n \"the order of the teams in your league. 
You use the following \"\n \"criteria to arrange the teams:</p>\"\n \"<ul><li>- Points</li>\"\n \"<li>- Scoring differential (the difference between goals \"\n \"scored and those conceded)</li>\"\n \"<li>- Goals scored</li></ul>\")\n\n test_data = [\n (6,\n [[0, 5, 2, 2],\n [1, 4, 0, 2],\n [2, 3, 1, 2],\n [1, 5, 2, 2],\n [2, 0, 1, 1],\n [3, 4, 1, 1],\n [2, 5, 0, 2],\n [3, 1, 1, 1],\n [4, 0, 2, 0]],\n [4, 4, 6, 3, 1, 2]),\n (6,\n [[0, 5, 2, 0],\n [1, 4, 2, 2],\n [2, 3, 1, 3],\n [1, 5, 0, 0],\n [2, 0, 2, 1],\n [3, 4, 3, 1]],\n [2, 3, 4, 1, 5, 6]),\n (4,\n [[0, 3, 1, 1],\n [1, 2, 2, 2],\n [1, 3, 2, 0],\n [2, 0, 2, 0]],\n [3, 1, 1, 3]),\n (10,\n [],\n [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]),\n (8,\n [[0, 7, 2, 0]],\n [1, 2, 2, 2, 2, 2, 2, 8])\n ]\n\n for data in test_data:\n number = data[0]\n games = data[1]\n expected = data[2]\n actual_result = compute_ranks(number, games)\n print_log(number=number,\n games=games,\n expected=expected,\n actual_result=actual_result)\n\n with allure.step(\"Enter a test data and verify the result:\"):\n self.assertEqual(expected, actual_result)", "def per100_top_stat_players(game_type, stat, player_pk, excluded_pks, season_id=None):\n season = None\n if season_id:\n season = bmodels.Season.objects.get(id=season_id)\n\n if player_pk:\n players = bmodels.Player.objects.filter(pk=player_pk)\n else:\n players = bmodels.Player.objects.all().exclude(\n Q(first_name__contains=\"Team\") | Q(pk__in=excluded_pks))\n player_list = []\n for player in players:\n if season:\n result = player.statline_set.filter(game__game_type=game_type, game__date__range=(\n season.start_date, season.end_date)).aggregate(Sum(stat), Sum('off_pos'))\n else:\n result = player.statline_set.filter(\n game__game_type=game_type).aggregate(Sum(stat), Sum('off_pos'))\n if result['off_pos__sum'] and result['off_pos__sum'] is not 0:\n percentage = (result[stat + '__sum'] /\n result['off_pos__sum']) * 100\n else:\n percentage = 0.0\n player_list.append((player.first_name, percentage))\n return sorted(player_list, key=lambda x: x[1], reverse=True)", "def give_round_scores(list_of_players):\n print(\"\\nThe round has ended !\\nWe shall now unveil the cards and the scores!\")\n\n for player in list_of_players:\n cards = [card.name for card in player.cards]\n cards_string = \" \"\n for card in cards:\n cards_string += card + \", \"\n cards_string = cards_string[:-2]\n print(\"\\n{} has these cards: \".format(player.name), cards_string)\n print(\"{} has a score of {}\".format(player.name, player.score()))\n final_scores = [player.score() for player in list_of_players]\n min_score = min(final_scores)\n winners_index = [i for i, x in enumerate(final_scores) if x == min_score]\n if len(winners_index) == 1:\n index_winner = winners_index[0]\n winner = list_of_players[index_winner]\n print(winner.name, \"won the round with a score of {}\".format(winner.score()))\n if len(winners_index) > 1:\n print(\"It's a tie!\")\n winners_names = \"\"\n winners = [list_of_players[i] for i in winners_index]\n for winner in winners:\n winners_names += winner.name\n print(winners_names, \"won the round with a score of \", str(min_score))", "def test_sample_users():\n ratings = lktu.ml_test.ratings\n ratings = ratings.set_index('user') ##forces non-unique index\n with pytest.raises(ValueError):\n for split in xf.sample_users(ratings, 5, 100, xf.SampleN(5)):\n pass", "async def ranks(self, ctx):\n database = db[str(ctx.guild.id)]\n all_users = database.find({}).sort('xp', -1)\n\n rank = ''\n index = 1\n for doc in all_users:\n try:\n 
user = self.client.get_user(doc['user_id'])\n if user is not None and not user.bot:\n s = f'**{index})** {user.display_name}: {doc[\"xp\"]} XP | Level: {get_level_from_xp(doc[\"xp\"])}\\n'\n rank += s\n else:\n continue\n except KeyError:\n continue\n index += 1\n\n rank_embed = discord.Embed(title='Rank', description=rank, color=discord.Color(random.randint(1, 16777215)))\n await ctx.send(embed=rank_embed)", "def get_user_rankings(self, request):\n users = User.query().order(-User.average_score)\n return RankForms(items=[user.to_rank_form() for user in users])", "def test_new_user_returns_score(self):\r\n observed = highscore.highscore(self.fixture_player, self.fixture_score_high)\r\n self.assertEquals(observed, self.fixture_score_high)", "def test_user_get_scores():\n app = create_ctfd()\n with app.app_context():\n register_user(app)\n client = login_as_user(app)\n r = client.get('/scores')\n assert r.status_code == 200\n destroy_ctfd(app)", "def test_rank(rank_list, word, n, test='bw'):\n tests = ['bw', 'ks']\n if test not in test:\n raise ValueError('Invalid test used. Need to choose either {} or {}\\\n tests.'.format(*tests))\n\n _, base_check = zip(*sorted(rank_list[word]))\n # just subtract the mean of the dataset, to make the mean 0\n std_base_check = [data - np.mean(base_check) for data in base_check]\n \n print(std_base_check)\n\n words = []\n for key, values in rank_list.items():\n if key == word:\n continue\n\n _, test_check = zip(*sorted(values))\n # move the data to a mean of 0\n std_test_check = [data - np.mean(test_check) for data in test_check]\n if test == 'bw':\n score = brendan_whitney_test(std_base_check, std_test_check)\n else:\n score, _ = ks_2samp(std_base_check, std_test_check)\n \n words.append((score, key))\n \n words = sorted(words)\n print(words[:n])\n return words[:n]" ]
[ "0.6441626", "0.6410253", "0.63942116", "0.6246523", "0.6229379", "0.61177903", "0.609688", "0.60868376", "0.6083801", "0.6032084", "0.5962515", "0.59496826", "0.5942009", "0.58788043", "0.5840735", "0.58205724", "0.57919294", "0.57524", "0.5742837", "0.5739226", "0.5737768", "0.5735706", "0.5728616", "0.571925", "0.57124203", "0.57089365", "0.5707287", "0.5686002", "0.56495106", "0.5641554" ]
0.71827596
0
General Gaussian elimination. Solve Av = b, for `v`. `A` is a square matrix with dimensions (n,n) and `b` has dim (n,)
def gaussian_elimination(A, b):
    n = len(b)

    # Join A and b
    ab = np.c_[A,b]

    # Gaussian Elimination
    for i in range(n-1):
        if ab[i,i] == 0:
            raise ZeroDivisionError('Zero value in matrix..')
        for j in range(i+1, n):
            ratio = ab[j,i] / ab[i,i]
            for k in range(i, n+1):
                ab[j,k] = ab[j,k] - ratio * ab[i,k]

    # Backward Substitution
    X = np.zeros((n,1))
    X[n-1,0] = ab[n-1,n] / ab[n-1,n-1]
    for i in range(n-2,-1,-1):
        knowns = ab[i, n]
        for j in range(i+1, n):
            knowns -= ab[i,j] * X[j,0]
        X[i,0] = knowns / ab[i,i]

    return X
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gaussian_elimination(A, b):\n \n m, n = A.shape\n U = A.copy() \n b = b.copy()\n\n # forward sweep, reduce A to a upper triangular matrix\n for k in range(min(m, n)):\n swap = np.argmax(np.abs(U[k:, k])) + k\n if U[swap, k] == 0:\n raise ValueError('Singular matrix')\n U[[k, swap], :] = U[[swap, k], :]\n b[[k, swap]] = b[[swap, k]]\n \n for i in range(k + 1, m):\n factor = U[i, k] / U[k, k]\n b[i] = b[i] - factor*b[k]\n U[i, k+1:] = U[i, k+1:] - U[k, k+1:] * factor\n U[i, k] = 0\n \n # solve by back subistitution\n x = rbackwardsolve(U, b, m)\n\n return x", "def gaussian_elimination_pivots(A, b):\n\n P, L, U = PLU(A)\n n,_ = A.shape\n y = rforwardsolve(L, (P.T).dot(b), n)\n x = rbackwardsolve(U, y, n)\n\n return x", "def gaussian_elimination_special_case(b):\n n = len(b)\n # init new (prime) arrays\n beta_prime = np.empty(n)\n beta_prime[0] = 2\n\n b_prime = np.empty(n)\n b_prime[0] = b[0]\n\n v = np.empty(n)\n i_array = np.arange(n)\n beta_prime = (i_array+2) / (i_array+1)\n\n for i in range(1,n):\n b_prime[i] = b[i] + (b_prime[i-1] / beta_prime[i-1])\n\n v[-1] = b_prime[-1] / beta_prime[-1]\n\n for i in range(n-2, -1, -1):\n v[i] = (b_prime[i] + v[i+1])/ beta_prime[i]\n\n return v", "def gaussElimin(a,b):\n a=float64(a)\n b=float64(b)\n n=len(b)\n x=zeros((n,1),dtype=float)\n for k in range(n-1):\n for i in range(k+1,n):\n l=float(a[i][k])/a[k][k]\n\t a[i][k]=0\n\t for j in range(k+1,n):\n\t a[i][j]=a[i][j]-l*a[k][j]\n\t b[i]=b[i]-l*b[k]\n x[n-1]=float(b[n-1])/a[n-1][n-1]\n for i in range(n-2,-1,-1):\n sum=b[i]\n for j in range(i+1,n):\n sum=sum-a[i][j]*x[j]\n x[i]=float(sum)/a[i][i]\n return x", "def gaussian_solve(a, b):\n g = np.zeros((len(a), len(a[0]) + len(b[0])))\n for i in range(len(a)):\n for j in range(len(a[0])):\n g[i][j] = a[i][j]\n for i in range(len(b)):\n for j in range(len(b[0])):\n g[i][j + len(a[0])] = b[i][j]\n for i in range(len(a)):\n for j in range(i+1, len(a)):\n row1 = g[i]\n row2 = g[j]\n if row1[i] != 0:\n q = row2[i] / row1[i]\n g[j] = row2 - q * row1\n for i in range(len(a)):\n i = len(a) - i - 1\n for j in range(i):\n j = i - j - 1\n row1 = g[i]\n row2 = g[j]\n if row1[i] != 0:\n q = row2[i] / row1[i]\n g[j] = row2 - q * row1\n if g[i][i] != 0:\n g[i] /= g[i][i]\n else:\n return 'error: matrix is not linearly independent'\n out = np.zeros((len(b), len(b[0])))\n for i in range(len(b)):\n for j in range(len(b[0])):\n out[i][j] = g[i][j + len(a[0])]\n return out", "def linear_least_squares(M, v):\n \n B = copy(M)\n [m,n] = shape(B)\n if rank(B) != min(m,n):\n print('Warning: can not be solved since the rank of the matrix is not its maximum value')\n return nan\n else:\n \n A = copy(M)\n At = transpose(M)\n b = copy(v)\n b = transpose(b)\n \n AtA = dot(At, A)\n Atb = transpose(dot(At, b))\n print(AtA, Atb)\n \n x = gauss_elimination(AtA, Atb)\n print('x*:')\n return x", "def solve(self,b):\n nrows = self.nrows\n ncols = self.ncols\n newmatrix = Matrix(nrows,ncols+b.ncols) #Account for b not being just a column vector\n for i in range(nrows):\n for j in range(ncols):\n newmatrix[i,j]= self[i,j]\n for j in range(b.ncols):\n newmatrix[i,ncols+j] = b[i,j]\n newmatrix.gaussianelimination()\n x = Matrix(nrows,b.ncols)\n for i in range(x.nrows):\n for j in range(b.ncols):\n x[i,j] = newmatrix[i,j+ncols]\n return x", "def gauss_naive (M, b) -> list:\n dim = len(b)\n\n #Itero sulle Incognite da Trovare\n for i in range(dim):\n\n #Itero sulle righe su cui devo cancellare un elemento\n for j in range(i+1,dim):\n m__j_i = M[j][i] / M[i][i]\n M[j][i] = 0.0\n\n for k 
in range (i+1,dim):\n M[j][k] = M[j][k] - m__j_i * M[i][k]\n \n b[j] = b[j] - m__j_i * b[i]\n\n return M,b", "def Gauss_Seidel_Solve(A,b,tol=1.0e-6,max_iterations=100,LOUD=False):\n [Nrow, Ncol] = A.shape\n assert Nrow == Ncol\n N = Nrow\n converged = False\n iteration = 1\n x = np.random.rand(N) #random initial guess \n x_new = np.zeros(N)\n while not(converged):\n x = x_new.copy() #replace old value\n for row in range(N):\n x_new[row] = b[row]\n for column in range(N):\n if column != row:\n #only change from before is that I use x_new in the update\n x_new[row] -= A[row,column]*x_new[column]\n x_new[row] /= A[row,row]\n relative_change = np.linalg.norm(x_new-x)/np.linalg.norm(x_new)\n if (LOUD):\n print(\"Iteration\",iteration,\": Relative Change =\",relative_change)\n if (relative_change < tol) or (iteration >= max_iterations):\n converged = True\n iteration += 1\n return x_new", "def gaussian_reduce(w, a, b):\n u = (0, 1)\n v = (1, 0)\n\n if dot(u, v, w, a, b) < 0:\n v = (-v[0], -v[1])\n\n if norm(u, w, a, b) < norm(v, w, a, b):\n u, v = v, u\n\n while norm(u, w, a, b) > norm(v, w, a, b):\n k = dot(u, v, w, a, b) // dot(v, v, w, a, b)\n u, v = v, (u[0]- k*v[0], u[1]- k*v[1])\n\n u, v = v, u\n\n if dot(u, v, w, a, b) < dot(v, v, w, a, b)/2 or norm((u[0]-v[0], u[1]-v[1]), w, a, b) > norm(v, w, a, b):\n c = v\n else:\n c = (u[0] - v[0], u[1] - v[1])\n\n return c[0]*w + b*c[1], c[0]", "def f(v,x):\n # dependent var order: v = [H, u, tau]'\n # 'mass' matrix:\n M = array([[ v[1], v[0], 0.0 ],\n [ -rg*v[0], 0.0, 1.0 ],\n [ 0.0, 0.0, 1.0 ] ]);\n #[ 0.0, 2.0 * nu0 * v[0], 0.0 ] ]);\n g = array([[ a * (v[0] - He) ],\n [ k * rg * v[0] * v[1] ],\n [ v[2] ] ]);\n vp = solve(M,g)\n return vp.flatten()", "def elimination(A, b):\n n = len(A)\n for j in range(n):\n if A[j][j] <= 0:\n raise ValueError('Matrix A is not positive definite.')\n A[j][j] = math.sqrt(A[j][j])\n b[j][0] = b[j][0] / A[j][j]\n for i in range(j + 1, n):\n A[i][j] = A[i][j] / A[j][j]\n b[i][0] = b[i][0] - A[i][j] * b[j][0]\n for k in range(j + 1, i + 1):\n A[i][k] = A[i][k] - A[i][j] * A[k][j]", "def sparse_gauss_seidel(A, b, tol=1e-8, maxiters=29):\n \n\n def iter(xi):\n xj=np.zeros((m,))\n for i in xrange(m): \n rowstart = A.indptr[i]\n rowend = A.indptr[i+1]\n aii=A[i,i]\n xj[i]=(b[i]-(np.dot(A.data[rowstart:rowend], xi[A.indices[rowstart:rowend]])-aii*xi[i]))/(aii)\n xi[i]=xj[i]\n return xj\n \n #Aix = np.dot(A.data[rowstart:rowend], x[A.indices[rowstart:rowend]])\n\n m=len(b)\n xk=np.zeros((m,))\n for i in xrange(0,maxiters):\n xk=iter(xk)\n if (la.norm(A.dot(xk)-b,ord=np.inf)<tol) or (i==maxiters-1):\n return xk", "def get_eigen_value(A, v):\n Av = np.dot(A, v)\n print(\"Mag v, should be 1:\", mag(v))\n lmb = mag(Av) / mag(v)\n return lmb", "def _exec_vector(self, a, bd, mask):\n\n npt = bd.shape[0]\n n = self.X_ADJUSTED.shape[0]\n zero_index = None\n zero_value = False\n\n a_inv = scipy.linalg.inv(a)\n\n if np.any(np.absolute(bd) <= self.eps):\n zero_value = True\n zero_index = np.where(np.absolute(bd) <= self.eps)\n\n b = np.zeros((npt, n+1, 1))\n b[:, :n, 0] = - self.variogram_function(self.variogram_model_parameters, bd)\n if zero_value:\n b[zero_index[0], zero_index[1], 0] = 0.0\n b[:, n, 0] = 1.0\n\n if (~mask).any():\n mask_b = np.repeat(mask[:, np.newaxis, np.newaxis], n+1, axis=1)\n b = np.ma.array(b, mask=mask_b)\n\n x = np.dot(a_inv, b.reshape((npt, n+1)).T).reshape((1, n+1, npt)).T\n kvalues = np.sum(x[:, :n, 0] * self.VALUES, axis=1)\n sigmasq = np.sum(x[:, :, 0] * -b[:, :, 0], axis=1)\n\n return kvalues, 
sigmasq", "def fitbivarGaussian(data):\n params = bivarParams(data)\n errorfunction = lambda p: ravel(bivarGaussian(*p)(*indices(data.shape)) -\n data)\n p, success = optimize.leastsq(errorfunction, params)\n return p", "def singular_solve(U, e, V, b):\n # Calculate S * V^T * x = U^T * b\n y = np.dot(np.transpose(U), b)\n\n for i in range(len(y)):\n y[i] /= e[i]\n\n # Solve\n x = np.dot(V, y)\n\n return x", "def gauss_vect_mult(v):\n Jv = T.Rop(output, params, v)\n HJv = T.Rop(T.grad(opt_cost,output), output, Jv)\n JHJv = T.Lop(output, params, HJv)\n if not isinstance(JHJv,list):\n JHJv = [JHJv]\n JHJv = [a+ridge*b for a,b in zip(JHJv,v)]\n return JHJv", "def fgausbg(v,p):\n return np.exp(-0.5 * ((v[0] - p[0]) / p[1])**2) * p[2] + p[3]", "def soft_constraint ( self , var , value , name = '' , title = '' ) :\n \n assert isinstance ( var , ROOT.RooAbsReal ) ,\\\n \"Invalid ``v'': %s/%s\" % ( var , type ( var ) ) \n assert isinstance ( value , VE ),\\\n \"Invalid ``value'': %s/%s\" % ( value , type ( value ) )\n\n assert 0 < value.cov2() , 'Invalid error for %s' % value\n \n name = name if name else 'Gauss_%s_%s' % ( var.GetName() , self.name ) \n title = title if title else 'Gaussian Constraint(%s,%s) at %s' % ( var.GetName() , self.name , value )\n \n # value & error as RooFit objects: \n val = ROOT.RooFit.RooConst ( value.value () )\n err = ROOT.RooFit.RooConst ( value.error () )\n \n # Gaussian constrains \n gauss = ROOT.RooGaussian ( self.var_name ( name ) , title , var , val , err )\n \n # keep all the created technical stuff \n self.aux_keep.append ( val )\n self.aux_keep.append ( err )\n self.aux_keep.append ( gauss )\n\n self.info ('Constraint is created %s=%s' % ( var.name , value ) )\n return gauss", "def geneiv(A, B):\n Li = np.linalg.inv(linalg.cholesky(B).T)\n C = Li*A*(Li.T)\n C = np.asmatrix((C + C.T)*0.5, np.float32)\n\n eivs, V = np.linalg.eig(C)\n return eivs, Li.T*V", "def abv(og, fg):\n return abw(og, fg) * fg / 0.794", "def gauss_seidel(A, b, tol=1e-8, maxiters=100, plot=False):\n A=np.array(A)*1.0\n b=np.array(b)*1.0 \n m,n=A.shape\n e=[]\n xk=np.zeros((m,))\n \n def iter(xi):\n xj=np.zeros((m,))\n for i in xrange(m):\n xj[i]=(b[i]-(np.dot(A[i],xi)-A[i,i]*xi[i]))/A[i,i]\n xi[i]=xj[i]\n return xj\n\n if plot==True: \n for i in xrange(1,maxiters+1):\n e+=[la.norm(np.dot(A,xk)-b,ord=np.inf)]\n #print i-1,e[i-1],xk\n xk=iter(xk)\n if (la.norm(np.dot(A,xk)-b,ord=np.inf)<tol) or (i==maxiters):\n e+=[la.norm(np.dot(A,xk)-b,ord=np.inf)]\n break\n #How many iterations happened\n iters=len(e) #1..len(e)\n dom=np.arange(0,iters)\n \n plt.semilogy(dom,e,'b.-',basey=10,lw=2, ms=2)\n plt.xlabel(\"Iteration #\")\n plt.ylabel(\"Absolute Error of Approximation\")\n #plt.legend(loc=\"upper left\")\n plt.title(\"Convergence of Gauss-Seidel Method\", fontsize=18)\n plt.show()\n return xk\n \n else:\n for i in xrange(1,maxiters+1):\n xk=iter(xk)\n if (la.norm(np.dot(A,xk)-b,ord=np.inf)<tol) or (i==maxiters):\n return xk", "def func_gaussian(self, dmv, vpar):\n dmoff = dmv - vpar[0]\n sig = vpar[1]\n sig = sig * sig\n return np.exp(-0.5 * dmoff * dmoff / sig) * self.ThetaFunc(dmv)", "def gs(self, k=50):\n # a. initialize V1 to Vk as a matrix of zeros\n Vs = np.zeros((k, self.ATA.shape[0]), dtype=float)\n\n # initialize u_n as first eigen vector?\n # un = self.eigen_vectors[0]\n\n # looking for k largest eigenvalues and associated eigenvectors\n # of ATA\n # b. for i = 1 to k\n for i in tqdm(range(len(Vs))):\n print(\"Doing i\")\n\n # i. 
randomly generated vector of size m\n # (length of latitudes, in this case?)\n # scale entire vector by its magnitude, to make magnitude = 1\n u1 = scale_mag_1(np.random.rand(self.ATA.shape[0]))\n un = u1 # at first, u_n is u_1 and random\n\n diff = 1 # set initial diff too high to trip while loop\n while diff > 1e-3:\n\n print(\"Doing ii\")\n # ii. u_(n+1) = A^T*A*u_n\n u1more = np.dot(self.ATA, un)\n\n print(\"Doing iii\")\n # iii. u_(n+1) = u_(n+1) - Sigma_j^(i-1)(u_(n+1)^T * V_j) * V_j\n u1more = u1more - np.sum([\n np.dot(np.dot(u1more.T, Vs[j]), Vs[j]) for j in range(i)\n ])\n\n print(\"Doing iv\")\n # iv. u_(n+1) = u_(n+1) / || u_(n+1) ||\n # just norm mag\n u1more = scale_mag_1(u1more)\n\n diff = mag(u1more - un)\n print(\"Diff:\", diff)\n\n un = u1more\n\n Vs[i] = un", "def gem_solve(A, b):\r\n\tstart = time()\r\n\tn = len(A)\r\n\tU = [[0.0 for k in range(n)] for k in range(n)]\r\n\tfor k in range(n):\r\n\t\tfor i in range(k+1,n):\r\n\t\t\tA[i][k] = A[i][k]/A[k][k]\r\n\t\t\tb[i] = b[i] - A[i][k]*b[k]\r\n\t\tfor j in range(k+1,n):\r\n\t\t\tfor i in range(k+1, n):\r\n\t\t\t\tA[i][j] = A[i][j]-A[i][k]*A[k][j]\r\n\t\t\t\t\r\n\tfor i in range(n):\r\n\t\tfor j in range(n):\r\n\t\t\tif i>j:\r\n\t\t\t\tU[i][j] = 0\r\n\t\t\telse:\r\n\t\t\t\tU[i][j] = A[i][j]\r\n\t\r\n\tx, place = backward(U, b)\r\n\tend = time()\r\n\treturn x, (end-start)", "def svd_approx(A, k):", "def stabilizer_vector(v, g, n):\n vg = v.copy()\n w = v.copy()\n for i in range(1, n):\n vg *= g \n w += vg\n assert v == vg * g\n if (w['B'] == 0).all():\n return None\n return w", "def gaussian2(a, prec=1e-8):\n dependent = []\n independent = []\n row, column = a.shape\n if row < column:\n a = np.vstack((a, np.zeros((column-row, column))))\n row, column = a.shape\n a = np.where(np.abs(a)<prec, 0, a).astype('double')\n irow = 0\n for i in range(column):\n for j in range(irow+1, row):\n if abs(a[j,i]) -abs(a[irow, i]) > prec:\n a[[irow, j]] = a[[j,irow]] # interchange the irowth and jth row\n\n if abs(a[irow, i]) > prec:\n dependent.append(i)\n a[irow, i:] /= a[irow, i]\n for j in range(irow) + range(irow+1, row):\n a[j, i:] -= a[irow, i:] / a[irow, i] * a[j,i]\n irow += 1\n else:\n independent.append(i)\n param = np.zeros((column, len(independent)), dtype='double')\n if len(independent) >0:\n for i, de in enumerate(dependent):\n for j, ind in enumerate(independent):\n param[de, j] = -a[i, ind]\n for i, ind in enumerate(independent):\n param[ind, i] = 1\n return a, param, independent", "def svd_approx(A, k):\n U,s,Vh=la.svd(A,full_matrices=False)\n return U[:,:k].dot(np.diag(s[:k])).dot(Vh[:k,:])" ]
[ "0.72205275", "0.69978184", "0.6909211", "0.664306", "0.6558458", "0.647643", "0.64515245", "0.63309264", "0.62061775", "0.61396176", "0.6073213", "0.60224277", "0.6003381", "0.5858073", "0.58359253", "0.5735665", "0.5601619", "0.55769444", "0.5568682", "0.55639195", "0.5558914", "0.5556803", "0.5530713", "0.5516941", "0.5377437", "0.5365432", "0.53587645", "0.53120196", "0.5311218", "0.5305769" ]
0.7118353
1
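As a quick, purely illustrative sanity check of the `gaussian_elimination` document above (it assumes that function is in scope and that NumPy is imported as `np`; the small 3x3 system is invented for the example), one can solve the system and cross-check against `numpy.linalg.solve`:

import numpy as np

# Classic 3x3 system: 2x + y - z = 8, -3x - y + 2z = -11, -2x + y + 2z = -3
A = np.array([[ 2.0,  1.0, -1.0],
              [-3.0, -1.0,  2.0],
              [-2.0,  1.0,  2.0]])
b = np.array([8.0, -11.0, -3.0])

X = gaussian_elimination(A, b)        # returns an (n, 1) column vector
print(X.ravel())                      # expected: [ 2.  3. -1.]
print(np.allclose(X.ravel(), np.linalg.solve(A, b)))   # cross-check: True

No pivot in this system is zero, so the naive (non-pivoting) elimination above runs without hitting the ZeroDivisionError branch.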
Parses YAML document using filepath, returns dict.
def load(filePath):
    stream = open(filePath, 'r')
    yamlDict = yaml.safe_load(stream)
    return yamlDict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_yaml_file(filepath: str) -> Dict:\n return yaml.safe_load(read_file(filepath))", "def read_yaml(path: PathLike) -> Dict:\n with open(path, \"r\") as read_file:\n return yaml.load(read_file, Loader=yaml.UnsafeLoader)", "def load_yaml(path: str) -> Dict[str, Any]:\n with open(path, \"r\", encoding=\"utf8\") as fp:\n data = yaml.safe_load(fp)\n return data", "def load_yaml(path: str) -> dict:\n with open(path, 'r') as f:\n yaml_file = yaml.load(f, Loader=yaml.FullLoader)\n return yaml_file", "def read_yaml_file(path: Union[str, pathlib.Path]) -> dict:\n\n if isinstance(path, (str, pathlib.Path)):\n with open(path, 'r') as fp:\n config = yaml.safe_load(fp)\n else:\n # Assume it's an stream\n config = yaml.safe_load(path)\n\n return config", "def load_yaml(file_path: str) -> dict:\n assert file_path.endswith(\".yaml\")\n with open(file_path) as file:\n return yaml.load(file, Loader=yaml.FullLoader)", "def load_yaml(path):\n if os.path.exists(path):\n f = open(path)\n data = yaml.load(f)\n f.close()\n return data\n else:\n return {}", "def _parse_from_yaml(self) -> Dict:\n config_path = path.join(path.dirname(path.abspath(__file__)), self.config_file)\n try:\n with open(config_path, \"r\") as f:\n config_dict = yaml.load(f, Loader=yaml.FullLoader)\n return config_dict\n except FileNotFoundError as fnfe:\n raise FileNotFoundError('configuration file not found.')\n except Exception as exc:\n raise Exception('Error while loading config file.')", "def load_yaml(path):\n if os.path.exists(path):\n f = open(path)\n data = yaml.load(f)\n f.close()\n return data\n else:\n # This should maybe throw an exception or something\n return {}", "def yaml_file_to_dict(filepath):\n for extension in YAML_AUTO_EXTENSIONS:\n try:\n with open(filepath + extension) as yaml_file:\n return yaml.load(yaml_file) # , Loader=yaml.FullLoader)\n except IOError as error:\n logger.debug(\n \"IOError (%s) File not found with %s, trying another extension pattern.\",\n error.errno,\n filepath + extension,\n )\n raise FileNotFoundError(\n \"All file extensions tried and none worked for %s\" % filepath\n )", "def load_yaml(filepath):\n with open(filepath, 'r') as stream:\n try:\n return yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n print(exc)", "def load_yaml_file(i):\n\n import yaml\n\n fn = i['yaml_file']\n\n try:\n if sys.version_info[0] > 2:\n f = open(fn, 'r', encoding='utf8')\n else:\n f = open(fn, 'r')\n except Exception as e:\n return {'return': 16, 'error': 'problem opening YAML file='+fn+' ('+format(e)+')'}\n\n try:\n s = f.read()\n except Exception as e:\n f.close()\n return {'return': 1, 'error': 'problem reading YAML file='+fn+' ('+format(e)+')'}\n\n f.close()\n\n try:\n d = yaml.load(s, Loader=yaml.FullLoader)\n except Exception as e:\n return {'return': 1, 'error': 'problem parsing YAML from file='+fn+' ('+format(e)+')'}\n\n return {'return': 0, 'dict': d}", "def read_config(path: str) -> Dict[str, Any]:\n\n with open(path, 'r') as stream:\n config = yaml.load(stream, Loader=yaml.FullLoader)\n return config", "def load_config(filepath=None):\n if filepath is None:\n raise ValueError(\"The filepath is None, please check the config file is exist\")\n\n with open(filepath, \"r\") as stream:\n output = dict()\n try:\n content = yaml.load(stream)\n output.update(content)\n return output\n except yaml.YAMLError as e:\n print(e)", "def read_yaml(yaml_file: str) -> dict:\n with open(yaml_file, 'r', encoding=\"utf8\") as _file:\n _dict = yaml.safe_load(_file)\n logging.info(f\"Yaml file {yaml_file} 
parsed!\")\n\n return _dict", "def load_yaml(file_path):\n with open(file_path) as fin:\n content = yaml.load(fin, Loader=yaml.FullLoader)\n return content", "def read_exercise_yaml(path_yaml):\n exer_dict = {}\n with open(path_yaml, 'r') as stream:\n try:\n exer_dict = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n print(exc)\n return exer_dict", "def read_yaml(yaml_path):\n with open(yaml_path) as f:\n yaml_data = yaml.load(f, Loader=yaml.FullLoader)\n\n return yaml_data", "def load(path: str='config.yaml'):\n file = Path(path).open()\n result = yaml.safe_load(file)\n\n debug(f'YAML file {path} loaded and parsed succesful')\n\n return result", "def load_yaml(fname: str) -> dict:\n try:\n with open(fname, 'r') as f:\n dataMap = yaml.safe_load(f)\n except IOError as e:\n print(f\"Cannot open YAML file {fname}\")\n print(f\"IOError: {e}\")\n \n return dataMap", "def load_file(self, filepath):\n filepath = self._yaml_extension(filepath)\n data = self._load_data_yaml(filepath)\n return data", "def load_yaml(yml_file):\n with open(yml_file) as src:\n cfg = yaml.load(src, Loader=yaml.Loader)\n return cfg", "def LoadYaml(path):\n #NOTE(g): Import is done here, instead of the top of the file, to not require this module if it is not used\n import yaml\n \n fp = None\n try:\n fp = open(path)\n \n data = yaml.load(fp)\n \n finally:\n if fp:\n fp.close()\n \n return data", "def parse_config_file(fpath):\n if not os.path.isfile(fpath):\n raise RuntimeError('ERROR: Unable to find config file at path: {}'\n .format(fpath))\n\n with open(fpath, 'r') as f:\n return yaml.safe_load(f)", "def read_yaml(preset_file: Text) -> Dict:\n with open(preset_file, \"r\") as preset_file:\n return yaml.safe_load(preset_file)", "def load_from_yaml_file(f: Union[str, TextIO]) -> Dict:\n\n # support environment variables in config\n # https://stackoverflow.com/a/55301129\n\n # For maximum compatibility with PyGeoApi config files, this function is\n # inspired by the yaml_load() function in pygeoapi/util.py here:\n # https://github.com/geopython/pygeoapi/blob/2c567d25f70daa3ed0a047ae548a3dfcd97c7cc2/pygeoapi/util.py#L100\n path_matcher = re.compile(r'.*\\$\\{([^}^{]+)\\}.*')\n\n def path_constructor(loader, node):\n env_var = path_matcher.match(node.value).group(1)\n if env_var not in os.environ:\n raise EnvironmentError(\"Undefined environment variable in config\")\n return str_to_python(path.expandvars(node.value))\n\n class EnvVarLoader(yaml.SafeLoader):\n pass\n\n EnvVarLoader.add_implicit_resolver('!path', path_matcher, None)\n EnvVarLoader.add_constructor('!path', path_constructor)\n do_close = False\n if isinstance(f, str):\n f = open(f, \"r\")\n resp = yaml.load(f, Loader=EnvVarLoader)\n if do_close:\n f.close()\n return resp", "def load_config(path=\"configs/default.yaml\") -> dict:\n with open(path, \"r\", encoding=\"utf-8\") as ymlfile:\n cfg = yaml.safe_load(ymlfile)\n return cfg", "def get_yaml_cfg(yaml_filepath):\n with open(str(yaml_filepath), 'r') as fileobj:\n cfg = AttrDict(yaml.safe_load(fileobj))\n return cfg", "def load_yaml(path):\n fsock = open(path)\n \n try:\n yaml_string = fsock.read()\n yaml_obj = yaml.load(yaml_string)\n \n finally:\n fsock.close()\n\n return yaml_obj", "def load_config_file(path):\n with open(path) as file:\n return yaml.load(file, Loader=yaml.FullLoader)" ]
[ "0.7903323", "0.7508515", "0.74972093", "0.74559927", "0.7386771", "0.736159", "0.735117", "0.72731614", "0.71901506", "0.71841854", "0.715368", "0.71515054", "0.70432895", "0.70343006", "0.6987099", "0.69750446", "0.6970174", "0.68350554", "0.68124586", "0.6810847", "0.6797014", "0.67776483", "0.67643154", "0.6746865", "0.6744006", "0.67303675", "0.6712484", "0.6705489", "0.66848683", "0.6679574" ]
0.7580855
1
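For context, the `load` helper in the record above is a thin wrapper around `yaml.safe_load`. A minimal usage sketch (illustrative only: it assumes PyYAML is installed, `yaml` is imported for the helper, `load` is in scope, and the file name is made up) writes a tiny YAML file and reads it back as a plain dict:

# Create a tiny YAML file, then parse it with the load() helper above.
with open("example.yaml", "w") as f:
    f.write("mesh:\n  nx: 64\n  ny: 64\nsolver: gauss_seidel\n")

cfg = load("example.yaml")
print(cfg["mesh"]["nx"])   # -> 64
print(cfg["solver"])       # -> gauss_seidel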
Processes pyYAML output; resolves references and evaluates arithmetic expressions.
def process(yamlDict, subDict=None, path=[], first=True):
    if subDict is None:
        subDict = yamlDict.copy()
    for key, value in subDict.items():
        if first:
            first = False
            path = path + [key]
        else:
            path[-1] = key
        if isinstance(value, dict):
            process(yamlDict, value, path)
        elif isinstance(value, str):
            while "ref" in value:
                # Parse value for target
                idxA = value.find("ref(") + 4
                idxB = value[idxA:].find(')') + idxA
                target = value[idxA:idxB].split('.')
                # Error handling: circular reference
                if target == path:
                    raise ValueError("Circular reference in input file", value)
                # Error handling: invalid reference
                try:
                    targetValue = get_value(yamlDict, target)
                except:
                    raise KeyError("Invalid reference in input file", value)
                # Value may be float, must cast to string
                refStr = "ref(" + value[idxA:idxB] + ')'
                value = value.replace(refStr, str(targetValue))
            # Evaluate any arithmetic expressions & reassign field
            value = math_eval(value)
            set_value(yamlDict, value, path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n res = []\n\n all_lines = [l.strip() for l in sys.stdin]\n\n # Possibly the stats are empty? This can happen if nothing passes.\n if all_lines:\n\n assert all_lines[0] == \"General summary:\"\n\n res.append( [ all_lines[0].split(':')[0], [] ] )\n summary_bits = res[-1][1]\n\n for l in all_lines[1:]:\n if ':' in l:\n summary_bits.append([v.strip() for v in l.split(':')])\n else:\n res.append( [ l, [] ] )\n summary_bits = res[-1][1]\n\n # Now see if we can parse out some numbers\n for cat, lines in res:\n for l in lines:\n for bit in l[1].split():\n bit = bit.strip('();')\n try:\n if '.' in bit:\n l.append( float(re.sub('[,Mb%]', '', bit)) )\n else:\n l.append( int(re.sub('[,Mb%]', '', bit)) )\n except ValueError:\n l.append(bit)\n\n #pprint(res)\n print(yaml.safe_dump(res), end='')", "def _dump_yaml(cls, dumper: yaml.Dumper, source: \"YamlModifier\") -> typing.Any:\n return dumper.represent_scalar(source.label(), source.value)", "def test_yaml_parsing():\n\n # Parser handles no options\n yaml_content = \"\"\"\n ---\n test: 2\n \"\"\"\n exp_builder = ExperimentBuilder(textwrap.dedent(yaml_content))\n # The plus 1 is because we overwrite disable_alchemical_dispersion_correction.\n expected_n_options = (len(exp_builder.GENERAL_DEFAULT_OPTIONS) +\n len(exp_builder.EXPERIMENT_DEFAULT_OPTIONS) + 1)\n assert len(exp_builder._options) == expected_n_options\n\n # Correct parsing\n yaml_content = \"\"\"\n ---\n options:\n verbose: true\n resume_setup: true\n resume_simulation: true\n output_dir: /path/to/output/\n setup_dir: /path/to/output/setup/\n experiments_dir: /path/to/output/experiments/\n platform: CPU\n precision: mixed\n switch_experiment_interval: -2.0\n processes_per_experiment: 2\n max_n_contexts: 9\n switch_phase_interval: 32\n temperature: 300*kelvin\n pressure: null\n constraints: AllBonds\n hydrogen_mass: 2*amus\n randomize_ligand: yes\n randomize_ligand_sigma_multiplier: 1.0e-2\n randomize_ligand_close_cutoff: 1.5 * angstrom\n anisotropic_dispersion_cutoff: null\n default_timestep: 2.0 * femtosecond\n default_nsteps_per_iteration: 2500\n default_number_of_iterations: .inf\n equilibration_timestep: 1.0 * femtosecond\n number_of_equilibration_iterations: 100\n minimize: False\n minimize_tolerance: 1.0 * kilojoules_per_mole / nanometers\n minimize_max_iterations: 0\n annihilate_sterics: no\n annihilate_electrostatics: true\n alchemical_pme_treatment: direct-space\n disable_alchemical_dispersion_correction: no\n \"\"\"\n\n exp_builder = ExperimentBuilder(textwrap.dedent(yaml_content))\n assert len(exp_builder._options) == 33\n\n # The global context cache has been set.\n assert mmtools.cache.global_context_cache.capacity == 9\n\n # Check correct types\n assert exp_builder._options['output_dir'] == '/path/to/output/'\n assert exp_builder._options['pressure'] is None\n assert exp_builder._options['constraints'] == openmm.app.AllBonds\n assert exp_builder._options['anisotropic_dispersion_cutoff'] is None\n assert exp_builder._options['default_timestep'] == 2.0 * unit.femtoseconds\n assert exp_builder._options['randomize_ligand_sigma_multiplier'] == 1.0e-2\n assert exp_builder._options['default_nsteps_per_iteration'] == 2500\n assert type(exp_builder._options['default_nsteps_per_iteration']) is int\n assert exp_builder._options['default_number_of_iterations'] == float('inf')\n assert exp_builder._options['number_of_equilibration_iterations'] == 100\n assert type(exp_builder._options['number_of_equilibration_iterations']) is int\n assert exp_builder._options['minimize'] is 
False", "def test_PhonopyYaml_read(helper_methods):\n filename = cwd / \"phonopy.yaml\"\n cell = _get_unitcell(filename)\n _compare_NaCl_convcell(cell, helper_methods.compare_cells_with_order)", "def _dump_yaml(cls, dumper: yaml.Dumper, source: \"YamlModifier\") -> typing.Any:\n return dumper.represent_scalar(source.label(), source.value[\"original\"])", "def test_yaml(self):\n\n # Check yml file can be loaded correctly\n with open(\"{}/app_spec.yml\".format(self.APP_PATH), mode='r',\n encoding=\"utf-8\", errors='ignore') as stream:\n # Load yaml file\n try:\n yaml_obj = yaml.load(stream) or {}\n except Exception as e:\n self.fail(msg=\"app_spec.yml cannot be loaded\")\n ll = ['input', 'output']\n check_list = ['value_type']\n\n for l in ll:\n l_obj = yaml_obj.get(l, None)\n # Check [input] and [output] section\n with self.subTest(name=f\"[{l}] section\"):\n self.assertIsNotNone(\n l_obj,\n msg=f\"[{l}] section missing in app_spec.yml\")\n\n for k, v in l_obj.items():\n for cl in check_list:\n with self.subTest(name=f\"[{l}:{k}]\"):\n value = v.get(cl)\n self.assertIsNotNone(\n value,\n msg=f\"[{k}/{cl}] missing in app_spec.yml\")\n if l == 'input' and 'value_range' in v and v['value_range']:\n with self.subTest(\n name=f\"[input:{k}] section\"):\n self.assertTrue(\n type(v['value_range']) is list,\n msg=f\"value_range [input:{k}] not a list\")", "def yaml_to_python():\n\n some_string = \"\"\"\nname: Glen Jarvis\nsex: Male\ntitle: Senior Developer\nhp: [32, 71]\nsp: [1, 13]\ngold: 423\ninventory:\n - A laptop\n - Some code\n - A lot of hope\n\"\"\"\n\n some_python = yaml.load(some_string)\n\n print(\"YAML -> Python Example\")\n print(\"type(some_string): {0}\".format(type(some_string)))\n print(\"type(some_python): {0}\".format(type(some_python)))\n\n print(\"\\n\\nYAML (really string in Python):\")\n pprint.pprint(some_string)\n print(\"\\n\\nPython:\")\n pprint.pprint(some_python)", "def import_(self, node):\n yamal_name = os.path.join(self._root, self.construct_scalar(node))\n\n with open(yamal_name, 'r') as yamal_file:\n return yaml.load(yamal_file, ImportLoader)", "def from_yaml(self, yaml):\n self.hwAddress = yaml.get('hwAddress')\n if self.hwAddress:\n self.hwAddress = self.hwAddress.lower()\n self.ip = yaml.get('IP')\n self.formulas = {}\n for f in yaml:\n if isinstance(yaml[f], dict):\n self.formulas[f] = yaml[f]\n\n self.hwtype = yaml.get('hwtype')", "def yaml_parser_preprocess_pipeline_outpout_parameter(config):\n qs_system_name = config['query_system']['system_short_name']\n seq_db_name = config['refseq']['refseq_data']\n save_dir = config['output']['save_dir']\n\n config['output']['system_save_dir'] = join(save_dir, qs_system_name)\n config['output']['hmm_hits_raw_output_dir'] = join(save_dir, qs_system_name, config['output']['hmm_hits_raw_output_prefix_dir'])\n config['output']['hmm_hits_dir'] = join(save_dir, qs_system_name, config['output']['hmm_hits_prefix_dir'])\n config['output']['filtered_hmm_hits_file'] = join(save_dir,qs_system_name, seq_db_name + '.' + qs_system_name + '.' + config['output']['filtered_hmm_hits_postfix'])\n config['output']['system_specific_annotations_dir'] = join(save_dir, qs_system_name, config['output']['system_specific_annotations_dirname_postfix'])\n config['output']['system_neighborhood_specific_annotations_dir'] = join(save_dir, qs_system_name, config['output']['system_neighborhood_specific_annotations_dirname_postfix'])\n\n config['output']['systems_file'] = join(save_dir, qs_system_name, seq_db_name + '.' + qs_system_name + '.' 
+ config['output']['act_qs_system_patterns_summary_postfix'])\n config['output']['systems_components_file'] = join(save_dir,qs_system_name, seq_db_name + '.' + qs_system_name + '.' + config['output']['act_qs_system_patterns_postfix'])\n\n #database:\n\n config['database']['password'] = ''.join([line.strip() for line in open(config['database']['password_file'])])", "def _parse_jinja2_variables(meta_yaml: str) -> dict:\n meta_yaml_lines = meta_yaml.splitlines()\n env = jinja2.Environment()\n parsed_content = env.parse(meta_yaml)\n all_nodes = list(parsed_content.iter_child_nodes())\n\n jinja2_exprs = {}\n jinja2_vals = {}\n for i, n in enumerate(all_nodes):\n if isinstance(n, jinja2.nodes.Assign) and isinstance(\n n.node,\n jinja2.nodes.Const,\n ):\n if _config_has_key_with_selectors(jinja2_vals, n.target.name):\n # selectors!\n\n # this block runs if we see the key for the\n # first time\n if n.target.name in jinja2_vals:\n # we need to adjust the previous key\n # first get the data right after the key we have\n jinja2_data = (\n all_nodes[jinja2_vals[n.target.name][1] + 1].nodes[0].data\n )\n\n # now pull out the selector and reset the key\n selector_re = SELECTOR_RE.match(jinja2_data)\n if selector_re is not None:\n selector = selector_re.group(1)\n new_key = n.target.name + CONDA_SELECTOR + selector\n jinja2_vals[new_key] = jinja2_vals[n.target.name]\n del jinja2_vals[n.target.name]\n\n # now insert this key - selector is the next thing\n jinja2_data = all_nodes[i + 1].nodes[0].data\n selector_re = SELECTOR_RE.match(jinja2_data)\n if selector_re is not None:\n selector = selector_re.group(1)\n new_key = n.target.name + CONDA_SELECTOR + selector\n jinja2_vals[new_key] = (n.node.value, i)\n else:\n jinja2_vals[n.target.name] = (n.node.value, i)\n else:\n jinja2_vals[n.target.name] = (n.node.value, i)\n elif isinstance(n, jinja2.nodes.Assign):\n if isinstance(n.target, jinja2.nodes.Tuple):\n for __n in n.target.items:\n jinja2_exprs[__n.name] = meta_yaml_lines[n.lineno - 1]\n else:\n jinja2_exprs[n.target.name] = meta_yaml_lines[n.lineno - 1]\n\n # we don't need the indexes into the jinja2 node list anymore\n for key, val in jinja2_vals.items():\n jinja2_vals[key] = jinja2_vals[key][0]\n\n return jinja2_vals, jinja2_exprs", "def test_event_post_yaml_parse(self) -> None:\n\n @Event.PostYAMLParse.subscribe\n def hook(bib: Dict[str, Entry]) -> None:\n bib[\"Cao_2019\"].data[\"month\"] = \"August\"\n\n reference = self.EXAMPLE_ENTRY_DICT.copy()\n reference[\"month\"] = \"August\"\n\n assert Event.PostYAMLParse.validate()\n\n entries = YAMLParser().parse(self.EXAMPLE_YAML_FILE)\n entry = list(entries.values())[0]\n assert entry.data == reference", "def Transform(self, registration):\n yaml = yp.YamlPrinter()\n self._TransformKnownFields(yaml, registration)\n self._TransformRemainingFields(yaml, registration)", "def process(input_path, output_path):\n save_plist(fix_data(load_yaml(input_path)), output_path)", "def _include_yaml(loader: SafeLineLoader, node: yaml.nodes.Node) -> JSON_TYPE:\n fname = os.path.join(os.path.dirname(loader.name), node.value)\n try:\n return _add_reference(load_yaml(fname), loader, node)\n except FileNotFoundError as exc:\n raise XKNXException(f\"{node.start_mark}: Unable to read file {fname}.\") from exc", "def _from_yaml(cls, loader: yaml.Loader, node: yaml.Node) -> \"YamlModifier\":\n value = loader.construct_scalar(typing.cast(yaml.ScalarNode, node))\n return cls(value)", "def apply(args):\n html_doc = document.Document(get_code(args.file))\n with 
open(args.transform_file, 'r', encoding='UTF-8') as tfr_file:\n tfr_json = yaml.load(tfr_file)\n not_applied = html_doc.apply(tfr_json)\n\n if len(not_applied) == 0:\n print('All transforms applied.')\n else:\n print('The following transforms could not be applied:')\n print(yaml.dump(not_applied))\n set_code(args.file, html_doc)", "def load(path: str) -> Any:\n config = load_configs(path)\n config.reduce(config.MUTATIONS)\n config.reduce('_reduce')\n for reduces in config.output.get('_reduce') or []:\n for item in reduces or [None]:\n config.reduce(item)\n\n output = config.output\n for post_process in output.get('_post_process') or []:\n file_info = find(post_process)\n file_info.search(file_info.module)(output)\n return output", "def main(cls, **kwargs):\n try:\n import file_transformer\n except Exception as e:\n sys.exit(\"{}\\nSee https://github.com/benkehoe/file-transformer\".format(e))\n \n def loader(input_stream, args):\n return yaml.load(input_stream)\n \n def processor(input, args):\n transform = cls(input, vars(args))\n transform.apply()\n return transform.template\n \n def dumper(output, output_stream, args):\n yaml.dump(output, output_stream)\n \n return file_transformer.main(processor, loader, dumper, **kwargs)", "def rule_convert(source_path, build_path):\n logging.info(\n \"Searching path `{}` for YAML rule definitions to convert ...\".format(\n source_path\n )\n )\n set_logger()\n convert_rules(source_path, build_path)", "def test_PhonopyYaml_read_with_stream(helper_methods):\n filename = cwd / \"phonopy.yaml\"\n with open(filename) as fp:\n cell = _get_unitcell(fp)\n _compare_NaCl_convcell(cell, helper_methods.compare_cells_with_order)", "def enrich(yamlfile, results, **args):\n yamlobj = yaml.load(open(yamlfile))\n cache = {}\n infer_enum_meanings(yamlobj, cache=cache)\n if results is not None:\n with open(results, \"w\") as io:\n #io.write(str(cache))\n io.write(yaml.dump(cache))\n print(yaml.dump(yamlobj, default_flow_style=False, sort_keys=False))", "def main():\n\n print(f\"plist-yaml-plist version {VERSION}\")\n\n if len(sys.argv) < 2:\n usage()\n exit(1)\n\n in_path = sys.argv[1]\n\n # auto-determine which direction the conversion should go\n if in_path.endswith(\".yaml\") or in_path.endswith(\".yaml\"):\n filetype = \"yaml\"\n elif in_path.endswith(\".json\"):\n filetype = \"json\"\n elif in_path.endswith(\".plist\"):\n filetype = \"plist\"\n else:\n filetype = \"other\"\n\n if filetype == \"yaml\" or filetype == \"json\":\n # allow for converting whole folders if a glob is provided\n _, glob_files = os.path.split(in_path)\n if \"*\" in glob_files:\n glob_files = glob.glob(in_path)\n for glob_file in glob_files:\n out_path = get_out_path(glob_file, filetype)\n if filetype == \"yaml\":\n print(\"Processing YAML folder with globs...\")\n yaml_plist(glob_file, out_path)\n elif filetype == \"json\":\n print(\"Processing JSON folder with globs...\")\n json_plist(glob_file, out_path)\n else:\n try:\n sys.argv[2]\n except IndexError:\n out_path = get_out_path(in_path, filetype)\n else:\n out_path = sys.argv[2]\n if filetype == \"yaml\":\n print(\"Processing yaml file...\")\n if out_path == \"--tidy\":\n tidy_yaml(in_path)\n else:\n yaml_plist(in_path, out_path)\n elif filetype == \"json\":\n print(\"Processing json file...\")\n json_plist(in_path, out_path)\n # allow for converting whole folders if 'YAML' or 'JSON' is in the path\n # and the path supplied is a folder\n elif os.path.isdir(in_path) and \"YAML\" in in_path:\n print(\"Processing YAML folder...\")\n filetype 
= \"yaml\"\n try:\n if sys.argv[2] == \"--tidy\":\n print(\"WARNING! Processing all subfolders...\\n\")\n for root, dirs, files in os.walk(in_path):\n for name in files:\n tidy_yaml(os.path.join(root, name))\n for name in dirs:\n tidy_yaml(os.path.join(root, name))\n elif os.path.isdir(sys.argv[2]):\n # allow batch replication of folder structure and conversion of yaml to plist\n # also copies other file types without conversion to the same place in the\n # hierarchy\n out_path_base = os.path.abspath(sys.argv[2])\n print(\"Writing to {}\".format(out_path_base))\n for root, dirs, files in os.walk(in_path):\n for name in dirs:\n working_dir = os.path.join(out_path_base, name)\n if not os.path.isdir(working_dir):\n print(\"Creating new folder \" + working_dir)\n os.mkdir(working_dir)\n for name in files:\n source_path = os.path.join(root, name)\n print(\"In path: \" + in_path)\n sub_path = re.sub(in_path, \"\", source_path)\n print(\"Subdirectory path: \" + sub_path)\n filename, _ = os.path.splitext(\n os.path.join(out_path_base, sub_path)\n )\n print(\"Source path: \" + source_path)\n if source_path.endswith(\".yaml\"):\n dest_path = filename + \".plist\"\n print(\"Destination path for plist: \" + dest_path)\n yaml_plist(source_path, dest_path)\n else:\n dest_path = os.path.join(\n os.path.join(out_path_base, sub_path)\n )\n print(\"Destination path: \" + dest_path)\n try:\n shutil.copy(source_path, dest_path)\n if os.path.isfile(dest_path):\n print(\"Written to \" + dest_path + \"\\n\")\n except IOError:\n print(\"ERROR: could not copy \" + source_path + \"\\n\")\n except IndexError:\n for in_file in os.listdir(in_path):\n in_file_path = os.path.join(in_path, in_file)\n out_path = get_out_path(in_file_path, filetype)\n yaml_plist(in_file_path, out_path)\n elif os.path.isdir(in_path) and \"JSON\" in in_path:\n print(\"Processing JSON folder...\")\n filetype = \"json\"\n for in_file in os.listdir(in_path):\n in_file_path = os.path.join(in_path, in_file)\n out_path = get_out_path(in_file_path, filetype)\n json_plist(in_file_path, out_path)\n elif os.path.isdir(in_path) and \"PLIST\" in in_path:\n print(\"Processing PLIST folder...\")\n filetype = \"plist\"\n if os.path.isdir(sys.argv[2]):\n # allow batch replication of folder structure and conversion of plist to yaml\n # also copies other file types without conversion to the same place in the\n # hierarchy\n out_path_base = os.path.abspath(sys.argv[2])\n print(\"Writing to \" + out_path_base)\n for root, dirs, files in os.walk(in_path):\n for name in dirs:\n source_dir = os.path.join(root, name)\n sub_dir = re.sub(in_path, \"\", source_dir)\n working_dir = out_path_base + sub_dir\n if \"YAML\" in working_dir:\n # chances are we don't want to copy the contents of a YAML\n # folder here\n continue\n if not os.path.isdir(working_dir):\n print(\"Creating new folder \" + working_dir)\n os.mkdir(working_dir)\n for name in files:\n source_path = os.path.join(root, name)\n if \"YAML\" in source_path:\n # chances are we don't want to copy the contents of a YAML\n # folder here\n continue\n print(\"In path: \" + in_path)\n sub_path = re.sub(in_path, \"\", source_path)\n print(\"Subdirectory path: \" + sub_path)\n print(\"Source path: \" + source_path)\n if check_if_plist(source_path):\n filename = re.sub(\".plist\", \"\", out_path_base + sub_path)\n dest_path = filename + \".yaml\"\n print(\"Destination path for yaml: \" + dest_path)\n plist_yaml(source_path, dest_path)\n else:\n dest_path = out_path_base + sub_path\n print(\"Destination path: \" + 
dest_path)\n try:\n shutil.copy(source_path, dest_path)\n if os.path.isfile(dest_path):\n print(\"Written to \" + dest_path + \"\\n\")\n except IOError:\n print(\"ERROR: could not copy \" + source_path + \"\\n\")\n else:\n if check_if_plist(in_path):\n try:\n sys.argv[2]\n except IndexError:\n out_path = get_out_path(in_path, filetype)\n else:\n out_path = sys.argv[2]\n print(\"Processing plist file...\")\n plist_yaml(in_path, out_path)\n else:\n print(\"\\nERROR: Input File is not PLIST, JSON or YAML format.\\n\")\n usage()\n exit(1)", "def populate_string( yaml_string, data={}):\n import random\n\n def replace_in_line(line):\n if '{{' in line and '}}' in line and not ('#' in line and line.index('#') < line.index('{{')):\n begin = line.index('{{')\n end = line.index('}}', begin)\n variable_name = line[begin:end].strip().replace('{{','').replace('}}','').strip()\n try:\n return (\n line[:begin].replace('{{','').replace('}}','') +\n str(xeval(variable_name, merge(data, os.environ))) +\n line[end:].replace('}}','').replace('{{','')\n )\n except:\n var = locate_variable(line)\n raise Exception('yaml file needs all data to be evaluated: {{{{ {} }}}}'.format(variable_name))\n\n\n else:\n return line\n\n new_lines = list(map(replace_in_line, yaml_string.splitlines()))\n return '\\n'.join(new_lines)", "async def process(self, tokens):\n result = await self.parser.process(tokens)\n (((sign, _), p), _) = result\n if sign == '-':\n p.neg = True\n return p", "def parse_ruby_yaml_to_python_dict(file='plewic.01.0001.yaml'):\n python_yaml_name = \"plewic_python.\" + \".\".join(file.split(\".\")[1:])\n\n # expressions in a line that makes the function ignore it\n # I assume that the lines containing the following strings are short enough\n # so that they don't break to another line\n # lines with other markers to be ignored may be too long for one line so\n # they are handled with last_active=3\n ignore_expr = set([\":user:\",\n \":revision:\",\n \"!ruby/object:\"])\n\n # create a list that will hold dictionaries (one for each revision)\n # revision - one for every `text` key\n file_list = []\n\n # indicates whether the iteration process is in the errors section (\n # under `errors` marker)\n errors_section = False\n\n # the variable is a copy of the file given as an argument;\n # the original variable `file` can't be used because it is located in a\n # different module\n # set the variable to global so that the other function in the module\n # can use it\n global file_holder\n file_holder = file\n\n with open(file, 'r') as ruby_yaml:\n ignore_first_line = True\n\n # last active key; if -1 then no key has been active\n # active means that the key-value pair was saved to the `revision` dict\n last_active = -1\n\n while True:\n\n line = ruby_yaml.readline()\n\n if not line:\n break\n\n if ignore_first_line:\n # ignore the first line of the file that contains '---'\n ignore_first_line = False\n continue\n\n ignored_item_found = False\n\n # check if the ignored terms are in the line\n for ignored in ignore_expr:\n if ignored in line:\n ignored_item_found = True\n break\n if ignored_item_found:\n continue\n\n # go back to the line (key) that was previously active\n # text and new_text keys' values, comment values can exceed\n # character limit in a line\n active_key_lookup = {1: 'text', 2: 'new_text', 3: 'comment'}\n\n # errors_section is a section of the file that starts with `errors`\n # marker and ends with the last `error`'s `category` marker\n if not errors_section:\n\n # if `text` occured, a new 
revision dict must be initialised\n if ' text:' in line:\n # if the `text` marker has appeared it means that the\n # previous revision has been completed\n errors_section = False\n # time for a new revision dict; the previous one has been\n # fully saved to the list\n if 'revision' in locals():\n # previous revision dict has not been saved\n # saving it now before a new (empty) one is created\n file_list.append(revision)\n\n # no previous revision dict\n # new empty one is created\n revision = dict()\n # take the second part of the split. To cover unlikely\n # situation that `text: ` occurs more than once in a line I\n # use [1:] instead of [1];\n # [1:] returns a list so join() is needed\n # rstrip() removes trailing newline character\n revision['text'] = \"\".join(line.split(\"text:\")[1:]).\\\n lstrip().rstrip()\n # sanity check: the `text` field can't be empty\n # if so, inform the user\n check_if_field_is_empty(revision, \"text\")\n\n last_active = 1\n elif ' new_text:' in line:\n # take the second part of the split. To cover unlikely\n # situation that `new_text: ` occurs more than once in\n # a line I use [1:] instead of [1]; [1:] returns a list\n # so join() is needed;\n # rstrip() removes trailing newline character\n\n revision['new_text'] = \"\".join(\\\n line.split(\"new_text:\")[1:]).lstrip().rstrip()\n # sanity check: the `new text` field can't be empty\n # if so, inform the user\n check_if_field_is_empty(revision, \"new_text\")\n\n last_active = 2\n elif ' :valid_sentence:' in line:\n # handle valid_sentence marker which is the only attribute\n mapping_to_boolean = {'true': True, 'false': False}\n revision['valid_sentence'] = mapping_to_boolean[\\\n line.split(\":valid_sentence: \")[1].rstrip()]\n check_if_field_is_empty(revision, \"valid_sentence\")\n elif ' :comment:' in line or '- :title:' in line:\n # even though I don't save comments or titles to the\n # dictionary I need to monitor them so that they don't\n # get attached to other fields as defined in the `else`\n # section of the current `if` block\n last_active = 3\n\n elif ' errors:' in line:\n # each `error` marker will be stored in a separete dict\n # the key-value pair 'errors': [{'error': ...},\n # {'error': ...}, ...]\n # initialise an empty list for {'error': ...} dicts\n revision['errors'] = []\n elif ' error:' in line:\n errors_section = True\n # start new dictionary for a single error section\n single_error_dict = dict()\n # take the second part of the split; I assume the\n # second part is short enough so it doesn't exceed\n # the line's character limit\n single_error_dict['error'] = line.split(\"error: \")[1].\\\n rstrip(\"\\n\")\n check_if_field_is_empty(single_error_dict, \"error\")\n elif \" attributes:\\n\" == line:\n pass\n else:\n # check if the line contains non-key i.e. 
text from the\n # previous line\n # it may be the case that the content starts with a\n # additional whitespace (possibly whitespaces)\n # in such a case we must only remove the first four spaces\n # (four spaces indicate the second level of indentation)\n # split with four spaces and take the second part\n\n contents = \"\".join(line.split(\" \")[1:]).rstrip(\"\\n\")\n\n # I assume that we must add a space between the contents of\n # the old line and the new one\n if last_active == 1 or last_active == 2:\n # ignore the fact that the comment (last_active=3)\n # exceeds the character limit of the line\n revision[active_key_lookup[last_active]] += \" \" + \\\n contents\n else:\n # this block starts with `correction` marker because the error\n # line has already been read\n if ' correction:' in line:\n single_error_dict['correction'] = line.\\\n split(\"correction: \")[1].rstrip(\"\\n\")\n check_if_field_is_empty(single_error_dict, \"correction\")\n elif ' position:' in line:\n single_error_dict['position'] = line.\\\n split(\"position: \")[1].rstrip(\"\\n\")\n check_if_field_is_empty(single_error_dict, \"position\")\n elif ' attributes:' in line:\n # handle attributes of the error\n # the `attributes` key corresponds to a dict containing\n # attributes (indented by additional two whitespaces)\n error_attributes_dict = dict()\n continue\n elif \" :type:\" in line:\n # the last character in the split() argument is ':' because\n # each category starts with \":\"\n error_attributes_dict['type'] = line.split(\"type: :\")[1].\\\n rstrip(\"\\n\")\n check_if_field_is_empty(error_attributes_dict, 'type')\n elif \" :distance:\" in line:\n error_attributes_dict['distance'] = line.\\\n split(\"distance: \")[1].rstrip(\"\\n\")\n check_if_field_is_empty(error_attributes_dict, 'distance')\n elif \" :category:\" in line:\n # the `category` marker is the last attribute of an error\n # in other words it concludes the single error section\n # the single error dict must be appended to the errors list\n errors_section = False\n error_attributes_dict['category'] = line.\\\n split(\"category: \")[1].rstrip(\"\\n\")\n check_if_field_is_empty(single_error_dict, \"category\")\n # save the error_attributes_dict\n single_error_dict['attributes'] = error_attributes_dict\n # the single error dict must be appended to the errors list\n revision['errors'].append(single_error_dict)\n\n return file_list", "def test_read_phonopy_yaml(helper_methods):\n filename = cwd / \"phonopy.yaml\"\n cell = read_phonopy_yaml(filename).unitcell\n _compare_NaCl_convcell(cell, helper_methods.compare_cells_with_order)", "def test_01():\n text = \"a = 2 + 3 * (4 + 5)\"\n\n c = _ast.parse(text)\n print(_ast.dump(c))", "def parse_yaml(cls, loader: yaml.Loader, node: yaml.Node) -> \"YamlModifier\":\n return cls._from_yaml(loader, node)", "def register_yaml(self, yaml_text):\n\n defs = yaml.load_all(yaml_text)\n for def_set in defs:\n for name,_def in def_set.iteritems():\n # TODO: Hook into pyyaml's event emitting stuff to try to get the canonical form without re-dumping\n def_text = yaml.dump(_def, canonical=True, allow_unicode=True)\n self.register_def(name, _def, def_text)" ]
[ "0.5250444", "0.50050116", "0.49924704", "0.49598306", "0.49067336", "0.48261368", "0.47995853", "0.4730459", "0.47279227", "0.47276902", "0.47207996", "0.4692445", "0.46897322", "0.46429425", "0.46355367", "0.4619667", "0.46165827", "0.4616196", "0.46019894", "0.45978037", "0.45945165", "0.4569066", "0.45616361", "0.45586362", "0.45547262", "0.45503068", "0.45485073", "0.454226", "0.45260364", "0.45202968" ]
0.58088744
0
Method getTestObjects returns a temporary TestObject for the given GML file.
def requestTestObjects(self, gmlFile: str) -> dict: jsonData = {} url = '%s%s' % (self.__urlWebApp, '/v2/TestObjects?action=upload') headers = {'Accept': 'application/json'} with open(gmlFile, 'rb') as f: files = {'file': f} r = requests.post(url, files=files, headers=headers, proxies=self.__proxies) if r.status_code == 200: jsonData = json.loads(r.text) elif r.status_code == 413: message = '%s: %s' % (r.status_code, 'Uploaded test data are too large') raise ConnectionError(message) else: message = '%s: %s' % (r.status_code, 'File upload failed') raise ConnectionError(message) return jsonData
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_objects(self):\n\n obj = FBO(\n path=TEST_FILES_ROOT,\n metadata=FileObject.MetadataInFileHead,\n ).objects.all().filter(\n name__glob='*.rst',\n ).get(\n name='test2.rst',\n )\n\n self.assertEqual(\n 'Second in the alphabet',\n obj.title,\n )", "def test_get_file_object(self):\n pass", "def setUpClass(cls):\n clean_db() # remove all objects created by another tests\n # save the link to the content type\n cls.ctype = ContentType.objects.get_for_model(TestModel)\n # create an object\n cls.object = TestModel.objects.create(name=\"Test object\")\n # create two images related to the object\n cls.image1 = models.Image.objects.create(\n image=get_image_in_memory_data(),\n position=0,\n content_type=cls.ctype,\n object_id=cls.object.id\n )\n cls.image2 = models.Image.objects.create(\n image=get_image_in_memory_data(),\n position=1,\n content_type=cls.ctype,\n object_id=cls.object.id\n )\n # create another object without related images\n cls.alone_object = TestModel.objects.create(\n name=\"Alone test object\"\n )", "def loadTest(yObject,yMatch,features):\n\n nf = []\n for f in features:\n if f != 'label':\n nf.append(f)\n \n print 'Train features: {}'.format(features)\n print 'Test features: {}'.format(nf)\n \n # load test subject data, save as attribtues\n tObject = ld.loadH5(yObject,*['full'])\n ID = tObject.attrs['ID']\n\n parsedData = ld.parseH5(tObject,nf)\n tObject.close()\n\n data = parsedData[ID]\n mtd = cu.mergeFeatures(data,nf)\n\n threshed = ld.loadMat(yMatch)\n\n ltvm = cu.vertexMemberships(threshed,180)\n\n return [threshed,mtd,ltvm]", "def test_meshes_io(self):\n po = ProjectObject(\n id=\"foobar\",\n project_type=\"meshes\",\n meshes=self.meshes,\n pose=self.pose,\n category=\"chair\",\n )\n\n object_xml = po.save(self.temp_directory)\n po2 = ProjectObject.load(\"meshes\", object_xml, self.temp_directory)\n self.assertTrue(po2.almost_equal(po))", "def test():\n root_path = os.path.dirname(os.path.realpath(__file__))\n test_path = os.path.join(root_path, 'test_files')\n with open(os.path.join(test_path, 'hexagons0.geojson')) as f:\n hexagons_old = load(f)\n with open(os.path.join(test_path, 'hexagons1.geojson')) as f:\n hexagons_new = load(f) \n return hexagons_new, hexagons_old", "def create_test_set(self):\n test_files = os.listdir(self.image_folder_path)\n test_files = sorted_alphanumeric(test_files)\n delete_files(self.root_name, \"/VOC2021/ImageSets/Main\")\n write_txt(\"test.txt\", self.txt_path, test_files)", "def test_all_merge(self):\n\n test_folder = os.path.join('test_data', 'merging_tests', 'batch_test')\n # test_folder = base_path + '/test_data/merging_tests/batch_test/'\n results_folder = os.path.join(test_folder, 'results')\n # results_folder = test_folder+\"results/\"\n\n if not os.path.isdir(results_folder):\n os.mkdir(results_folder)\n\n # delete all files in output folder\n for the_file in os.listdir(results_folder):\n file_path = os.path.join(results_folder, the_file)\n if os.path.isfile(file_path):\n os.unlink(file_path)\n\n backgrounds_folder = os.path.join(test_folder, 'backgrounds')\n obj_poses_folder = os.path.join(test_folder, 'object_poses')\n\n mi.generate_for_all_objects(obj_poses_folder, backgrounds_folder, results_folder, adjust_brightness = True)\n self.assertEqual(len(os.listdir(obj_poses_folder)), len(os.listdir(results_folder)))\n\n for the_file in os.listdir(results_folder):\n file_path = os.path.join(results_folder, the_file)\n im = Image.open(file_path)\n self.assertEqual((300,300), im.size)\n self.assertEqual('JPEG', im.format)\n 
self.assertNotEqual('PNG', im.format)", "def test_new(self):\n storage = FileStorage()\n save = FileStorage._FileStorage__objects\n FileStorage._FileStorage__objects = {}\n test_dict = {}\n for key, value in classes.items():\n with self.subTest(key=key, value=value):\n instance = value()\n instance_key = instance.__class__.__name__ + \".\" + instance.id\n storage.new(instance)\n test_dict[instance_key] = instance\n self.assertEqual(test_dict, storage._FileStorage__objects)\n FileStorage._FileStorage__objects = save", "def make_tests(test_descr=TRANSFORMS_TESTINFO):\n tests = []\n for _transform, tr_input, tr_output, _normalize, _subobjects in test_descr:\n # load transform if necessary\n if type(_transform) is type(''):\n try:\n _transform = load(_transform).register()\n except MissingBinary:\n # we are not interessted in tests with missing binaries\n continue\n except:\n import traceback\n traceback.print_exc()\n continue\n\n if TR_NAMES is not None and not _transform.name() in TR_NAMES:\n print 'skip test for', _transform.name()\n continue\n\n class TransformTestSubclass(TransformTest):\n input = input_file_path(tr_input)\n output = output_file_path(tr_output)\n transform = _transform\n normalize = lambda x, y: _normalize(y)\n subobjects = _subobjects\n\n tests.append(TransformTestSubclass)\n\n return tests", "def get_tests(self, obj=None):\n # create class to unit test notebooks\n if obj is None:\n obj = \"{}\".format(self._name)\n obj = type(obj, (unittest.TestCase,), self.test_dict)\n else:\n for key, val in self.test_dict:\n setattr(obj, key, val)\n obj.ignore = self.ignore\n obj.py2_ignore = self.py2_ignore\n return obj", "def _get_random_object(self, num_objects, test):\n if test:\n urdf_pattern = os.path.join(self._urdfRoot, 'random_urdfs/*0/*.urdf')\n else:\n urdf_pattern = os.path.join(self._urdfRoot, 'random_urdfs/*[^0]/*.urdf')\n found_object_directories = glob.glob(urdf_pattern)\n total_num_objects = len(found_object_directories)\n selected_objects = np.random.choice(np.arange(total_num_objects),\n num_objects)\n selected_objects_filenames = []\n for object_index in selected_objects:\n selected_objects_filenames += [found_object_directories[object_index]]\n return selected_objects_filenames", "def test_json(test_data,tmp_path):\n\n for d in test_data:\n\n gpm = GenotypePhenotypeMap(wildtype=d[\"wildtype\"],\n genotype=d[\"genotype\"],\n phenotype=d[\"phenotype\"],\n uncertainty=d[\"uncertainty\"])\n\n # Write json file\n json_file = os.path.join(tmp_path,\"tmp.json\")\n gpm.to_json(filename=json_file)\n assert os.path.isfile(json_file)\n\n # Read json file\n new_gpm = gpmap.read_json(filename=json_file)\n conftest.compare_gpmap(gpm,new_gpm)", "def ConstrTest():\n with open(path.join(MAIN_PATH, TEST)) as f:\n for line in f:\n line = line.strip().split(\"\\t\")\n src, dest = line[1:]\n features = Features(src, dest)\n test_instances.append(features)", "def test():\n\t\treturn 
[\"vice.core.objects.tests\",\n\t\t\t[\n\t\t\t\tagb.test_agb_grid_constructor(),\n\t\t\t\tagb.test_agb_grid_destructor(),\n\t\t\t\tcallback_1arg.test_callback_1arg_constructor(),\n\t\t\t\tcallback_1arg.test_callback_1arg_destructor(),\n\t\t\t\tcallback_2arg.test_callback_2arg_constructor(),\n\t\t\t\tcallback_2arg.test_callback_2arg_destructor(),\n\t\t\t\tccsne.test_ccsne_yield_specs_constructor(),\n\t\t\t\tccsne.test_ccsne_yield_specs_destructor(),\n\t\t\t\tchannel.test_channel_constructor(),\n\t\t\t\tchannel.test_channel_destructor(),\n\t\t\t\telement.test_element_constructor(),\n\t\t\t\telement.test_element_destructor(),\n\t\t\t\tfromfile.test_fromfile_constructor(),\n\t\t\t\tfromfile.test_fromfile_destructor(),\n\t\t\t\thydrodiskstars.test_hydrodiskstars_constructor(),\n\t\t\t\thydrodiskstars.test_hydrodiskstars_destructor(),\n\t\t\t\timf.test_imf_constructor(),\n\t\t\t\timf.test_imf_destructor(),\n\t\t\t\tintegral.test_integral_constructor(),\n\t\t\t\tintegral.test_integral_destructor(),\n\t\t\t\tinterp_scheme_1d.test_interp_scheme_1d_constructor(),\n\t\t\t\tinterp_scheme_1d.test_interp_scheme_1d_destructor(),\n\t\t\t\tinterp_scheme_2d.test_interp_scheme_2d_constructor(),\n\t\t\t\tinterp_scheme_2d.test_interp_scheme_2d_destructor(),\n\t\t\t\tism.test_ism_constructor(),\n\t\t\t\tism.test_ism_destructor(),\n\t\t\t\tmdf.test_mdf_constructor(),\n\t\t\t\tmdf.test_mdf_destructor(),\n\t\t\t\tmigration.test_migration_constructor(),\n\t\t\t\tmigration.test_migration_destructor(),\n\t\t\t\tmultizone.test_multizone_constructor(),\n\t\t\t\tmultizone.test_multizone_destructor(),\n\t\t\t\tsinglezone.test_singlezone_constructor(),\n\t\t\t\tsinglezone.test_singlezone_destructor(),\n\t\t\t\tsneia.test_sneia_yield_specs_constructor(),\n\t\t\t\tsneia.test_sneia_yield_specs_destructor(),\n\t\t\t\tssp.test_ssp_constructor(),\n\t\t\t\tssp.test_ssp_destructor(),\n\t\t\t\ttracer.test_tracer_constructor(),\n\t\t\t\ttracer.test_tracer_destructor()\n\t\t\t]\n\t\t]", "def test_objectresource_loadallobjects(self):\n\n home01 = yield self.homeUnderTest(txn=self.theTransactionUnderTest(0), name=\"user01\", create=True)\n self.assertTrue(home01 is not None)\n calendar01 = yield home01.childWithName(\"calendar\")\n yield calendar01.createCalendarObjectWithName(\"1.ics\", Component.fromString(self.caldata1))\n yield self.commitTransaction(0)\n\n home = yield self._remoteHome(self.theTransactionUnderTest(1), \"user01\")\n self.assertTrue(home is not None)\n calendar = yield home.childWithName(\"calendar\")\n objects = yield calendar.objectResources()\n self.assertEqual(len(objects), 1)\n self.assertEqual(objects[0].name(), \"1.ics\")\n yield self.commitTransaction(1)", "def setUpClass(cls):\n\n with open('pygenprop/testing/test_constants/C_chlorochromatii_CaD3.faa') as fasta_one:\n with open('pygenprop/testing/test_constants/C_chlorochromatii_CaD3.tsv') as assignment_file_one:\n properties_one = parse_interproscan_file_and_fasta_file(assignment_file_one, fasta_file=fasta_one)\n\n with open('pygenprop/testing/test_constants/C_luteolum_DSM_273.faa') as fasta_two:\n with open('pygenprop/testing/test_constants/C_luteolum_DSM_273.tsv') as assignment_file_two:\n properties_two = parse_interproscan_file_and_fasta_file(assignment_file_two, fasta_file=fasta_two)\n\n with open('pygenprop/testing/test_constants/test_genome_properties_two.txt') as test_genome_properties_file:\n genome_properties_tree = parse_genome_properties_flat_file(test_genome_properties_file)\n\n cls.test_genome_property_results = [properties_one, 
properties_two]\n cls.test_tree = genome_properties_tree\n\n cls.engine = create_engine('sqlite://')", "def test_fobj():\n Level3File(get_test_data('nids/Level3_FFC_N0Q_20140407_1805.nids'))", "def run_tests():\n suite = unittest.TestSuite()\n main_folder = os.path.join(os.path.dirname(__file__), \"data\", \"qgis\")\n for subfolder in os.listdir(main_folder):\n datafile = os.path.join(main_folder, subfolder, \"testlayer.gpkg\")\n if not os.path.exists(datafile):\n datafile = os.path.join(main_folder, subfolder, \"testlayer.tiff\")\n subfolder_path = os.path.join(main_folder, subfolder)\n for style in os.listdir(subfolder_path):\n if style.lower().endswith(\"qml\"):\n stylefile = os.path.join(subfolder_path, style)\n name = os.path.splitext(stylefile)[0]\n expectedfile = name + \".geostyler\"\n with open(expectedfile) as f:\n expected = json.load(f)\n setattr(\n QgisToStylerTest,\n \"test_\" + name,\n test_function(datafile, stylefile, expected),\n )\n\n suite = unittest.defaultTestLoader.loadTestsFromTestCase(QgisToStylerTest)\n unittest.TextTestRunner().run(suite)", "def gather_tests(self):\n rosie_tests_dir = os.path.join(cp_tests_dir(),\n \"circuitpython\",\n \"rosie_tests\")\n test_files = []\n for test in os.scandir(rosie_tests_dir):\n # TODO: implement exclusions by board\n if test.path.endswith(\".py\"):\n test_files.append(TestObject(test.path))\n\n return test_files", "def populate_objects(self):\n\t\t\n\t\t# Don't populate if already done\n\t\tif self.objects:\n\t\t\treturn\n\t\t\n\t\tself.object_dirs = []\n\t\tdir_regex = re.compile(\"^[0-9a-f]{2}$\")\n\t\tfile_regex = re.compile(\"^[0-9a-f]{38}$\")\n\t\t\n\t\t# Get list of object dirs\n\t\tfor o_dir in os.listdir(self.objects_root):\n\t\t\to_dir_path = os.path.join(self.objects_root, o_dir)\n\t\t\tif re.match(dir_regex, o_dir) and os.path.isdir(o_dir_path):\n\t\t\t\t# Looks like an object dir so far\n\t\t\t\tself.object_dirs.append((o_dir, o_dir_path))\n\t\t\n\t\t# Get list of object files\n\t\tfor o_dir, o_dir_path in self.object_dirs:\n\t\t\tfor o_file in os.listdir(o_dir_path):\n\t\t\t\to_file_path = os.path.join(o_dir_path, o_file)\n\t\t\t\tif re.match(file_regex, o_file) and os.path.isfile(o_file_path):\n\t\t\t\t\t# Looks like an object file so far\n\t\t\t\t\tself.objects.append(\n\t\t\t\t\t\tGitLooseObject(\n\t\t\t\t\t\t\tid = o_dir + o_file,\n\t\t\t\t\t\t\tpath = o_file_path\n\t\t\t\t\t\t)\n\t\t\t\t\t)", "def step_impl(context, objects_type):\n log.info(\"====> Verify the object defined in the xml file is in the objects directory\")\n nsa_container = get_nsa_container_string(context)\n\n if objects_type == \"ObjectsTest1\":\n object_list = resourceset_parameters.ObjectsTest1\n elif objects_type == \"ObjectsTest2\":\n object_list = resourceset_parameters.ObjectsTest2\n elif objects_type == \"ObjectsTest4\":\n object_list = resourceset_parameters.ObjectsTest4\n else:\n assert False, \"Failed: No objects_type parameter while posting. 
Got: {objects_type}\".format(objects_type=objects_type)\n\n for an_object in object_list:\n if verify_object_exists(context, resourceset_parameters.Object_names[an_object], nsa_container):\n log.info(\" ****> Passed: The {object_file} file exists in: {objects_path} as expected.\".format(object_file=resourceset_parameters.Object_names[an_object], objects_path=nsa_container))\n else:\n assert False, \" ****> Failed: The {object_file} file dose not exist in: {objects_path} when it should be there\".format(object_file=resourceset_parameters.Object_names[an_object], objects_path=nsa_container)", "def populate_objects(phylodata_objects, project_name, path_to_species_trees, path_to_gene_trees, path_to_ranger_outputs):\r\n\r\n\r\n #try and populate the species and gene files. should work.\r\n for obj in phylodata_objects:\r\n #print(\"Populating species trees\")\r\n obj.populate_species_tree(path_to_species_trees)\r\n #print(\"Populating gene trees\")\r\n obj.populate_gene_boots(path_to_gene_trees)\r\n\r\n\r\n #now try and populate ranger output, if not make directory and run run_rangerDTL\r\n for obj in phylodata_objects:\r\n #print(\"Checking for rangerDTL outputs\")\r\n exists = obj.populate_ranger_dtl_outputs(path_to_ranger_outputs)\r\n if exists is False:\r\n #run the program.\r\n print(\"Running RangerDTL\")\r\n path_to_ranger_outputs, list_of_ranger_outputs = annotate_ranger.run_rangerDTL(obj, project_name)\r\n #print(\"Checking for new rangerDTL outputs\")\r\n exists = obj.populate_ranger_dtl_outputs(path_to_ranger_outputs)\r\n if exists is False:\r\n print (\"error in rangerdtl_output assignation\")\r\n raise SystemExit\r\n return True", "def setUp(self):\n if not os.path.exists(TEMPDIR):\n os.makedirs(TEMPDIR)\n\n self.file1 = os.path.join(TEMPDIR, 'list1')\n tempfile = open(self.file1, 'w')\n tempfile.write('123\\r\\n'\n 'ggg\\n'\n '456\\n'\n '789\\n'\n '7 77\\r\\n'\n '7; 77\\r\\n'\n '7.77\\r\\n'\n '777a\\r\\n')\n tempfile.close()\n self.geneList1 = MetabolicList(self.file1)\n self.expected_length = 3\n self.expected_name = 'list1'\n self.expected_genes = [123, 456, 789]\n\n self.file2 = os.path.join(TEMPDIR, 'list2')\n tempfile = open(self.file2, 'w')\n tempfile.write('789\\n'\n '456\\n'\n '777')\n tempfile.close()\n self.geneList2 = GeneList(self.file2)\n self.expected_intersection = [456, 789]\n self.expected_intersection_length = [2]\n\n self.expected_hypergeometric_score = 0.15170278637770918", "def _load_test_data(self):\n self._save_test_data()", "def test_good_load(self):\n self.r0.save_to_file([self.r0, self.r1])\n objs = self.r0.load_from_file()\n self.assertEqual(str(objs[0]), '[Rectangle] (1) 0/0 - 2/3')\n self.assertEqual(str(objs[1]), '[Rectangle] (2) 0/0 - 4/6')", "def test_addTcfModelFile(self):\n print ('Testing add tcf ModelFile...')\n\n tuflow = copy.deepcopy(self.tuflow)\n loader = TuflowLoader()\n tcf = tuflow.control_files['TCF']\n geom = tcf.contains(command='Geometry Control')[0]\n \n line = \"Geometry Control File == ..\\\\model\\\\test_tgc2.tgc\"\n tgc_part = factory.TuflowFactory.getTuflowPart(line, tcf.mainfile)[0]\n tgc_control = loader.loadControlFile(tgc_part)\n tcf.addControlFile(tgc_part, tgc_control, after=geom)\n \n assert(tgc_part in tuflow.control_files['TGC'].control_files)\n \n test_part = tuflow.control_files['TGC'].contains(filename='shiptest_tgc2_v1_DTM_2m')\n assert(len(test_part) == 1)\n \n index = tcf.parts.index(tgc_part)\n assert(index == tcf.parts.index(geom) + 1)\n del tuflow\n\n print ('pass')", "def setUpClass(cls):\n clean_db() # 
delete all objets created by another tests\n # create a content object\n cls.object = TestModel.objects.create(name=\"TestObject\")", "def test_01_BuildObjects(self):\n # print(PrettyFormatAny.form(self.m_pyhouse_obj._Config, 'A3-01-A - Config', 190))\n # print(__file__)\n # print(PrettyFormatAny.form(self.m_pyhouse_obj._Config.YamlTree, 'Location', 190))\n # self.assertEqual(self.m_pyhouse_obj._Config.YamlConfigDir, '/etc/pyhouse/')", "def get_test_files(self):\n raise NotImplementedError" ]
[ "0.63484746", "0.5927141", "0.58990014", "0.5834164", "0.57331353", "0.5705799", "0.5692242", "0.5691409", "0.56715447", "0.5653191", "0.56211764", "0.56167984", "0.56016", "0.5597074", "0.5571964", "0.55381674", "0.5530272", "0.5512248", "0.54992706", "0.5481464", "0.54731834", "0.5472777", "0.54708177", "0.5469907", "0.5466103", "0.54649407", "0.54621273", "0.54231876", "0.5420387", "0.54104304" ]
0.6209419
1
Method requestTestRunsProgress returns true once the test run has completed.
def requestTestRunsProgress(self, testRunRef: str) -> bool: url = testRunRef.replace('.json', '/progress?pos=0') ist = 0; soll = 100 while ist < soll: time.sleep(8) r = requests.get(url, proxies=self.__proxies) if r.status_code == 200: json_data = json.loads(r.text) ist = int(json_data["val"]) soll = int(json_data["max"]) else: message = '%s: %s' % (r.status_code, 'Test Run not found') raise ConnectionError(message) return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test__progress_callback(self):\n backend = self.test_init_valid()\n\n fake_total_size = 500\n num_tests = 10\n progress = 0\n\n for i in range(num_tests):\n result = backend._progress_callback(i * fake_total_size/num_tests, fake_total_size)\n self.assertEqual(progress, result)\n progress = progress + fake_total_size / (fake_total_size/num_tests)", "def hasProgress(self) -> bool:\n ...", "def hasProgress(self) -> bool:\n ...", "def hasProgress(self) -> bool:\n ...", "def hasProgress(self) -> bool:\n ...", "def progress(self):\n return self.runProgress", "def progress(self):\n return self.runProgress", "def report_scenario_progress(self):\n pass", "def startTest(self, test):\n\n if self.showAll:\n progress = f'[{next(self.test_numbers)}/{self.test_case_count}] '\n self.stream.write(progress)\n\n # Also store the progress in the test itself, so that if it errors,\n # it can be written to the exception information by our overridden\n # _exec_info_to_string method:\n test.progress_index = progress\n\n return super(CustomTextTestResult, self).startTest(test)", "def reportProgress(self):\n \n pass", "def test_completed(self):\n return False", "def test_progress(self):\r\n # Navigate to the progress page from the info page\r\n self.course_info_page.visit()\r\n self.tab_nav.go_to_tab('Progress')\r\n\r\n # We haven't answered any problems yet, so assume scores are zero\r\n # Only problems should have scores; so there should be 2 scores.\r\n CHAPTER = 'Test Section'\r\n SECTION = 'Test Subsection'\r\n EXPECTED_SCORES = [(0, 3), (0, 1)]\r\n\r\n actual_scores = self.progress_page.scores(CHAPTER, SECTION)\r\n self.assertEqual(actual_scores, EXPECTED_SCORES)", "def wait_progress(self):\n pass", "def wait_progress(self):\n pass", "def update_progress(self, done):\r\n if done % 100 == 0:\r\n print >>sys.stderr, \" %d processed, run time %d secs\" % (done, (datetime.now() - self.started_at).seconds)", "def check_progress(main_url, header, progress_key):\n print \"checking progress {}\".format(progress_key)\n time.sleep(1)\n progress_result = requests.get(\n main_url + '/api/v2/progress/{}'.format(progress_key),\n headers=header\n )\n print \"... 
{} ...\".format(progress_result.json()['progress'])\n\n if progress_result.json()['progress'] == 100:\n return progress_result\n else:\n progress_result = check_progress(main_url, header, progress_key)\n\n return progress_result", "def start_of_test_batch_hook(self, progress, logging_epoch):\n pass", "def set_test_passed(self):\n self.set_result(Status.PASSED)", "def test_get_progress(self):\r\n self.combinedoe.update_task_states()\r\n self.combinedoe.state = \"done\"\r\n self.combinedoe.is_scored = True\r\n progress = self.combinedoe.get_progress()\r\n self.assertIsInstance(progress, Progress)\r\n\r\n # progress._a is the score of the xmodule, which is 0 right now.\r\n self.assertEqual(progress._a, 0)\r\n\r\n # progress._b is the max_score (which is 1), divided by the weight (which is 1).\r\n self.assertEqual(progress._b, 1)", "def test_05_user_progress(self):\r\n url = '/api/app/1/userprogress'\r\n self.check_limit(url, 'get', 'app')", "def startTestRun(self):", "def report_scenario_progress(self):\n if not self.current_scenario:\n return # SKIP: No results to report for first scenario.\n # -- NORMAL-CASE:\n status_name = self.current_scenario.status.name\n dot_status = self.dot_status[status_name]\n if status_name == \"failed\":\n # MAYBE TODO: self.failures.append(result)\n pass\n self.stream.write(dot_status)\n self.stream.flush()", "def _RunTests(tests: List[_Test], parallelism: int) -> bool:\n running_tests = set()\n finished_tests = set()\n tests_to_run = sorted(tests, reverse=True)\n while tests_to_run or running_tests:\n time.sleep(0.2) # 200ms\n updated_finished = set(t for t in running_tests if t.Finished())\n running_tests = running_tests - updated_finished\n while tests_to_run and len(running_tests) < parallelism:\n t = tests_to_run.pop()\n t.Run()\n running_tests.add(t)\n\n newly_finished = updated_finished - finished_tests\n finished_tests.update(updated_finished)\n for test in newly_finished:\n logging.info(\"%s\\t%s\\t%.1fs\", test,\n \"PASSED\" if test.Succeeded() else \"FAILED\",\n test.finish_time - test.begin_time)\n if newly_finished:\n logging.flush()\n\n failed_tests = sorted([t for t in tests if not t.Succeeded()])\n logging.info(\"Ran %d tests. 
%d failed.\", len(tests), len(failed_tests))\n logging.flush()\n\n for ft in failed_tests:\n ft.PrintLogs()\n\n return not failed_tests", "def testingProgressFunc(state, action, text, tick):\n pass\n #print \"testingProgressFunc\", state, action, text, tick", "def failed(self):\n if len(self.progress) > 0:\n return self.progress[-1].status == TestStatus.canceled\n return False", "def start_test_run(self, request):\n request.worker.initialize_test_run(request.message.tests,\n request.message.run_data)\n\n return SuccessReply()", "def run_tests(self):\n with self.report.timer.record(\"run\"):\n self.result.report.extend(self._run_tests())", "def startTestRun(self, event):\n self.prof = cProfile.Profile()\n event.executeTests = self.prof.runcall", "def test_progress(token, issue_number, actor, commit, run_id):\n success_or_only_flakiness, log_summary = _get_summary_table(token, run_id)\n if success_or_only_flakiness and not log_summary:\n # succeeded (without flakiness)\n return\n else:\n if success_or_only_flakiness:\n # all failures/errors are due to flakiness (succeeded after retry)\n title = _COMMENT_TITLE_PROGESS_FLAKY\n else:\n # failures/errors still exist after retry\n title = _COMMENT_TITLE_PROGESS_FAIL\n firebase_github.add_label(token, issue_number, _LABEL_FAILED)\n comment = (title +\n _get_description(actor, commit, run_id) +\n log_summary +\n _COMMENT_FLAKY_TRACKER +\n _COMMENT_HIDDEN_DIVIDER)\n _update_comment(token, issue_number, comment)", "def __send_all(self):\n\n offset = self.app_id * 10\n\n print(\"Start run {} - {} @ {} with {} tests\".format(self.app_id,\n self.test_run.name,\n self.test_run.date,\n self.test_run.total))\n\n status_dict = {}\n # Test run name\n status_dict[offset + self.PIN_NAME] = self.test_run.name\n # Test run start datetime\n status_dict[offset + self.PIN_DATE] = self.test_run.date\n # Test run advance status string\n status_dict[offset + self.PIN_STATUS_TEXT] = \"{}/{}\".format(self.test_run.actual,\n self.test_run.total)\n # Test run advance status percent\n percent = self.test_run.actual / self.test_run.total * 100\n status_dict[offset + self.PIN_STATUS_GRAPH] = percent\n # Test run result type numbers\n status_dict[offset + self.PIN_TYPES] = \"S{} F{} B{}\".format(self.test_run.succeed,\n self.test_run.failed,\n self.test_run.blocked)\n # Test run led TODO manage color\n status_dict[offset + self.PIN_LED] = 255\n\n self.post_dict(status_dict)" ]
[ "0.65843594", "0.642197", "0.642197", "0.642197", "0.642197", "0.6393079", "0.6393079", "0.62220234", "0.6220406", "0.61894155", "0.6134013", "0.6066548", "0.5948006", "0.5948006", "0.59287864", "0.590314", "0.58682877", "0.58659166", "0.5864398", "0.5814855", "0.5802529", "0.5738549", "0.5736257", "0.5718756", "0.5718564", "0.571563", "0.5709198", "0.56872153", "0.5681927", "0.5666331" ]
0.7500514
0
Method requestTestRunResults returns the result of the test run.
def requestTestRunResults(self, testRunRef: str) -> dict: jsonData = {} url = testRunRef r = requests.get(url, proxies=self.__proxies) if r.status_code == 200: jsonData = json.loads(r.text) else: message = '%s: %s' % (r.status_code, 'Test Run Result not found') raise ConnectionError(message) return jsonData
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getTestResults():", "def test_get_results(self):\n pass", "def upload_test_run_results():\n if debug:\n print('[DEBUG] Func: upload_test_run_results...')\n\n if new_test_run_id == 0:\n print('[ERROR] new_test_run: id could not be found... ' + str(new_test_run_id))\n sys.exit(1)\n\n if debug:\n print('[DEBUG] Adding results to new test run: ID: {0}...'.format(new_test_run_id))\n\n upload_results_url = \"https://eei.testrail.com/index.php?/api/v2/add_result_for_case/{0}/{1}=\".format(new_test_run_id, testcase_id)\n\n upload_results_json = {\n \"status_id\": status_id,\n \"comment\": comment,\n \"version\": \"1\",\n \"elapsed\": \"2m\",\n \"custom_step_results\": json.loads(custom_step_results)\n }\n\n update_results = requests.post(upload_results_url, auth=authorization, json=upload_results_json)\n\n if str(update_results.status_code) != '200':\n print('[ERROR] update_results: non 200 status code... ' + str(update_results.status_code))\n print(str(update_results.json()))\n sys.exit(1)", "def testcases(self, request, *args, **kwargs):\n response = self.retrieve(request, *args, **kwargs)\n response.data = response.data['testcases']\n return response", "def test_get_run(self):\n pass", "def getResults():", "def _GetAllTestRuns(self, ispy):\n template = JINJA.get_template('list_view.html')\n data = {}\n max_keys = 1000\n marker = 'failures/%s' % self.request.get('marker')\n test_runs = list([path.split('/')[1] for path in\n ispy.GetAllPaths('failures/', max_keys=max_keys,\n marker=marker, delimiter='/')])\n base_url = '/?test_run=%s'\n next_url = '/?marker=%s' % test_runs[-1]\n data['next_url'] = next_url\n data['links'] = [(test_run, base_url % test_run) for test_run in test_runs]\n self.response.write(template.render(data))", "def __send_all(self):\n\n offset = self.app_id * 10\n\n print(\"Start run {} - {} @ {} with {} tests\".format(self.app_id,\n self.test_run.name,\n self.test_run.date,\n self.test_run.total))\n\n status_dict = {}\n # Test run name\n status_dict[offset + self.PIN_NAME] = self.test_run.name\n # Test run start datetime\n status_dict[offset + self.PIN_DATE] = self.test_run.date\n # Test run advance status string\n status_dict[offset + self.PIN_STATUS_TEXT] = \"{}/{}\".format(self.test_run.actual,\n self.test_run.total)\n # Test run advance status percent\n percent = self.test_run.actual / self.test_run.total * 100\n status_dict[offset + self.PIN_STATUS_GRAPH] = percent\n # Test run result type numbers\n status_dict[offset + self.PIN_TYPES] = \"S{} F{} B{}\".format(self.test_run.succeed,\n self.test_run.failed,\n self.test_run.blocked)\n # Test run led TODO manage color\n status_dict[offset + self.PIN_LED] = 255\n\n self.post_dict(status_dict)", "def requestTestRuns(self, testObjectId: str, testObjectFileName: str, testIdS: list) -> dict:\n jsonData = {}\n t = time.strftime(\"%d.%m.%Y %H:%M:%S\", time.localtime())\n label = '%s %s %s %s' % ('Test run on', t, 'with test suits for', testObjectFileName)\n \n jsonBody = {\n \"label\": label,\n \"executableTestSuiteIds\": testIdS,\n \"arguments\": {\n \"files_to_test\": \".*\",\n \"tests_to_execute\": \".*\"\n },\n \"testObject\": {\n \"id\": testObjectId\n }\n }\n data = json.dumps(jsonBody, indent=4, separators=(',', ': '))\n \n url = '%s%s' % (self.__urlWebApp, '/v2/TestRuns')\n headers = {'Content-Type': 'application/json', 'Accept': 'application/json'}\n r = requests.post(url, data=data, headers=headers, proxies=self.__proxies)\n \n if r.status_code == 201:\n jsonData = json.loads(r.text)\n elif r.status_code == 400:\n 
message = '%s: %s' % (r.status_code, 'Invalid request')\n raise ConnectionError(message)\n elif r.status_code == 404:\n message = '%s: %s' % (r.status_code, 'Test Object or Executable Test Suite with ID not found')\n raise ConnectionError(message)\n elif r.status_code == 409:\n message = '%s: %s' % (r.status_code, 'Test Object already in use')\n raise ConnectionError(message)\n else:\n message = '%s: %s' % (r.status_code, 'Internal error')\n raise ConnectionError(message)\n \n return jsonData", "def test(self):\n return self._test(result_count=1, failure_amount=1)", "def test(self):\n return self._test(result_count=1, failure_amount=1)", "def run_tests(self):\n with self.report.timer.record(\"run\"):\n self.result.report.extend(self._run_tests())", "def _collect_test_result(duthost, ptfhost, request):\n logger.info(\"Collecting test result and related information.\")\n # TODO : collect DUT test report\n _collect_sonic_os_and_platform_info(duthost, request)\n _collect_sai_test_report_xml(ptfhost, request)", "def test_list_runs(self):\n pass", "def get(self):\n diag = int(self.request.get('diag', default_value=False))\n if diag:\n self.run_diagnostics()\n return\n \n run_suite = self.request.get('suite', default_value=None)\n if not run_suite:\n self.welcome()\n return\n \n ## verbosity should probably be on always.\n verbosity = self.request.get('verbosity', default_value='0')\n ## teardown controls whether or not the tests clean up after themselves.\n teardown = int(self.request.get('teardown', default_value='1'))\n \n ## These are sort of like global variables. Sloppy programming I know.\n CurlTestBase._verbosity = int(verbosity)\n CurlTestBase._verbose_output = ''\n CurlTestBase._teardown = teardown\n \n ## The test suites to select from.\n suite_dict = {}\n suite_dict['awake'] = unittest.TestLoader().loadTestsFromTestCase(CurlTestAwake);\n suite_dict['sync'] = unittest.TestLoader().loadTestsFromTestCase(CurlTestBlobSync);\n suite_dict['async'] = unittest.TestLoader().loadTestsFromTestCase(CurlTestBlobASync);\n suite_dict['s3'] = unittest.TestLoader().loadTestsFromTestCase(CurlTestBlobS3);\n result = unittest.TestResult()\n \n ## Run the selected suite\n suite_dict[run_suite].run(result)\n# suite.debug()\n\n ## Bare bones result output.\n response = '<html><body>'\n response += '<h3>Summary:</h3><pre>' + cgi.escape(repr(result)) + '</pre>'\n if len(result.errors):\n for e in result.errors:\n response += '<h4>' + str(e[0]) + '</h4>'\n response += '<pre>' + e[1] + '</pre>'\n if CurlTestBase._verbosity:\n response += '<pre>' + cgi.escape(CurlTestBase._verbose_output) + '</pre>'\n response += '</body></html>'\n self.response.out.write(response)", "def test_results(self):\n result = self.test_client._results\n\n assert isinstance(result, list)\n assert len(result) == 1", "def run_tests(base_url, tests, format):\n\n\n # Keep track of stats\n results = []\n total_start_time = time.time()\n\n # Instance variables passed and failed contain stats\n # for the most recently run set of tests\n passed = 0\n failed = 0\n\n # Run and time each test\n for index, test in enumerate(tests):\n\n # Time the test until finished or until an exception occurs.\n test_start_time = time.time()\n\n result = test.run(base_url)\n\n test_elapsed_time = time.time() - test_start_time\n\n result[\"name\"] = test.name\n result[\"elapsed_time\"] = test_elapsed_time\n\n if result[\"status\"] == \"PASSED\":\n passed += 1\n else:\n failed += 1\n\n results.append(result)\n\n # Instance variable total_elapsed_time contains the\n 
# total runtime of the most recently run set of tests\n total_elapsed_time = time.time() - total_start_time\n\n total_tests = passed + failed\n total_tests = total_tests if total_tests > 0 else 1\n success_percent = (passed / total_tests) * 100\n\n summary = {\n \"passed\": passed,\n \"failed\": failed,\n \"success_percentage\": success_percent,\n \"total_elapsed_time\": total_elapsed_time\n }\n\n output_results(results, summary, format)", "def runTest(self):\n try:\n print('TestCase runTest===>')\n self.test_runner.run_test(self.testcase_dict)\n\n finally:\n self.meta_data = getattr(self.test_runner.http_client_session, \"meta_data\", {})", "def run(self):\r\n logging.info(\"Now excecuting test step {}\".format(self.stepname))\r\n try:\r\n response = eval(\"requests.{}('{}',params={})\".format(self.verb, self.url, self.payload))\r\n return response, True\r\n\r\n except requests.exceptions.RequestException as e:\r\n logging.warn(\"test {} failed\".format(self.stepname))\r\n \r\n return None, False", "def run( self, test ):\n\n result = self._makeResult()\n test( result )\n result.printErrors()\n self.stream.writeln( result.separator2 )\n run = result.testsRun\n self.stream.writeln()\n\n if not result.wasSuccessful():\n self.stream.write( \"FAILED (\" )\n failed, errored = map( len, ( result.failures, result.errors ) )\n if failed:\n self.stream.write( \"failures=%d\" % failed )\n if errored:\n if failed: self.stream.write( \", \" )\n self.stream.write( \"errors=%d\" % errored )\n self.stream.writeln( \")\" )\n else:\n self.stream.writeln( \"OK\" )\n \n return result", "def query_test_results(self, res, step_name='query_test_results'):\n return self._proto_step_result(res, step_name)", "def add_test_case_results(\n self,\n test_results: TestCaseResult,\n test_case_fqn: str,\n ):\n resp = self.client.put(\n f\"{self.get_suffix(TestCase)}/{quote(test_case_fqn,safe='')}/testCaseResult\",\n test_results.json(),\n )\n\n return resp", "def test_get_results_verbose(self):\n\t\tpass", "def runtest(self):", "def runTests(self):\n \n pass", "def get_results(self):\n result = [round(self.mr / self.test_size, 1), round(self.mrr / self.test_size, 3),\n round(self.hits1 / self.test_size, 3), round(self.hits3 / self.test_size, 3),\n round(self.hits5 / self.test_size, 3), round(self.hits10 / self.test_size, 3)]\n return result", "def run_single_test(self, config):\n path_name = config['path_name']\n for request in config['request']:\n with self.subTest(request=request, test_name=config['test_name']):\n if 'args' in request:\n url = reverse(path_name, kwargs=request['args'])\n else:\n url = reverse(path_name)\n\n query_params = None\n if 'query_params' in request:\n query_params = urlencode(request['query_params'])\n url = '{}?{}'.format(url, query_params)\n\n data = None\n data_format = 'json'\n if 'data' in request:\n data = request['data']\n\n if 'data_format' in request:\n data_format = request['data_format']\n\n response_check = None\n if 'response_check' in request:\n response_check = request['response_check']\n\n self.call_api(\n url,\n data,\n self.tokens[request['user']],\n request['status'],\n config['type'],\n data_format=data_format,\n response_check=response_check)", "def get_test_runs(self, testrun_id=None, cluster_id=None):\n url = '/testruns'\n if testrun_id is not None:\n url += '/{}'.format(testrun_id)\n if cluster_id is not None:\n url += '/{}'.format(cluster_id)\n elif cluster_id is not None:\n url += '/last/{}'.format(cluster_id)\n return self._client.get(url=url).json()", "def 
run_tests(test_run_params):\n classname = test_run_params[\"classname\"]\n params = (\"bin/test\", \"-t\", classname)\n\n env = environ.copy()\n env[\"ZSERVER_PORT\"] = str(55000 + test_run_params[\"port\"])\n\n try:\n with open(devnull, \"wb\") as dn:\n output = check_output(params, stderr=dn, env=env, universal_newlines=True)\n failed = False\n except CalledProcessError as e:\n output = e.output\n failed = True\n\n try:\n for line in output.splitlines():\n if line.startswith(\" Ran\"):\n count = int(line.split()[1])\n runtime = parse_time(line.split(\" in \")[-1].strip(\".\"))\n break\n speed = runtime / count\n # We trim out the uninteresting bit of the dotted path for readability\n module = \".\".join(classname.split(\".\")[-2:])\n testclass = \".\".join(classname.split(\".\")[:2])\n classname = \"{} {}\".format(testclass, module)\n return {\n \"classname\": classname,\n \"count\": count,\n \"runtime\": runtime,\n \"speed\": speed,\n \"failed\": failed,\n }\n except BaseException:\n return {\"classname\": classname, \"failed\": True}", "def run_tests(self, cluster_id, test_sets, test_name=None):\n # get tests otherwise 500 error will be thrown6^40\n self.get_tests(cluster_id)\n json = []\n for test_set in test_sets:\n record = {\n 'metadata': {'cluster_id': str(cluster_id), 'config': {}},\n 'testset': test_set\n }\n if test_name is not None:\n record['tests'] = [test_name]\n\n json.append(record)\n\n return self._client.post(\"/testruns\", json=json).json()" ]
[ "0.722702", "0.68538225", "0.6756852", "0.6595341", "0.6523461", "0.645256", "0.6393136", "0.6381202", "0.6372013", "0.6318589", "0.6318589", "0.630098", "0.6296187", "0.6266045", "0.62650174", "0.6251577", "0.6247649", "0.6194278", "0.6172445", "0.6065245", "0.60328704", "0.6031412", "0.60163206", "0.5985925", "0.5982106", "0.59764445", "0.59709275", "0.5945295", "0.59433484", "0.5931108" ]
0.71555626
1
evaluates other metrics for the dataset
def __evaluate_other_metrics(dataset, m, y_act, y_pred): return evaluate_metric(y_act, y_pred, m, dataset.y_n_classes)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_dataset_metrics(self):\n pass", "def evaluate(self, dataset):\n\t\tpass", "def compute_metrics(self):\n pass", "def __evaluate_metric(dataset, y_act, y_pred):\n if dataset.metric == 'specific':\n if dataset.best_is_min:\n return return_specific_metrics(y_act, y_pred)\n else:\n return -return_specific_metrics(y_act, y_pred)\n else:\n return evaluate_metric(y_act, y_pred, dataset.metric, dataset.y_n_classes)", "def model_evaluate(model,x_train,n_y_array,x_val, vald_array):\n\n scores = model.evaluate(x_train, n_y_array, verbose=1)\n\n scores2 = model.evaluate(x_val, vald_array, verbose=1)\n\n\n print(\"for traininf set\")\n\n print(\"%s: %.2f%%\" % (model.metrics_names[1], scores[1]*100))\n\n print(\"%s: %.2f%%\" % (model.metrics_names[0], scores[0]))\n\n\n\n print(\"for validation set : \") \n\n print(\"%s: %.2f%%\" % (model.metrics_names[1], scores2[1]*100))\n\n print(\"%s: %.2f%%\" % (model.metrics_names[0], scores2[0]))", "def _evaluate(dataset: dict, name: str, metrics=None):\n if metrics is None:\n metrics = ['Accuracy', 'AUROC', 'AUPRC', 'Precision', 'Recall', 'F1', 'F2']\n measures = [dataset[metric] for metric in metrics]\n measures.insert(0, name)\n return measures", "def make_metrics(self):\n num_batches = self.data_loader.number_of_batches()\n dose_score_vec = np.zeros(num_batches)\n\n # Only make calculations if data_loader is not empty\n if not self.data_loader.file_paths_list:\n print('No patient information was given to calculate metrics')\n else:\n # Change batch size to 1\n self.data_loader.batch_size = 1 # Loads data related to ground truth patient information\n if self.dose_loader is not None:\n self.dose_loader.batch_size = 1 # Loads data related to ground truth patient information\n\n for idx in tqdm.tqdm(range(num_batches)):\n # Get roi masks for patient\n self.get_constant_patient_features(idx)\n # Get dose tensors for reference dose and evaluate criteria\n reference_dose = self.get_patient_dose_tensor(self.data_loader)\n if reference_dose is not None:\n self.reference_dose_metric_df = self.calculate_metrics(self.reference_dose_metric_df, reference_dose)\n # If a dose loader was provided, calculate the score\n if self.dose_loader is not None:\n new_dose = self.get_patient_dose_tensor(self.dose_loader)\n # Make metric data frames\n self.new_dose_metric_df = self.calculate_metrics(self.new_dose_metric_df, new_dose)\n # Evaluate mean absolute error of 3D dose\n dose_score_vec[idx] = np.sum(np.abs(reference_dose - new_dose)) / np.sum(self.possible_dose_mask)\n # Save metrics at the patient level (this is a template for how DVH stream participants could save\n # their files\n # self.dose_metric_df.loc[self.patient_list[0]].to_csv('{}.csv'.format(self.patient_list[0]))\n\n if self.dose_loader is not None:\n dvh_score = np.nanmean(np.abs(self.reference_dose_metric_df - self.new_dose_metric_df).values)\n dose_score = dose_score_vec.mean()\n return dvh_score, dose_score\n else:\n print('No new dose provided. 
Metrics were only calculated for the provided dose.')", "def metrics(self):\n \n if self.mse.shape[0]>1:\n raise ValueError('Metrics can only handle single observations.')\n \n if self.N==1:\n pred = float('nan')\n err = float('nan')\n y_true = float('nan')\n else:\n pred = int(self._predictions[-1])\n err = self._mse[-1]\n y_true = int(self.label[0])\n \n is_outlier = {\"type\":\"GAUGE\",\"key\":\"is_outlier\",\"value\":pred}\n mse = {\"type\":\"GAUGE\",\"key\":\"mse\",\"value\":err}\n obs = {\"type\":\"GAUGE\",\"key\":\"observation\",\"value\":self.N - 1}\n threshold = {\"type\":\"GAUGE\",\"key\":\"threshold\",\"value\":self.threshold}\n \n label = {\"type\":\"GAUGE\",\"key\":\"label\",\"value\":y_true}\n \n accuracy_tot = {\"type\":\"GAUGE\",\"key\":\"accuracy_tot\",\"value\":self.metric[4]}\n precision_tot = {\"type\":\"GAUGE\",\"key\":\"precision_tot\",\"value\":self.metric[5]}\n recall_tot = {\"type\":\"GAUGE\",\"key\":\"recall_tot\",\"value\":self.metric[6]}\n f1_score_tot = {\"type\":\"GAUGE\",\"key\":\"f1_tot\",\"value\":self.metric[7]}\n f2_score_tot = {\"type\":\"GAUGE\",\"key\":\"f2_tot\",\"value\":self.metric[8]}\n \n accuracy_roll = {\"type\":\"GAUGE\",\"key\":\"accuracy_roll\",\"value\":self.metric[9]}\n precision_roll = {\"type\":\"GAUGE\",\"key\":\"precision_roll\",\"value\":self.metric[10]}\n recall_roll = {\"type\":\"GAUGE\",\"key\":\"recall_roll\",\"value\":self.metric[11]}\n f1_score_roll = {\"type\":\"GAUGE\",\"key\":\"f1_roll\",\"value\":self.metric[12]}\n f2_score_roll = {\"type\":\"GAUGE\",\"key\":\"f2_roll\",\"value\":self.metric[13]}\n \n true_negative = {\"type\":\"GAUGE\",\"key\":\"true_negative\",\"value\":self.metric[0]}\n false_positive = {\"type\":\"GAUGE\",\"key\":\"false_positive\",\"value\":self.metric[1]}\n false_negative = {\"type\":\"GAUGE\",\"key\":\"false_negative\",\"value\":self.metric[2]}\n true_positive = {\"type\":\"GAUGE\",\"key\":\"true_positive\",\"value\":self.metric[3]}\n \n nb_outliers_roll = {\"type\":\"GAUGE\",\"key\":\"nb_outliers_roll\",\"value\":self.metric[14]}\n nb_labels_roll = {\"type\":\"GAUGE\",\"key\":\"nb_labels_roll\",\"value\":self.metric[15]}\n nb_outliers_tot = {\"type\":\"GAUGE\",\"key\":\"nb_outliers_tot\",\"value\":self.metric[16]}\n nb_labels_tot = {\"type\":\"GAUGE\",\"key\":\"nb_labels_tot\",\"value\":self.metric[17]}\n \n return [is_outlier,mse,obs,threshold,label,\n accuracy_tot,precision_tot,recall_tot,f1_score_tot,f2_score_tot,\n accuracy_roll,precision_roll,recall_roll,f1_score_roll,f2_score_roll,\n true_negative,false_positive,false_negative,true_positive,\n nb_outliers_roll,nb_labels_roll,nb_outliers_tot,nb_labels_tot]", "def compute_statistics(self):", "def set_metrics(self):", "def evaluate(self, dataset, *args, **kwargs):\n\n losses = []\n for sample in dataset:\n output = self.predict(sample, *args, **kwargs)\n losses.append(self.metric_loss(output, sample, *args, **kwargs))\n\n return losses", "def evaluate(self):\n results = dict()\n for metric in self.metrics:\n print('Evaluating clustering with metric %s' % metric)\n if metric in LABEL_METRICS.keys():\n results[metric] = LABEL_METRICS[metric](self.X, self.model.labels_)\n results['adjusted_rand_score'] = SCORE_METRICS['adjusted_rand_score'](self.Y[:, 0], self.model.labels_)\n self.results = results\n return results", "def _eval_classifier(self):\n\n y_pred_baseline = self.df_baseline[self.score_column]\n y_pred_sample = self.df_sample[self.score_column]\n\n y_label_baseline = self.df_baseline[self.label_column]\n y_label_sample = 
self.df_sample[self.label_column]\n\n precision_baseline = precision_score(y_label_baseline, y_pred_baseline)\n recall_baseline = recall_score(y_label_baseline, y_pred_baseline)\n acc_baseline = accuracy_score(y_label_baseline, y_pred_baseline)\n f1_baseline = f1_score(y_label_baseline, y_pred_baseline)\n try:\n auc_baseline = roc_auc_score(y_label_baseline, y_pred_baseline)\n except ValueError:\n auc_baseline = \"NA\"\n\n precision_sample = precision_score(y_label_sample, y_pred_sample)\n recall_sample = recall_score(y_label_sample, y_pred_sample)\n acc_sample = accuracy_score(y_label_sample, y_pred_sample)\n f1_sample = f1_score(y_label_sample, y_pred_sample)\n try:\n auc_sample = roc_auc_score(y_label_sample, y_pred_sample)\n except ValueError:\n auc_sample = \"NA\"\n\n metrics_df = pd.DataFrame(\n {\n \"Accuracy\": [acc_baseline, acc_sample],\n \"Precision\": [precision_baseline, precision_sample],\n \"Recall\": [recall_baseline, recall_sample],\n \"F1\": [f1_baseline, f1_sample],\n \"AUC\": [auc_baseline, auc_sample],\n },\n index=[\"baseline\", \"sample\"],\n )\n\n self.performance_comparison = metrics_df", "def _evaluate(self, train_x, train_y, test_x, test_y, n_targets, name):\n r_temp = {}\n for metric_name in self.metrics:\n r_temp.update({f\"{metric_name}_Model\": name, f\"{metric_name}_Sum\": 0,\n f\"{metric_name}_Min\": 1000000, f\"{metric_name}_Max\": 0})\n\n for i in range(self.repetitions):\n is_nan = True\n while (is_nan):\n model = self.get_model(train_x.shape[1], n_targets)\n model.fit(train_x, train_y, **self.fit_kwargs)\n result = model.predict(test_x)\n is_nan = np.any(np.isnan(result))\n del model\n\n for metric_name in self.metrics:\n metric = self.get_metrics(metric_name)\n value = metric(result, test_y)\n r_temp[f\"{metric_name}_Sum\"] += value\n if r_temp[f\"{metric_name}_Min\"] > value:\n r_temp[f\"{metric_name}_Min\"] = value\n if r_temp[f\"{metric_name}_Max\"] < value:\n r_temp[f\"{metric_name}_Max\"] = value\n keras.backend.clear_session()\n for metric_name in self.metrics:\n r_temp[f\"{metric_name}_Mean\"] = r_temp[f\"{metric_name}_Sum\"] / self.repetitions\n return r_temp", "def evaluate(self, dataset):\n logging.info('Start evaluation')\n\n loss, predictions, labels = self.run_one_epoch(dataset, RunnerPhase.VALIDATE)\n\n metrics_dict = self.metric_class.get_metrics_dict(predictions, labels)\n\n eval_info = self.metric_class.metrics_dict_to_str(metrics_dict)\n\n logging.info(eval_info)\n\n logging.info('Evaluation finished')\n\n return metrics_dict", "def evaluate(self, test_data, test_labels):\n raise NotImplementedError", "def evaluate(self) -> Dict[str, float]:\n eval_dataloader = self.get_eval_dataloader()\n\n output = self._prediction_loop(eval_dataloader, description=\"Evaluation\")\n return output.metrics", "def calculate_metrics(self, metric_df, dose):\n # Prepare to iterate through all rois\n roi_exists = self.roi_mask.max(axis=(0, 1, 2))\n voxels_in_tenth_of_cc = np.maximum(1, np.round(100/self.voxel_size)) #\n for roi_idx, roi in enumerate(self.data_loader.full_roi_list):\n if roi_exists[roi_idx]:\n roi_mask = self.roi_mask[:, :, :, roi_idx].flatten()\n roi_dose = dose[roi_mask]\n roi_size = len(roi_dose)\n if roi in self.data_loader.rois['oars']:\n if 'D_0.1_cc' in self.oar_eval_metrics:\n # Find the fractional volume in 0.1cc to evaluate percentile\n fractional_volume_to_evaluate = 100 - voxels_in_tenth_of_cc/roi_size * 100\n metric_eval = np.percentile(roi_dose, fractional_volume_to_evaluate)\n metric_df.at[self.patient_list[0], ('D_0.1_cc', 
roi)] = metric_eval\n if 'mean' in self.oar_eval_metrics:\n metric_eval = roi_dose.mean()\n metric_df.at[self.patient_list[0], ('mean', roi)] = metric_eval\n elif roi in self.data_loader.rois['targets']:\n if 'D_99' in self.tar_eval_metrics:\n metric_eval = np.percentile(roi_dose, 1)\n metric_df.at[self.patient_list[0], ('D_99', roi)] = metric_eval\n if 'D_95' in self.tar_eval_metrics:\n metric_eval = np.percentile(roi_dose, 5)\n metric_df.at[self.patient_list[0], ('D_95', roi)] = metric_eval\n if 'D_1' in self.tar_eval_metrics:\n metric_eval = np.percentile(roi_dose, 99)\n metric_df.at[self.patient_list[0], ('D_1', roi)] = metric_eval\n\n return metric_df", "def evaluate(self, data, metric, classes=None):\n func_dict = {\n 'mutual_information': sklearn.metrics.mutual_info_score,\n 'normed_mutual_information': sklearn.metrics.normalized_mutual_info_score,\n 'square_error': sklearn.metrics.mean_squared_error,\n 't-test': scipy.stats.ttest_ind,\n 'wilcoxon': scipy.stats.wilcoxon,\n 'correlation': np.corrcoef\n }\n self.make_signature(data, classes)\n try:\n if metric in {'mutual_information', 'normed_mutual_information'}:\n self.score = func_dict[metric](classes, self.digit_signature()) \n elif metric == 'square_error':\n self.score = func_dict[metric](classes, self.signatures)\n elif metric in {'t-test', 'wilcoxon'} :\n self.score = np.abs(func_dict[metric](self.signatures[classes==1], \\\n self.signatures[classes==0])[0])\n \n elif metric == 'correlation':\n self.score = func_dict[metric](classes, self.signatures)[1,0]\n \n except: KeyError(\"no such a function\") \n \n return self.score", "def evaluate(self, X: np.ndarray, y: list, X_train=None, y_train=None) -> dict:\n metrics = self.compute_metrics(X, y)\n\n print(\"evaluation: \", metrics)\n return metrics", "def compute_metrics(self, results: list) -> dict:", "def run(self):\n self.evaluate()\n self.accumulate()\n self.summarize()", "def calculate_batch_metrics(self):\n pass", "def eval_metrics(actual, pred):\r\n rmse = np.sqrt(mean_squared_error(actual, pred))\r\n mae = mean_absolute_error(actual, pred)\r\n r2 = r2_score(actual, pred)\r\n return rmse, mae, r2", "def evaluate_with_metrics(self, dataset, metrics, *args, **kwargs):\n\n utils.assert_raise(isinstance(metrics, dict), ValueError,\n '\"metrics\" must be a dict with metric_name -> metric_function')\n result = dict()\n\n for sample in dataset:\n output = self.predict(sample)\n\n for key, call in metrics.items():\n holder = result.get(key, list())\n holder.append(call(output, sample))\n\n result[key] = holder\n\n return result", "def _evaluate(self, y_true, y_pred):\n pass", "def evaluate(model, loss_func, dataloader, metrics):\r\n model.eval()\r\n summ = []\r\n device = utils.get_device()\r\n with torch.no_grad():\r\n for data in dataloader:\r\n sentences1, starts1, ends1, sentences2, starts2, ends2, inputY = data\r\n inputY = inputY.to(device)\r\n output_batch = model(sentences1, starts1, ends1, sentences2, starts2, ends2)\r\n loss = loss_func(output_batch, inputY)\r\n output_batch = output_batch.data.cpu().numpy()\r\n inputY = inputY.data.cpu().numpy()\r\n summary_batch = {metric: metrics[metric](\r\n output_batch, inputY) for metric in metrics}\r\n summary_batch['loss'] = loss.item()\r\n summ.append(summary_batch)\r\n # print(\"summ:{}\".format(summ))\r\n metrics_mean = {metric: np.mean([x[metric]\r\n for x in summ]) for metric in summ[0]}\r\n metrics_string = \" ; \".join(\"{}: {:05.3f}\".format(k, v)\r\n for k, v in metrics_mean.items())\r\n logging.info(\"- Eval metrics 
: \" + metrics_string)\r\n return metrics_mean", "def compute_metrics(self):\n self.finalize_output_dict()\n self.metric_dict = {\n key: value(self.output_dict[\"labels\"], self.output_dict[\"pred_probs\"])\n for key, value in self.metric_fns.items()\n }", "def evaluateWithSeveralMetrics(self, dataset, metricSets=None):\n if metricSets is None: # all metrics\n metricSets = [{\"metricName\": \"areaUnderROC\"},\n {\"metricName\": \"areaUnderPR\"},\n {\"metricName\": \"precisionAtGivenRecall\", \"metricParams\": {\"recallValue\": 0.05}}] \n resultMetricSets = [None for _ in range(len(metricSets))]\n pagrs = []\n for i in range(len(metricSets)):\n params = metricSets[i]\n if params[\"metricName\"] != \"precisionAtGivenRecall\":\n value = self.evaluate(dataset, params)\n if len(params.keys()) == 1:\n key = params[\"metricName\"]\n else:\n key = params[\"metricName\"] + \" at recallValue \" + str(params[\"metricParams\"][\"recallValue\"])\n resultMetricSets[i] = {key:value}\n else: \n pagrs.append([i,params[\"metricParams\"][\"recallValue\"]])\n continue\n if None in resultMetricSets:\n pr_params = {\"metricName\": \"precisionAtGivenMultipleRecalls\", \"metricParams\": {\"recallValues\": [x[1] for x in pagrs]}}\n precisions = self.evaluate(dataset, pr_params)\n i = 0\n for item in pagrs:\n key = \"precisionAtGivenRecall\" + \" at recallValue \" + str(pagrs[i][1])\n resultMetricSets[item[0]] = {key:precisions[i]}\n i += 1 \n \n return resultMetricSets", "def do_system_evaluation(dataset, result_path, dataset_evaluation_mode='folds'):\n\n # Set warnings off, sklearn metrics will trigger warning for classes without\n # predicted samples in F1-scoring. This is just to keep printing clean.\n warnings.simplefilter(\"ignore\")\n\n overall_metrics_per_scene = {}\n\n for scene_id, scene_label in enumerate(dataset.scene_labels):\n if scene_label not in overall_metrics_per_scene:\n overall_metrics_per_scene[scene_label] = {}\n\n dcase2016_segment_based_metric = DCASE2016_EventDetection_SegmentBasedMetrics(class_list=dataset.event_labels(scene_label=scene_label))\n dcase2016_event_based_metric = DCASE2016_EventDetection_EventBasedMetrics(class_list=dataset.event_labels(scene_label=scene_label))\n\n for fold in dataset.folds(mode=dataset_evaluation_mode):\n results = []\n result_filename = get_result_filename(fold=fold, scene_label=scene_label, path=result_path)\n\n if os.path.isfile(result_filename):\n with open(result_filename, 'rt') as f:\n for row in csv.reader(f, delimiter='\\t'):\n results.append(row)\n else:\n raise IOError(\"Result file not found [%s]\" % result_filename)\n\n for file_id, item in enumerate(dataset.test(fold, scene_label=scene_label)):\n current_file_results = []\n for result_line in results:\n if len(result_line) != 0 and result_line[0] == dataset.absolute_to_relative(item['file']):\n current_file_results.append(\n {'file': result_line[0],\n 'event_onset': float(result_line[1]),\n 'event_offset': float(result_line[2]),\n 'event_label': result_line[3].rstrip()\n }\n )\n meta = dataset.file_meta(dataset.absolute_to_relative(item['file']))\n\n dcase2016_segment_based_metric.evaluate(system_output=current_file_results, annotated_ground_truth=meta)\n dcase2016_event_based_metric.evaluate(system_output=current_file_results, annotated_ground_truth=meta)\n\n overall_metrics_per_scene[scene_label]['segment_based_metrics'] = dcase2016_segment_based_metric.results()\n overall_metrics_per_scene[scene_label]['event_based_metrics'] = dcase2016_event_based_metric.results()\n\n print \" Evaluation 
over %d folds\" % dataset.fold_count\n print \" \"\n print \" Results per scene \"\n print \" {:18s} | {:5s} | | {:39s} \".format('', 'Main', 'Secondary metrics')\n print \" {:18s} | {:5s} | | {:38s} | {:14s} | {:14s} | {:14s} \".format('', '', 'Seg/Overall','Seg/Class', 'Event/Overall','Event/Class')\n print \" {:18s} | {:5s} | | {:6s} : {:5s} : {:5s} : {:5s} : {:5s} | {:6s} : {:5s} | {:6s} : {:5s} | {:6s} : {:5s} |\".format('Scene', 'ER', 'F1', 'ER', 'ER/S', 'ER/D', 'ER/I', 'F1', 'ER', 'F1', 'ER', 'F1', 'ER')\n print \" -------------------+-------+ +--------+-------+-------+-------+-------+--------+-------+--------+-------+--------+-------+\"\n averages = {\n 'segment_based_metrics': {\n 'overall': {\n 'ER': [],\n 'F': [],\n },\n 'class_wise_average': {\n 'ER': [],\n 'F': [],\n }\n },\n 'event_based_metrics': {\n 'overall': {\n 'ER': [],\n 'F': [],\n },\n 'class_wise_average': {\n 'ER': [],\n 'F': [],\n }\n },\n }\n for scene_id, scene_label in enumerate(dataset.scene_labels):\n print \" {:18s} | {:5.2f} | | {:4.1f} % : {:5.2f} : {:5.2f} : {:5.2f} : {:5.2f} | {:4.1f} % : {:5.2f} | {:4.1f} % : {:5.2f} | {:4.1f} % : {:5.2f} |\".format(scene_label,\n overall_metrics_per_scene[scene_label]['segment_based_metrics']['overall']['ER'],\n overall_metrics_per_scene[scene_label]['segment_based_metrics']['overall']['F'] * 100,\n overall_metrics_per_scene[scene_label]['segment_based_metrics']['overall']['ER'],\n overall_metrics_per_scene[scene_label]['segment_based_metrics']['overall']['S'],\n overall_metrics_per_scene[scene_label]['segment_based_metrics']['overall']['D'],\n overall_metrics_per_scene[scene_label]['segment_based_metrics']['overall']['I'],\n overall_metrics_per_scene[scene_label]['segment_based_metrics']['class_wise_average']['F']*100,\n overall_metrics_per_scene[scene_label]['segment_based_metrics']['class_wise_average']['ER'],\n overall_metrics_per_scene[scene_label]['event_based_metrics']['overall']['F']*100,\n overall_metrics_per_scene[scene_label]['event_based_metrics']['overall']['ER'],\n overall_metrics_per_scene[scene_label]['event_based_metrics']['class_wise_average']['F']*100,\n overall_metrics_per_scene[scene_label]['event_based_metrics']['class_wise_average']['ER'],\n )\n averages['segment_based_metrics']['overall']['ER'].append(overall_metrics_per_scene[scene_label]['segment_based_metrics']['overall']['ER'])\n averages['segment_based_metrics']['overall']['F'].append(overall_metrics_per_scene[scene_label]['segment_based_metrics']['overall']['F'])\n averages['segment_based_metrics']['class_wise_average']['ER'].append(overall_metrics_per_scene[scene_label]['segment_based_metrics']['class_wise_average']['ER'])\n averages['segment_based_metrics']['class_wise_average']['F'].append(overall_metrics_per_scene[scene_label]['segment_based_metrics']['class_wise_average']['F'])\n averages['event_based_metrics']['overall']['ER'].append(overall_metrics_per_scene[scene_label]['event_based_metrics']['overall']['ER'])\n averages['event_based_metrics']['overall']['F'].append(overall_metrics_per_scene[scene_label]['event_based_metrics']['overall']['F'])\n averages['event_based_metrics']['class_wise_average']['ER'].append(overall_metrics_per_scene[scene_label]['event_based_metrics']['class_wise_average']['ER'])\n averages['event_based_metrics']['class_wise_average']['F'].append(overall_metrics_per_scene[scene_label]['event_based_metrics']['class_wise_average']['F'])\n\n print \" -------------------+-------+ 
+--------+-------+-------+-------+-------+--------+-------+--------+-------+--------+-------+\"\n print \" {:18s} | {:5.2f} | | {:4.1f} % : {:5.2f} : {:21s} | {:4.1f} % : {:5.2f} | {:4.1f} % : {:5.2f} | {:4.1f} % : {:5.2f} |\".format('Average',\n numpy.mean(averages['segment_based_metrics']['overall']['ER']),\n numpy.mean(averages['segment_based_metrics']['overall']['F'])*100,\n numpy.mean(averages['segment_based_metrics']['overall']['ER']),\n ' ',\n numpy.mean(averages['segment_based_metrics']['class_wise_average']['F'])*100,\n numpy.mean(averages['segment_based_metrics']['class_wise_average']['ER']),\n numpy.mean(averages['event_based_metrics']['overall']['F'])*100,\n numpy.mean(averages['event_based_metrics']['overall']['ER']),\n numpy.mean(averages['event_based_metrics']['class_wise_average']['F'])*100,\n numpy.mean(averages['event_based_metrics']['class_wise_average']['ER']),\n )\n\n print \" \"\n # Restore warnings to default settings\n warnings.simplefilter(\"default\")\n print \" Results per events \"\n\n for scene_id, scene_label in enumerate(dataset.scene_labels):\n print \" \"\n print \" \"+scene_label.upper()\n print \" {:20s} | {:30s} | | {:15s} \".format('', 'Segment-based', 'Event-based')\n print \" {:20s} | {:5s} : {:5s} : {:6s} : {:5s} | | {:5s} : {:5s} : {:6s} : {:5s} |\".format('Event', 'Nref', 'Nsys', 'F1', 'ER', 'Nref', 'Nsys', 'F1', 'ER')\n print \" ---------------------+-------+-------+--------+-------+ +-------+-------+--------+-------+\"\n seg_Nref = 0\n seg_Nsys = 0\n\n event_Nref = 0\n event_Nsys = 0\n for event_label in sorted(overall_metrics_per_scene[scene_label]['segment_based_metrics']['class_wise']):\n print \" {:20s} | {:5d} : {:5d} : {:4.1f} % : {:5.2f} | | {:5d} : {:5d} : {:4.1f} % : {:5.2f} |\".format(event_label,\n int(overall_metrics_per_scene[scene_label]['segment_based_metrics']['class_wise'][event_label]['Nref']),\n int(overall_metrics_per_scene[scene_label]['segment_based_metrics']['class_wise'][event_label]['Nsys']),\n overall_metrics_per_scene[scene_label]['segment_based_metrics']['class_wise'][event_label]['F']*100,\n overall_metrics_per_scene[scene_label]['segment_based_metrics']['class_wise'][event_label]['ER'],\n int(overall_metrics_per_scene[scene_label]['event_based_metrics']['class_wise'][event_label]['Nref']),\n int(overall_metrics_per_scene[scene_label]['event_based_metrics']['class_wise'][event_label]['Nsys']),\n overall_metrics_per_scene[scene_label]['event_based_metrics']['class_wise'][event_label]['F']*100,\n overall_metrics_per_scene[scene_label]['event_based_metrics']['class_wise'][event_label]['ER'])\n seg_Nref += int(overall_metrics_per_scene[scene_label]['segment_based_metrics']['class_wise'][event_label]['Nref'])\n seg_Nsys += int(overall_metrics_per_scene[scene_label]['segment_based_metrics']['class_wise'][event_label]['Nsys'])\n\n event_Nref += int(overall_metrics_per_scene[scene_label]['event_based_metrics']['class_wise'][event_label]['Nref'])\n event_Nsys += int(overall_metrics_per_scene[scene_label]['event_based_metrics']['class_wise'][event_label]['Nsys'])\n print \" ---------------------+-------+-------+--------+-------+ +-------+-------+--------+-------+\"\n print \" {:20s} | {:5d} : {:5d} : {:14s} | | {:5d} : {:5d} : {:14s} |\".format('Sum',\n seg_Nref,\n seg_Nsys,\n '',\n event_Nref,\n event_Nsys,\n '')\n print \" {:20s} | {:5s} {:5s} : {:4.1f} % : {:5.2f} | | {:5s} {:5s} : {:4.1f} % : {:5.2f} |\".format('Average',\n '', '',\n 
overall_metrics_per_scene[scene_label]['segment_based_metrics']['class_wise_average']['F']*100,\n overall_metrics_per_scene[scene_label]['segment_based_metrics']['class_wise_average']['ER'],\n '', '',\n overall_metrics_per_scene[scene_label]['event_based_metrics']['class_wise_average']['F']*100,\n overall_metrics_per_scene[scene_label]['event_based_metrics']['class_wise_average']['ER'])\n print \" \"" ]
[ "0.7678181", "0.7512594", "0.7453756", "0.71230525", "0.68468404", "0.67882264", "0.677536", "0.67679405", "0.67434514", "0.67238873", "0.6684663", "0.66521555", "0.6645087", "0.66166586", "0.6613626", "0.66093993", "0.66065264", "0.65886134", "0.657816", "0.65687567", "0.6567206", "0.6563755", "0.6533835", "0.65311116", "0.6524253", "0.65189445", "0.648739", "0.64663005", "0.6451182", "0.6448828" ]
0.76998883
0
Strips the string value if and only if the string is not made up entirely of " " characters (i.e., at least one character is not a space).
def strip_if_not_blank(value):
    if any([i != " " for i in value]):
        return value.strip()
    return value
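A quick illustration of the behaviour described by the query (a sketch, assuming the function above is in scope): strings containing any non-space character are stripped, while all-space or empty strings are returned unchanged.

assert strip_if_not_blank("  hello  ") == "hello"  # has non-space chars, so it is stripped
assert strip_if_not_blank("   ") == "   "          # all spaces, returned as-is
assert strip_if_not_blank("") == ""                # any() over an empty string is False, returned as-is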
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_strip_string(self, i_str):\n return ''.join(e for e in i_str if e.isalnum())", "def filter(string):\n # remove all unwanted characters\n return regex2.sub(' ', string)", "def strip_string(input):\n return input.lower().replace(\" \", \"\")", "def _clean(self, string):\n return re.sub('\\s+', ' ', string).strip()", "def filter_blanks(user, str):\n if user.is_staff:\n return str\n return re.sub(r'\\n{2}\\n+', '\\n', str)", "def clean(self, value):\n value = super().clean(value)\n if value in self.empty_values:\n return value\n return value.replace(' ', '')", "def _filter_string(cls, string, extra_chars=\"\"):\n char_white_list = ascii_letters + digits + extra_chars\n return \"\".join([char for char in string if char in char_white_list])", "def _filter_string(cls, string, extra_chars=\"\"):\n char_white_list = ascii_letters + digits + extra_chars\n return \"\".join([char for char in string if char in char_white_list])", "def clean_str_sst(string):\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip().lower()", "def filter_blanks(user, str):\n return re.sub(r'\\n{2}\\n+', '\\n', str)", "def clean_string(in_str):\n # Remove extra whitespaces\n in_str = ' '.join(in_str.split())\n # Remove whitespaces before punctuation\n in_str = re.sub(r'\\s([?.!\"](?:\\s|$))', r'\\1', in_str)\n\n return in_str", "def clean_str_sst(string):\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string) \n string = re.sub(r\"\\s{2,}\", \" \", string) \n return string.strip().lower()", "def scrub(input_string):\n return ''.join(k for k in input_string if k.isalnum())", "def string_cleanup(s, garbage=\":,-()&\"):\n s_new = ''\n for x in s:\n if x not in garbage:\n s_new += x\n\n return s_new", "def clean_string(value):\n\treturn re.sub(r'[^a-zA-Z0-9_.]', '', str(value))", "def clean_str_sst(string):\n string = re.sub(r\"[^A-Za-z0-9(),!?\\'\\`]\", \" \", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip().lower()", "def cleanString(self, s):\r\n s = s.lower()\r\n for x in s: \r\n if x in punctuation:\r\n s = s.replace(x, '')\r\n return s", "def clean(val):\n\n val = re.sub(r'/s+', r'/s', val)\n return val.strip()", "def replace_empty(s):\n if s == \"\":\n return \" \"\n else:\n return s", "def _clean(s):\n return re.sub(r'\\s+', ' ', s.strip())", "def clean_unnecessary_characters(self, tweet):\n tweet = tweet.lstrip(\"\\\"\").rstrip(\"\\\"\")\n tweet = re.sub(self.compiledAlphanumericRegex, ' ', tweet)\n tweet = tweet.replace('_', ' ')\n return tweet", "def _replace_non_alnum(self):\n no_punct = [x if x.isalnum() else ' ' for x in self._phrase.lower()]\n return ''.join(no_punct) # Convert an array of char to string", "def squeeze(value):\r\n return re.sub(r\"[\\x00-\\x20]+\", \" \", value).strip()", "def super_clean_str(string):\n return ''.join(x for x in string if x.isalnum()).lower()", "def strip_space(string):\n return string.replace(' ', '')", "def stripword( s ) :\n return re.sub( '[\\W\\d]', '', s )", "def clean_str(string):\n # Remove punctuation\n string = re.sub(r\"[^\\u4e00-\\u9fff]\", \" \", string)\n string = re.sub(r\"\\s{2,}\", \" \", string)\n return string.strip()", "def trimAlphaNum(self, value):\n\n while value and value[-1].isalnum():\n value = value[:-1]\n\n while value and value[0].isalnum():\n value = value[1:]\n\n return value", "def validate_strip(cls, value: str) -> str:\n if cls.strip is True:\n value = value.strip()\n return value", "def space_strip(string):\n string= 
re.sub(\"(?m)^\\s+\", \"\", string)\n return re.sub(\"(?m)\\s+$\", \"\", string)" ]
[ "0.7316561", "0.7145191", "0.7046523", "0.7038135", "0.70192903", "0.69609845", "0.6947088", "0.6947088", "0.69281536", "0.69197446", "0.6914073", "0.690112", "0.6887776", "0.6860018", "0.683223", "0.6832113", "0.6808764", "0.6795601", "0.67919624", "0.677004", "0.6751491", "0.6736513", "0.6710478", "0.67025936", "0.6702181", "0.669842", "0.66954565", "0.66852325", "0.6684002", "0.66741663" ]
0.7467689
0
Safely joins an array of items into a single space-separated string, preventing empty/null values from corrupting the final string spacing. Each element may also carry prefix/suffix values that are applied when the element itself is not null.
def space_join(*items):
    valid_items = []
    for item in items:
        if item is None:
            continue
        if isinstance(item, tuple):
            if item[0] is None:
                continue
            stripped = strip_if_not_blank(item[0])
            if not is_null(stripped):
                if len(item) == 2:
                    if not is_null(item[1]):
                        valid_items.append("%s%s" % (item[1], stripped))
                    else:
                        valid_items.append(stripped)
                elif len(item) >= 3:
                    if not is_null(item[1]) and not is_null(item[2]):
                        valid_items.append("%s%s%s" % (item[1], stripped, item[2]))
                    elif not is_null(item[1]):
                        valid_items.append("%s%s" % (item[1], stripped))
                    elif not is_null(item[2]):
                        valid_items.append("%s%s" % (stripped, item[2]))
        else:
            stripped = strip_if_not_blank(item)
            if stripped != "":
                valid_items.append(stripped)
    return " ".join(valid_items)
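A hypothetical usage sketch; it assumes the is_null() helper (not shown in this record) treats None and empty values as null, and that strip_if_not_blank() from the previous record is in scope.

# Hypothetical call: plain strings, None values and (value, prefix[, suffix]) tuples mixed together
space_join("Total", None, ("100", "$"), ("net", "(", ")"))
# -> "Total $100 (net)"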
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def join(self, iterable) -> String:\n pass", "def join(self, iterable):\n result = ANSIString(\"\")\n last_item = None\n for item in iterable:\n if last_item is not None:\n result += self._raw_string\n if not isinstance(item, ANSIString):\n item = ANSIString(item)\n result += item\n last_item = item\n return result", "def join_list(items: Iterable[str]) -> str:\n\n return ITEM_SEPARATOR.join(items)", "def join(sep, xs):\n return str(sep).join(xs)", "def concat(values, sep=', '):\n concat_str = None\n try:\n concat_str = sep.join([str(v) for v in values if not is_empty(v)])\n except Exception as e:\n pass\n return concat_str", "def Join(sourcearray, delimeter=\" \"):\n s_list = list(map(str, sourcearray))\n return delimeter.join(s_list)", "def underscore_join(iterable):\n iterable_as_str = [str(x) for x in iterable]\n return \"__\".join(iterable_as_str)", "def commajoin(array, elements_to_quote, indent=0):\n result = \" \" * indent + \"(\"\n for i in elements_to_quote:\n if array[i] == '':\n array[i] = \"NULL\"\n else:\n array[i] = \"'\" + str(array[i]) + \"'\"\n j = 0\n for i in array:\n result += str(i)\n if j != len(array) - 1:\n result += \", \"\n j += 1\n return result + \")\"", "def implicit_cat(values):\n values = [value for value in values if value is not None]\n if len(values) == 0:\n return None\n if len(values) == 1:\n return values[0]\n return \"\".join(str(value) for value in values)", "def join(sep, seq):\n return _to_bytes_or_str_array(\n _vec_string(sep, object_, 'join', (seq,)), seq)", "def implode(delim, items):\n return delim.join(items)", "def multilinify(sequence, sep=\",\"):\n sep += \"\\n\"\n return \"\\n\" + sep.join(sequence)", "def join_recursive(lst, sep):\n msg = ''\n for i in lst:\n if isinstance(i, tuple) or isinstance(i, list):\n msg += join_recursive(i, sep)\n else:\n msg += (i + sep)\n return msg", "def list_join(the_list):\n return ' '.join(the_list)", "def my_join(iters, string):\n out = ''\n for i in range(iters):\n out += string.join(\", \")\n return out", "def my_join(iters, string):\n out = ''\n for i in range(iters):\n out += \", \" + string\n return out", "def concatenate_items(items, conjunction='and'):\n text = ''\n if not items:\n text = ''\n elif len(items) == 1:\n text = items[0]\n elif len(items) == 2:\n text = '{} {} {}'.format(items[0], conjunction, items[1])\n else:\n text = ', '.join(items[:-1])\n text += ', {} {}'.format(conjunction, items[-1])\n return text", "def __join_expanded(expanded: list[typing.Union[str, list[str]]]) -> list[str]:\n list_values = [(i, val) for i, val in enumerate(expanded) if isinstance(val, list)]\n\n if len(list_values) == 0:\n return [\"\".join(expanded)]\n\n initial_len = len(list_values[0][1]) if list_values else None\n\n if not all(len(i) == initial_len for _, i in list_values[1::]):\n raise ValueError(\"not all non-expanded list are of the same size\")\n\n pairs = zip(*[[(i, j) for j in val] for i, val in list_values])\n\n result = list()\n for pair in pairs:\n cc = expanded.copy()\n\n for i, v in pair:\n del(cc[i])\n cc.insert(i, v)\n\n result.append(\"\".join(cc))\n\n return result", "def flatten_list(items: List[str]) -> str:\n if len(items) == 1:\n return f'\"{items[0]}\"'\n\n try:\n last = items[-1]\n except IndexError:\n # Empty list\n raise ValueError('Empty list of values received')\n\n return ', '.join(f'\"{item}\"' for item in items[:-1]) + f' or \"{last}\"'", "def concat_strings(l_strings):\n if l_strings == []:\n return \"\"\n else: \n return l_strings[0] + \" \" + 
concat_strings(l_strings[1:])", "def my_join(iters, string):\n out = \"\"\n for i in range(iters):\n out += \",\" + string \n return out", "def _concat(self, *args, **kwargs):\n values = list(args)\n output = []\n for value in values:\n if not isinstance(value, (str, basestring)):\n value = unicode(value)\n else:\n value = unicode(value)\n value = value.strip()\n output.append(value)\n output = kwargs[\"delimiter\"].join(output)\n output = unicode(output)\n return output", "def join_items(values, sort=False):\n\tif isinstance(values, str):\n\t\treturn clean_string(values)\n\n\ttry:\n\t\tval = []\n\t\tfor v in values:\n\t\t\tval.append(clean_string(v))\n\t\tif sort:\n\t\t\tval.sort()\n\t\treturn \"-\".join(val)\n\texcept TypeError:\n\t\treturn str(values)", "def array_to_concatenated_string(array):\r\n return \",\".join(str(x) for x in array)", "def robust_join(s, sep=','):\n return sep.join([str(e) for e in s])", "def _format_item_list(items, pad=\"'\", sep=', ', end_sep=' and '):\n result = ''\n items = [pad + item + pad for item in items]\n if items:\n if len(items) != 1:\n result = sep.join(items[:-1]) + end_sep + items[-1]\n else:\n result = items[0]\n return result", "def join_with_or(values) -> str:\n return join_with_and(values, 'or')", "def join_list(jlist, joiner=', '):\n if len(jlist) == 0:\n jlist = '[]'\n else:\n jlist = joiner.join(jlist)\n return jlist", "def bytearray_join(glue, list_of_barrays):\n res = list_of_barrays[0]\n for i in range(1,len(list_of_barrays)):\n res += glue + list_of_barrays[i]\n return res", "def array_to_concatenated_string(array):\n return \",\".join(str(x) for x in array)" ]
[ "0.6982282", "0.69374067", "0.6928608", "0.68068033", "0.6782663", "0.64416945", "0.6399586", "0.6399031", "0.6292311", "0.6265237", "0.62498164", "0.62322575", "0.6216144", "0.6168476", "0.61541164", "0.6151066", "0.6119639", "0.61163634", "0.6109299", "0.607979", "0.60750186", "0.6070868", "0.6042205", "0.6021278", "0.60101426", "0.5993096", "0.59873915", "0.59759057", "0.5966963", "0.59663093" ]
0.7044498
0
Given a child and a parent, tries to infer whether or not the child is an extension of the parent.
def extends_or_instance_of(child, parent):
    if isinstance(child, six.string_types):
        raise ValueError("The child cannot be of string type.")

    if isinstance(parent, six.string_types):
        if isinstance(child, type):
            bases = classlookup(child)
            return (
                parent in [base.__name__ for base in bases]
                or child.__name__ == parent
            )
        elif hasattr(child, '__class__'):
            return extends_or_instance_of(child.__class__, parent)
        else:
            raise ValueError("Invalid child type.")

    elif hasattr(parent, '__iter__'):
        extensions = [extends_or_instance_of(child, p) for p in parent]
        return any(extensions)

    # The parent is a class.
    elif isinstance(parent, type):
        if isinstance(child, type):
            # Note: We could probably also call recursively here.
            return parent in classlookup(child) or parent == child
        elif hasattr(child, '__class__'):
            return extends_or_instance_of(child.__class__, parent)
        else:
            raise ValueError("Invalid child type.")

    # The parent is a class instance.
    elif hasattr(parent, '__class__'):
        if isinstance(child, type):
            # Note: We could probably also call recursively here.
            return extends_or_instance_of(child, parent.__class__)
        elif hasattr(child, '__class__'):
            return extends_or_instance_of(child.__class__, parent.__class__)
        else:
            raise ValueError("Invalid child type.")
    else:
        raise ValueError("Invalid parent type.")
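An illustrative sketch of the three parent forms the function accepts; classlookup() is assumed to return the ancestor classes of its argument (it is not shown in this record).

class Base(object):
    pass

class Child(Base):
    pass

extends_or_instance_of(Child, Base)           # True: Child subclasses Base
extends_or_instance_of(Child(), "Base")       # True: parent given by class name
extends_or_instance_of(Child(), [int, Base])  # True: any parent in the iterable matches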
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _is_child(self, parent, child): # type: (str, str) -> bool\n return child != parent and child.startswith(parent + \".\")", "def isAncestorOf(ancestor, child):\n\twhile child is not None:\n\t\tif child is ancestor:\n\t\t\treturn True\n\t\tchild = child.parent()\n\treturn False", "def inherits_from(child, parent_name):\n if inspect.isclass(child):\n if parent_name in [c.__name__ for c in inspect.getmro(child)[1:]]:\n return True\n return False", "def is_extending_or_extended_by(self, other):\n raise NotImplementedError()", "def HasAncestor(self, other):\n return (self == other) or (self.parent and self.parent.HasAncestor(other))", "def is_known(self, child):\r\n return child in self._parents", "def is_parent(child, parent):\n # Get the list of processes\n assert child is not None\n assert parent is not None\n #child_ranks = [i for i in xrange(child.Get_size())]\n child_group = child.Get_group()\n parent_group = parent.Get_group()\n inter_group = MPI.Group.Intersect(child_group, parent_group)\n return child_group.Get_size() == inter_group.Get_size()", "def is_ancestor(parent_alphabet, child_alphabet):\r\n alphabet = parent_alphabet\r\n while alphabet:\r\n if child_alphabet == alphabet:\r\n return True\r\n alphabet = alphabet.alphabet\r\n return False", "def islchild(self):\n\t\tif (self.parent() and self.parent().lchild() is self): #TODO is or == here\n\t\t\treturn True\n\t\treturn False", "def is_ancestor(self, other):\n\n if other is self:\n return True\n elif hasattr(other, 'base'):\n return self.is_ancestor(other.base)\n else:\n return False", "def is_relative_to(sub_path, parent):\n try:\n parent_path = Path(parent).resolve()\n sub_path.resolve().relative_to(parent_path)\n return True\n except ValueError:\n return False", "def is_ancestor(ancestor, parent):\n try:\n subprocess.check_call([\"git\", \"merge-base\", \"--is-ancestor\", ancestor, parent],)\n return True\n except subprocess.CalledProcessError:\n return False", "def is_state_a_child(child: State, parent: State) -> bool:\n if child.x >= parent.x and child.y >= parent.y and child.x <= parent.x + parent.width and child.y<=parent.y+parent.height:\n return True\n return False", "def IsDescendantOf(self, parent, item):\r\n\r\n while item:\r\n \r\n if item == parent:\r\n \r\n # item is a descendant of parent\r\n return True\r\n \r\n item = item.GetParent()\r\n \r\n return False", "def is_subpath_of(parent, child):\n # Based on https://stackoverflow.com/a/37095733 .\n\n # In Python 3.9, the `Path.is_relative_to()` method will supplant this, so\n # we can stop using crusty old os.path functions.\n parent_realpath = os.path.realpath(parent)\n child_realpath = os.path.realpath(child)\n return os.path.commonpath([parent_realpath, child_realpath]) == parent_realpath", "def circular_checker(parent, child):\n if parent == child:\n raise ValidationError('Self links are not allowed.')\n\n if child.pk in parent.get_ancestor_pks():\n raise ValidationError('The object is an ancestor.')", "def can_add_child(self, child):\n if not self.is_valid_child(child):\n return False\n if child.isa == u'PBXGroup':\n return len(func.take(\\\n lambda c: c.pbx_name == child.pbx_name and c.realpath() == child.realpath(),\\\n self.pbx_children)) == 0\n else:\n return len(func.take(lambda c:c.realpath() == child.realpath(), self.pbx_children)) == 0", "def is_subhalo(self, childid, parentid):\n if (childid in self._halos[parentid].properties['children']):\n return True\n else:\n return False", "def is_sub(parent, path):\n parent = canonical_path(parent, 
resolve_link=False)\n path = canonical_path(path, resolve_link=False)\n return os.path.commonprefix([parent, path]) == parent", "def is_extended(self):\n return self._parent is not None", "def is_subclass(parent_class, child_class_name):\n for child_class in parent_class.__subclasses__():\n if child_class.__name__ == child_class_name:\n return True\n return False", "def is_child_class(obj, classinfo):\n try:\n return issubclass(obj, classinfo)\n except TypeError:\n return None", "def is_descendant(self, other):\n return other.is_ancestor(self)", "def has_parent(obj, parent_name):\n if obj.parent is None:\n return False\n if obj.parent.name is None:\n return False\n elif obj.parent.name == parent_name:\n return True\n else:\n return has_parent(obj.parent, parent_name)", "def is_valid_child(self, child):\n return isinstance(child, baseobject.PBXBaseObject) \\\n and child.isa in self.allow_children_types()", "def isancestor(s2,tree):\n if tree is s2: return True\n if tree is None: return False\n else:\n return isancestor(s2, tree.left) or isancestor(s2, tree.right)", "def hasChildren():", "def _is_hierachy_searchable(child_id: str) -> bool:\n pieces_of_child_id_list = child_id.split('.')\n suffix = pieces_of_child_id_list[len(pieces_of_child_id_list) - 1]\n return suffix.isnumeric()", "def is_child_graph(self, child_graph):\n # pylint: disable=protected-access\n if not child_graph or not child_graph._parent_graph:\n return False\n if child_graph._parent_graph == self:\n return True\n return self.is_child_graph(child_graph._parent_graph)\n # pylint: enable=protected-access", "def _is_child_path(path, parent_path, link_name=None):\n b_path = to_bytes(path, errors='surrogate_or_strict')\n\n if link_name and not os.path.isabs(b_path):\n # If link_name is specified, path is the source of the link and we need to resolve the absolute path.\n b_link_dir = os.path.dirname(to_bytes(link_name, errors='surrogate_or_strict'))\n b_path = os.path.abspath(os.path.join(b_link_dir, b_path))\n\n b_parent_path = to_bytes(parent_path, errors='surrogate_or_strict')\n return b_path == b_parent_path or b_path.startswith(b_parent_path + to_bytes(os.path.sep))" ]
[ "0.7233818", "0.633225", "0.6328605", "0.6110934", "0.6085241", "0.60491276", "0.6047496", "0.59858125", "0.59134716", "0.58713543", "0.5867895", "0.5824956", "0.5813192", "0.5767744", "0.5751257", "0.5695839", "0.5680158", "0.5635793", "0.5599923", "0.55797595", "0.55704015", "0.55369693", "0.5519212", "0.5486396", "0.54554695", "0.54496324", "0.5415137", "0.54105586", "0.5348228", "0.53308445" ]
0.75583863
0
Performs a POST request to /accounts/viewaccount.{ResponseType} and displays the account description.
def create_view_account(self, options=dict()):
    # Validate required parameters
    self.validate_parameters(date=options.get("date"))

    # Prepare query URL
    _query_builder = Configuration.get_base_uri()
    _query_builder += '/accounts/viewaccount.{ResponseType}'
    _query_builder = APIHelper.append_url_with_template_parameters(_query_builder, {
        'ResponseType': options.get('response_type', None)
    })
    _query_url = APIHelper.clean_url(_query_builder)

    # Prepare form parameters
    _form_parameters = {
        'date': options.get('date', None)
    }
    _form_parameters = APIHelper.form_encode_parameters(_form_parameters)

    # Prepare and execute request
    _request = self.http_client.post(_query_url, parameters=_form_parameters)
    BasicAuth.apply(_request)
    _context = self.execute_request(_request)
    self.validate_response(_context)

    # Return appropriate type
    return _context.response.raw_body
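A hypothetical call sketch; the controller class name and the concrete option values are assumptions, since only this method body appears in the record.

# Hypothetical usage (the controller class name is an assumption):
controller = AccountsController()
raw_body = controller.create_view_account({
    'date': '2024-01-31',      # required form parameter
    'response_type': 'json'    # fills the {ResponseType} URL template segment
})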
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_account_view_good(flask_server, create_account):\n import json\n import requests\n\n data = create_account\n\n req = requests.post('{}/account/view'.format(API_URL), data=data)\n assert req.status_code == 200\n assert json.loads(req.content.decode('utf-8')) == [data['name'], data['code'], 0]", "def display_accounts_details():\n return Records.display_records()", "def account_info(request):\r\n user = request.user\r\n\r\n return _api_response(request, user.safe_data())", "def display_accounts_details():\n return Credentials.display_credentials()", "def get_account_details(self):\n pass", "def account(self):\n return self.request('/account')", "def view_bank_account_details(self) -> None:\n Menu.prompt_view_bank_account_details()\n print(\"Bank Account Details:\")\n print(self.user.account)\n\n for tx_num, tx_details in \\\n self.user.tx_manager.transaction_records.items():\n print(f\"\\nTransaction #{tx_num}:\\n\"\n f\"{tx_details}\")\n\n print(f\"\\nSpending Summary:\")\n print(f\" Starting Bank Balance: \"\n f\"{'{:.2f}'.format(self.user.account.starting_balance)}\")\n print(f\" Total Transactions Amount: \"\n f\"{'{:.2f}'.format(self.user.tx_manager.calc_total_spent())}\")\n print(f\" Closing Bank Account Balance: \"\n f\"{'{:.2f}'.format(self.user.account.current_balance)}\")", "def post(self):\n ctx = _request_ctx_stack.top\n current_user = ctx.user\n request_body = request.get_json()\n name = request_body.get('name')\n account_type = request_body.get('type')\n initial_balance = request_body.get('ini_bal')\n if name:\n try:\n acc_factory = AccountFactory()\n if account_type == 'credit':\n limit = request_body.get('limit')\n if limit is None:\n return response('failed', 'Please specify a credit limit for a credit account', 400)\n new_account = acc_factory.create_account(\n name=name,\n account_type=account_type,\n user_id=current_user.id,\n initial_balance=initial_balance,\n limit=limit\n )\n else:\n new_account = acc_factory.create_account(\n name=name,\n account_type=account_type,\n user_id=current_user.id,\n initial_balance=initial_balance\n )\n new_account.save()\n except IntegrityError:\n return response('failed', 'Duplicate account name', 400)\n else:\n return response_created_account(new_account, 200)\n return response('failed', 'Missing account name attribute', 400)", "def account_info(self):\n url, params, headers = self.request(\"/account/info\", method='GET')\n\n return self.rest_client.GET(url, headers)", "def account():\n\n return render_template('account_page.html', title='Account')", "def response_created_account(new_account, status_code):\n status = {\n 'status': 'success'\n }\n new_acc_info = new_account.json()\n return make_response(jsonify({**status, **new_acc_info})), status_code", "async def get_account_info(self, **kwargs: Any) -> None: # pylint: disable=inconsistent-return-statements\n error_map = {\n 401: ClientAuthenticationError,\n 404: ResourceNotFoundError,\n 409: ResourceExistsError,\n 304: ResourceNotModifiedError,\n }\n error_map.update(kwargs.pop(\"error_map\", {}) or {})\n\n _headers = kwargs.pop(\"headers\", {}) or {}\n _params = case_insensitive_dict(kwargs.pop(\"params\", {}) or {})\n\n restype: Literal[\"account\"] = kwargs.pop(\"restype\", _params.pop(\"restype\", \"account\"))\n comp: Literal[\"properties\"] = kwargs.pop(\"comp\", _params.pop(\"comp\", \"properties\"))\n cls: ClsType[None] = kwargs.pop(\"cls\", None)\n\n request = build_get_account_info_request(\n url=self._config.url,\n restype=restype,\n comp=comp,\n 
version=self._config.version,\n template_url=self.get_account_info.metadata[\"url\"],\n headers=_headers,\n params=_params,\n )\n request = _convert_request(request)\n request.url = self._client.format_url(request.url)\n\n _stream = False\n pipeline_response: PipelineResponse = await self._client._pipeline.run( # pylint: disable=protected-access\n request, stream=_stream, **kwargs\n )\n\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize.failsafe_deserialize(_models.StorageError, pipeline_response)\n raise HttpResponseError(response=response, model=error)\n\n response_headers = {}\n response_headers[\"x-ms-client-request-id\"] = self._deserialize(\n \"str\", response.headers.get(\"x-ms-client-request-id\")\n )\n response_headers[\"x-ms-request-id\"] = self._deserialize(\"str\", response.headers.get(\"x-ms-request-id\"))\n response_headers[\"x-ms-version\"] = self._deserialize(\"str\", response.headers.get(\"x-ms-version\"))\n response_headers[\"Date\"] = self._deserialize(\"rfc-1123\", response.headers.get(\"Date\"))\n response_headers[\"x-ms-sku-name\"] = self._deserialize(\"str\", response.headers.get(\"x-ms-sku-name\"))\n response_headers[\"x-ms-account-kind\"] = self._deserialize(\"str\", response.headers.get(\"x-ms-account-kind\"))\n\n if cls:\n return cls(pipeline_response, None, response_headers)", "def test_showing_dietitian_account(self):\n\n result = self.client.get(\"/dietitian/1/account\")\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"Account Details\", result.data)\n\n result = self.client.get(\"/dietitian/2/account\", follow_redirects=True)\n self.assertEqual(result.status_code, 200)\n self.assertIn(b\"not authorized\", result.data)", "def get_describable_list(request):\n describables = []\n\n from django.apps import apps\n for entity in apps.get_app_config('descriptor').describable_entities:\n content_type = get_object_or_404(\n ContentType, app_label=entity._meta.app_label, model=entity._meta.model_name)\n\n describables.append({\n 'id': content_type.pk,\n 'value': \"%s.%s\" % (entity._meta.app_label, entity._meta.model_name),\n 'label': str(entity._meta.verbose_name.capitalize())\n })\n\n return HttpResponseRest(request, describables)", "def create_account():\n\n return render_template('account.html')", "def account_summary(self):\n pass", "def describe_account_attributes():\n pass", "def test_create_account(self):\n url = reverse('portal-list')\n data = {'brandID': 5, 'status' : 'Enabled'}\n response = self.client.post(url, data, format='json')\n\n #response = self.client.get(url)\n #print response\n #response = self.client.get('/v1/portal/1/')\n #print response\n #self.assertEqual(response.data[\"ud\"], {'id': 1, 'brandID': 4})\n self.assertEqual(response.data[\"brandID\"], 5)\n\n \"\"\"\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Account.objects.count(), 1)\n self.assertEqual(Account.objects.get().name, 'DabApps')\n \"\"\"", "def get_account_info(self):\n resource = self.domain + \"/account\"\n self.logger.debug(\"Pulling data from {0}\".format(resource))\n response = self.session.get(resource)\n\n if response.status_code != requests.codes.ok:\n return response.raise_for_status()\n data = response.text\n root = Et.fromstring(data)\n bf = BadgerFish(dict_type=dict)\n account_info = bf.data(root)\n return account_info", "def account_info(remote, resp):\n try:\n return 
_account_info(remote, resp)\n except OAuthCERNRejectedAccountError as e:\n current_app.logger.warning(e.message, exc_info=True)\n flash(_(\"CERN account not allowed.\"), category=\"danger\")\n return redirect(\"/\")", "async def get_account_info(\n self,\n **kwargs\n ) -> None:\n cls = kwargs.pop('cls', None) # type: ClsType[None]\n error_map = {\n 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError\n }\n error_map.update(kwargs.pop('error_map', {}))\n restype = \"account\"\n comp = \"properties\"\n accept = \"application/xml\"\n\n # Construct URL\n url = self.get_account_info.metadata['url'] # type: ignore\n path_format_arguments = {\n 'url': self._serialize.url(\"self._config.url\", self._config.url, 'str', skip_quote=True),\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {} # type: Dict[str, Any]\n query_parameters['restype'] = self._serialize.query(\"restype\", restype, 'str')\n query_parameters['comp'] = self._serialize.query(\"comp\", comp, 'str')\n\n # Construct headers\n header_parameters = {} # type: Dict[str, Any]\n header_parameters['x-ms-version'] = self._serialize.header(\"self._config.version\", self._config.version, 'str')\n header_parameters['Accept'] = self._serialize.header(\"accept\", accept, 'str')\n\n request = self._client.get(url, query_parameters, header_parameters)\n pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)\n response = pipeline_response.http_response\n\n if response.status_code not in [200]:\n map_error(status_code=response.status_code, response=response, error_map=error_map)\n error = self._deserialize(_models.StorageError, response)\n raise HttpResponseError(response=response, model=error)\n\n response_headers = {}\n response_headers['x-ms-client-request-id']=self._deserialize('str', response.headers.get('x-ms-client-request-id'))\n response_headers['x-ms-request-id']=self._deserialize('str', response.headers.get('x-ms-request-id'))\n response_headers['x-ms-version']=self._deserialize('str', response.headers.get('x-ms-version'))\n response_headers['Date']=self._deserialize('rfc-1123', response.headers.get('Date'))\n response_headers['x-ms-sku-name']=self._deserialize('str', response.headers.get('x-ms-sku-name'))\n response_headers['x-ms-account-kind']=self._deserialize('str', response.headers.get('x-ms-account-kind'))\n\n if cls:\n return cls(pipeline_response, None, response_headers)", "def get_bill_details(request):\n\n print request\n\n context = request['context']\n print context\n try:\n telephone_number = first_entity_value(request['entities'], 'phone_number')\n with open(os.path.join(sys.path[0], \"app/wit/static/users.json\"), \"r\") as data_file:\n data = json.load(data_file)\n customer_billing = data[telephone_number]['last_month_billing']\n print customer_billing\n\n customer_type = data[telephone_number]['type_customer']\n if customer_type == 'postpaid':\n\n reply = \"Our Initial Investigation shows that you're a \" + data[telephone_number]['type_customer'] + \" Customer and currently using \" + data[telephone_number]['plan_details'] + \" plan type.\"\n if customer_billing['roaming'] == 'True':\n reply += \"You had used your cellphone while on roaming for which you were charged extra.\"\n elif customer_billing['data_exhaust'] == 'True':\n reply += \"You had used your data network after your allocated limit was exhausted. 
You were charged for these services\"\n elif customer_billing['subscribed'] == 'True':\n reply += \"You had subscribed to some promotional services for which you were charged in extra.\"\n else:\n reply = \"Our Initial Investigation shows that you're a \" + data[telephone_number]['type_customer'] + \". We believe that this might be a mistake from our side and would like you to speak to our customer care executives separately.\"\n\n\n except:\n telephone_number = None\n reply = \"Your number is not subscribed with Airtel. Please contact your network operator for your query\"\n\n\n print reply\n\n context['bill_details'] = reply\n\n return context", "def list(self, request, graph_type=None):\n user = {}\n if request.authenticated_userid:\n account = Account.one(request, request.authenticated_userid)\n user['account_id'] = account.id\n if account.check_admin(request, user):\n cleaned_data = {}\n raw_data = NLTKOutput.all(request)\n for record in raw_data:\n if record.account_id in cleaned_data:\n cleaned_data[record.account_id].append(record.nltk_result)\n else:\n cleaned_data[record.account_id] = [record.nltk_result]\n if graph_type == 'stacked_bar':\n return_obj = stacked_bar_for_all(cleaned_data)\n if graph_type == 'pie':\n return_obj = pie_for_all(cleaned_data)\n if graph_type == 'compound_bar':\n return_obj = compound_for_all(cleaned_data) \n return Response(return_obj.encode(), status=200)", "def account(request: Request) -> Dict:\n # Get account\n account_id: int = request.matchdict.get(\"account_id\")\n account_obj: Optional[Account] = get_account_by_id(\n session=request.dbsession,\n account_id=account_id,\n )\n # TODO: Check access\n\n\n return {\n \"account\": account_obj,\n }", "def addemail_response(account, conf):\n template_args = dict(\n site_name=conf.get('name', ''),\n account=account\n )\n\n addemail_view = TemplateView(\n config.template_filepath('addemail.html'),\n template_args\n )\n\n return HttpResponse(addemail_view)", "def describe_accounts_with_options(\n self,\n request: dds_20151201_models.DescribeAccountsRequest,\n runtime: util_models.RuntimeOptions,\n ) -> dds_20151201_models.DescribeAccountsResponse:\n UtilClient.validate_model(request)\n query = {}\n if not UtilClient.is_unset(request.account_name):\n query['AccountName'] = request.account_name\n if not UtilClient.is_unset(request.dbinstance_id):\n query['DBInstanceId'] = request.dbinstance_id\n if not UtilClient.is_unset(request.owner_account):\n query['OwnerAccount'] = request.owner_account\n if not UtilClient.is_unset(request.owner_id):\n query['OwnerId'] = request.owner_id\n if not UtilClient.is_unset(request.resource_owner_account):\n query['ResourceOwnerAccount'] = request.resource_owner_account\n if not UtilClient.is_unset(request.resource_owner_id):\n query['ResourceOwnerId'] = request.resource_owner_id\n if not UtilClient.is_unset(request.security_token):\n query['SecurityToken'] = request.security_token\n req = open_api_models.OpenApiRequest(\n query=OpenApiUtilClient.query(query)\n )\n params = open_api_models.Params(\n action='DescribeAccounts',\n version='2015-12-01',\n protocol='HTTPS',\n pathname='/',\n method='POST',\n auth_type='AK',\n style='RPC',\n req_body_type='formData',\n body_type='json'\n )\n return TeaCore.from_map(\n dds_20151201_models.DescribeAccountsResponse(),\n self.call_api(params, req, runtime)\n )", "def account_post(request):\n fields = [\"fname\", \"lname\", \"email\", \"token\"]\n body = None\n\n try:\n body = request.get_json()\n except:\n return http400(\"Missing 
body\")\n\n body_validation = validate_body(body, fields)\n # check that body validation succeeded\n if body_validation[1] != 200:\n return body_validation\n\n auth = azure_refresh_token(body[\"token\"])\n if not auth[0]:\n return http400(\"Not Authenticated\")\n\n account_db = Database(\"accounts\")\n\n try:\n db_entry = {\n \"fname\": body[\"fname\"],\n \"lname\": body[\"lname\"],\n \"email\": body[\"email\"],\n }\n\n account_db.add(db_entry, id=body[\"email\"])\n except:\n return http400(\"Email already taken\")\n\n response = {\n \"fname\": body[\"fname\"],\n \"lname\": body[\"lname\"],\n \"email\": body[\"email\"],\n \"access_token\": auth[0],\n \"refresh_token\": auth[1],\n }\n\n return jsonHttp200(\"Account Created\", response)", "def get_account():\n\n wallet = \"TTfoWGU2M939cgZm8CksPtz1ytJRM9GiN7\"\n\n url = \"https://api.trongrid.io/v1/accounts/{}\".format(wallet)\n\n print(url)\n\n response = requests.request(\"GET\", url)\n\n print(response.text)", "def get_account_info(self):\n resp = requests.get(\n self.URL + 'info/',\n headers={'Authorization': 'Token ' + self.api_key}\n )\n\n return self.__handle_response(resp)", "def check_ui_response_basic_info(response, expected_code=200, expected_type=CONTENT_TYPE_HTML,\n expected_title=\"Magpie Administration\"):\n # type: (AnyResponseType, int, Str, Optional[Str]) -> Str\n msg = None \\\n if get_header(\"Content-Type\", response.headers) != CONTENT_TYPE_JSON \\\n else \"Response body: {}\".format(get_json_body(response))\n check_val_equal(response.status_code, expected_code, msg=msg)\n check_val_is_in(\"Content-Type\", dict(response.headers))\n check_val_is_in(expected_type, get_response_content_types_list(response))\n if expected_title:\n check_val_is_in(expected_title, response.text, msg=null) # don't output big html if failing\n return response.text" ]
[ "0.5829913", "0.57385606", "0.57148266", "0.5661263", "0.56181955", "0.5576239", "0.55430156", "0.5522742", "0.54032105", "0.5379178", "0.53114665", "0.52346975", "0.52147704", "0.5186668", "0.5185481", "0.5180433", "0.51534367", "0.51526195", "0.5146171", "0.51339394", "0.5118431", "0.51027346", "0.50815433", "0.50807893", "0.5068942", "0.5066099", "0.50627553", "0.5057437", "0.50537616", "0.5052173" ]
0.5999456
0
Build omega and weight for one quadrant.
def Build_quadrant(self) : self.omega = np.zeros((self.n_dir,3)) self.weight = np.zeros((self.n_dir)) if self.sn==2 : direction = 0.577350269189625764509149 weight = 1. self.omega[0,0] = direction self.omega[0,1] = direction self.omega[0,2] = direction self.weight[0] = weight elif self.sn==4 : direction_1 = 0.350021174581540677777041 direction_2 = 0.868890300722201205229788 weight = 1./3. self.omega[0,0] = direction_2 self.omega[0,1] = direction_1 self.omega[0,2] = direction_1 self.omega[1,0] = direction_1 self.omega[1,1] = direction_2 self.omega[1,2] = direction_1 self.omega[2,0] = direction_1 self.omega[2,1] = direction_1 self.omega[2,2] = direction_2 self.weight[0] = weight self.weight[1] = weight self.weight[2] = weight elif self.sn==6 : direction_1 = 0.266635401516704720331535 direction_2 = 0.681507726536546927403750 direction_3 = 0.926180935517489107558380 weight_1 = 0.176126130863383433783565 weight_2 = 0.157207202469949899549768 self.omega[0,0] = direction_3 self.omega[0,1] = direction_1 self.omega[0,2] = direction_1 self.omega[1,0] = direction_2 self.omega[1,1] = direction_2 self.omega[1,2] = direction_1 self.omega[2,0] = direction_1 self.omega[2,1] = direction_3 self.omega[2,2] = direction_1 self.omega[3,0] = direction_2 self.omega[3,1] = direction_1 self.omega[3,2] = direction_2 self.omega[4,0] = direction_1 self.omega[4,1] = direction_2 self.omega[4,2] = direction_2 self.omega[5,0] = direction_1 self.omega[5,1] = direction_1 self.omega[5,2] = direction_3 self.weight[0] = weight_1 self.weight[1] = weight_2 self.weight[2] = weight_1 self.weight[3] = weight_2 self.weight[4] = weight_2 self.weight[5] = weight_1 elif self.sn==8 : direction_1 = 0.218217890235992381266097 direction_2 = 0.577350269189625764509149 direction_3 = 0.786795792469443145800830 direction_4 = 0.951189731211341853132399 weight_1 = 0.120987654320987654320988 weight_2 = 0.0907407407407407407407407 weight_3 = 0.0925925925925925925925926 self.omega[0,0] = direction_4 self.omega[0,1] = direction_1 self.omega[0,2] = direction_1 self.omega[1,0] = direction_3 self.omega[1,1] = direction_2 self.omega[1,2] = direction_1 self.omega[2,0] = direction_2 self.omega[2,1] = direction_3 self.omega[2,2] = direction_1 self.omega[3,0] = direction_1 self.omega[3,1] = direction_4 self.omega[3,2] = direction_1 self.omega[4,0] = direction_3 self.omega[4,1] = direction_1 self.omega[4,2] = direction_2 self.omega[5,0] = direction_2 self.omega[5,1] = direction_2 self.omega[5,2] = direction_2 self.omega[6,0] = direction_1 self.omega[6,1] = direction_3 self.omega[6,2] = direction_2 self.omega[7,0] = direction_2 self.omega[7,1] = direction_1 self.omega[7,2] = direction_3 self.omega[8,0] = direction_1 self.omega[8,1] = direction_2 self.omega[8,2] = direction_3 self.omega[9,0] = direction_1 self.omega[9,1] = direction_1 self.omega[9,2] = direction_4 self.weight[0] = weight_1 self.weight[1] = weight_2 self.weight[2] = weight_2 self.weight[3] = weight_1 self.weight[4] = weight_2 self.weight[5] = weight_3 self.weight[6] = weight_2 self.weight[7] = weight_2 self.weight[8] = weight_2 self.weight[9] = weight_1 elif self.sn==10 : direction_1 = 0.189321326478010476671494 direction_2 = 0.508881755582618974382711 direction_3 = 0.694318887594384317279217 direction_4 = 0.839759962236684758403029 direction_5 = 0.963490981110468484701598 weight_1 = 0.0893031479843567214704325 weight_2 = 0.0725291517123655242296233 weight_3 = 0.0450437674364086390490892 weight_4 = 0.0539281144878369243545650 self.omega[0,0] = direction_5 self.omega[0,1] = direction_1 
self.omega[0,2] = direction_1 self.omega[1,0] = direction_4 self.omega[1,1] = direction_2 self.omega[1,2] = direction_1 self.omega[2,0] = direction_3 self.omega[2,1] = direction_3 self.omega[2,2] = direction_1 self.omega[3,0] = direction_2 self.omega[3,1] = direction_4 self.omega[3,2] = direction_1 self.omega[4,0] = direction_1 self.omega[4,1] = direction_5 self.omega[4,2] = direction_1 self.omega[5,0] = direction_4 self.omega[5,1] = direction_1 self.omega[5,2] = direction_2 self.omega[6,0] = direction_3 self.omega[6,1] = direction_2 self.omega[6,2] = direction_2 self.omega[7,0] = direction_2 self.omega[7,1] = direction_3 self.omega[7,2] = direction_2 self.omega[8,0] = direction_1 self.omega[8,1] = direction_4 self.omega[8,2] = direction_2 self.omega[9,0] = direction_3 self.omega[9,1] = direction_1 self.omega[9,2] = direction_3 self.omega[10,0] = direction_2 self.omega[10,1] = direction_2 self.omega[10,2] = direction_3 self.omega[11,0] = direction_1 self.omega[11,1] = direction_3 self.omega[11,2] = direction_3 self.omega[12,0] = direction_2 self.omega[12,1] = direction_1 self.omega[12,2] = direction_4 self.omega[13,0] = direction_1 self.omega[13,1] = direction_2 self.omega[13,2] = direction_4 self.weight[0] = weight_1 self.weight[1] = weight_2 self.weight[2] = weight_3 self.weight[3] = weight_2 self.weight[4] = weight_1 self.weight[5] = weight_2 self.weight[6] = weight_4 self.weight[7] = weight_4 self.weight[8] = weight_2 self.weight[9] = weight_3 self.weight[10] = weight_4 self.weight[11] = weight_3 self.weight[12] = weight_2 self.weight[13] = weight_2 self.weight[14] = weight_1 elif self.sn==12 : direction = np.zeros((6,1)) direction[0] = 0.167212652822713264084504 direction[1] = 0.459547634642594690016761 direction[2] = 0.628019096642130901034766 direction[3] = 0.760021014833664062877138 direction[4] = 0.872270543025721502340662 direction[5] = 0.971637719251358378302376 weight_1 = 0.0707625899700910439766549 weight_2 = 0.0558811015648888075828962 weight_3 = 0.0373376737588285824652402 weight_4 = 0.0502819010600571181385765 weight_5 = 0.0258512916557503911218290 for i in xrange(0,6) : self.omega[i,0] = direction[5-i] self.omega[i,1] = direction[i] self.omega[i,2] = direction[0] offset = 6 for i in xrange(0,5) : self.omega[offset+i,0] = direction[4-i] self.omega[offset+i,1] = direction[i] self.omega[offset+i,2] = direction[1] offset += 5 for i in xrange(0,4) : self.omega[offset+i,0] = direction[3-i] self.omega[offset+i,1] = direction[i] self.omega[offset+i,2] = direction[2] offset += 4 for i in xrange(0,3) : self.omega[offset+i,0] = direction[2-i] self.omega[offset+i,1] = direction[i] self.omega[offset+i,2] = direction[3] offset += 3 for i in xrange(0,2) : self.omega[offset+i,0] = direction[1-i] self.omega[offset+i,1] = direction[i] self.omega[offset+i,2] = direction[4] offset += 2 self.omega[offset+i,0] = direction[0] self.omega[offset+i,1] = direction[1] self.omega[offset+i,2] = direction[5] self.weight[0] = weigth_1 self.weight[1] = weight_2 self.weight[2] = weight_3 self.weight[3] = weight_3 self.weight[4] = weight_2 self.weight[5] = weight_1 self.weight[6] = weight_2 self.weight[7] = weight_4 self.weight[8] = weight_5 self.weight[9] = weight_4 self.weight[10] = weight_2 self.weight[11] = weight_3 self.weight[12] = weight_5 self.weight[13] = weight_5 self.weight[14] = weight_3 self.weight[15] = weight_3 self.weight[16] = weight_4 self.weight[17] = weight_3 self.weight[18] = weight_2 self.weight[19] = weight_2 self.weight[20] = weight_1
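A standalone sanity check of the S4 values used in the sn==4 branch above (written independently of the class, whose constructor is not shown): the three direction vectors should have unit length and the quadrant weights should sum to one.

import numpy as np

# S4 level-symmetric set for one quadrant: 3 directions, equal weights of 1/3
d1, d2 = 0.350021174581540677777041, 0.868890300722201205229788
omega = np.array([[d2, d1, d1],
                  [d1, d2, d1],
                  [d1, d1, d2]])
weight = np.full(3, 1.0 / 3.0)

assert np.allclose(np.linalg.norm(omega, axis=1), 1.0)  # directions lie on the unit sphere
assert np.isclose(weight.sum(), 1.0)                    # quadrant weights sum to one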
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calc_quad(self,mw,A0,A1,A2): \n return (A0 + A1 * mw + A2 * mw**2)", "def generate_quadrant_coordinates(self):\n qw = 1. # quadrant width\n Qc = np.array([[0, 0]])\n Qo = np.array([[-qw, -qw], [qw, qw], [qw, -qw], [-qw, qw]])\n Qstd = np.array([qw])\n if self.center_first_l_levels > 0:\n Qstd *= 0.\n\n ls = self.per_level_speed\n s = np.array([[ls[0], ls[0]]])\n\n row_idx = 0 # start filling adjacency mat from root node\n col_idx = 1 # skip the root node and start from 2nd node\n for l in range(self.nl):\n Qo /= 2\n qw /= 2\n for n in range(self.nn[l]):\n for c in range(self.nc[l]):\n pc = Qc[row_idx]\n cc = pc + Qo[c]\n Qc = np.append(Qc, [cc], axis=0)\n if l < self.center_first_l_levels - 1:\n Qstd = np.append(Qstd, 0)\n else:\n Qstd = np.append(Qstd, qw)\n col_idx += 1\n s = np.append(s, [[ls[l+1], ls[l+1]]], axis=0)\n # Increase parent index after populating all its children nodes\n row_idx += 1\n\n return Qc.transpose(), Qstd.transpose(), s.transpose()", "def getQuadrilaterals(self):\n pass", "def calculateElementQuadrature(self):\n #\n #get physical locations of quadrature points and jacobian information there\n #assume all components live on the same mesh\n #\n #mwf debug\n #import pdb\n #pdb.set_trace()\n self.u[0].femSpace.elementMaps.getValues(self.elementQuadraturePoints,\n self.q['x'])\n if self.movingDomain:\n if self.tLast_mesh != None:\n self.q['xt'][:]=self.q['x']\n self.q['xt']-=self.q['x_last']\n alpha = 1.0/(self.t_mesh - self.tLast_mesh)\n self.q['xt']*=alpha\n else:\n self.q['xt'][:]=0.0\n self.q['x_last'][:]=self.q['x']\n self.u[0].femSpace.elementMaps.getJacobianValues(self.elementQuadraturePoints,\n self.q['J'],\n self.q['inverse(J)'],\n self.q['det(J)'])\n self.q['abs(det(J))']=numpy.absolute(self.q['det(J)'])\n #\n # get physical space integration weights\n #\n self.q['dV'] = numpy.zeros((self.mesh.nElements_global,self.nQuadraturePoints_element),'d')\n cfemIntegrals.calculateIntegrationWeights(self.q['abs(det(J))'],\n self.elementQuadratureWeights[('u',0)],\n self.q['dV'])\n for ci in range(self.nc): self.q[('dV_u',ci)] = self.q['dV']\n #\n #get shape information at the quadrature points\n #\n self.testSpace[0].getBasisValues(self.elementQuadraturePoints,\n self.q[('w',0)])\n cfemIntegrals.calculateWeightedShape(self.elementQuadratureWeights[('u',0)],\n self.q['abs(det(J))'],\n self.q[('w',0)],\n self.q[('w*dV',0)])\n cfemIntegrals.calculateWeightedShape(self.elementQuadratureWeights[('m',0)],\n self.q['abs(det(J))'],\n self.q[('w',0)],\n self.q[('w*dV_m',0)])\n self.testSpace[0].getBasisGradientValues(self.elementQuadraturePoints,\n self.q['inverse(J)'],\n self.q[('grad(w)',0)])\n cfemIntegrals.calculateWeightedShapeGradients(self.elementQuadratureWeights[('u',0)],\n self.q['abs(det(J))'],\n self.q[('grad(w)',0)],\n self.q[('grad(w)*dV',0)])\n\n #\n self.ellamDiscretization.updateElementQuadrature(self.q)\n #\n self.coefficients.initializeElementQuadrature(self.timeIntegration.t,self.q)", "def buildObjective(self):\r\n\r\n # self.z_prior might be the modified version\r\n self.L_elbo = T.mean(self.reconst + self.conditional_prior + self.w_prior + self.z_prior)\r\n\r\n self.L_elbo_modif = T.mean(self.reconst + self.conditional_prior + self.w_prior_modif + self.z_prior_modif)\r\n\r\n #---Getting model parameter---#\r\n cg = ComputationGraph(self.L_elbo)\r\n #self.phi_theta is the list of all the parameters in q and p.\r\n self.params = VariableFilter(roles=[PARAMETER])(cg.variables)", "def quadrature_weights(b, convention='Gauss-Legendre'):\n if convention 
== 'Clenshaw-Curtis':\n # Use the fast fft based method to compute these weights\n # see \"Fast evaluation of quadrature formulae on the sphere\"\n w = _clenshaw_curtis_weights(n=2 * b)\n W = np.empty((2 * b + 2, 2 * b + 1))\n W[:] = w[None, :]\n elif convention == 'Gauss-Legendre':\n # We found this formula in:\n # \"A Fast Algorithm for Spherical Grid Rotations and its Application to Singular Quadrature\"\n # eq. 10\n _, w = leggauss(b + 1)\n W = w[None, :] * (2 * np.pi / (2 * b + 2) * np.ones(2 * b + 2)[:, None])\n else:\n raise ValueError('Unknown convention:' + str(convention))\n\n return W", "def getQuadOp(self):\n return self.basis2grid(np.eye(self.nb), axis = 0)", "def computequadweights(self, order):\n if order not in AVAILABLEORDERS:\n neighbor = find_nearest(AVAILABLEORDERS, order)\n raise ValueError(\n \"Order not available. Next closest would be\" \"%i.\",\n AVAILABLEORDERS[neighbor],\n )\n filename = \"data/\" + str(order) + \"_levelsym.txt\"\n __location__ = os.path.realpath(\n os.path.join(os.getcwd(), os.path.dirname(__file__))\n )\n path = os.path.join(__location__, filename)\n xyzw = loadtxt(path, delimiter=\",\")\n # d = levelsymmetricdictionary()\n # xyzw = d[order]\n w = xyzw[:, 3]\n w /= sum(w)\n w *= 4 * pi\n return w", "def q_(w,R,lam=1064.0e-9):\n\n if R!=np.inf:\n q=np.pi*w**2*R/(np.pi*w**2-1j*R*lam)\n else:\n q=1j*np.pi*w**2/lam\n\n return q", "def _generateQuadsAndPolys(self,SVL):\n ROMdata = SVL.interpolationInfo()\n self.maxPolyOrder = SVL.maxPolyOrder\n #check input space consistency\n samVars=self.axisName[:]\n romVars=SVL.features[:]\n try:\n for v in self.axisName:\n samVars.remove(v)\n romVars.remove(v)\n except ValueError:\n self.raiseAnError(IOError, f'variable {v} used in sampler but not ROM features! Collocation requires all vars in both.')\n if len(romVars) > 0:\n self.raiseAnError(IOError, f'variables {romVars} specified in ROM but not sampler! 
Collocation requires all vars in both.')\n for v in ROMdata.keys():\n if v not in self.axisName:\n self.raiseAnError(IOError, f'variable \"{v}\" given interpolation rules but variable not in sampler!')\n else:\n self.gridInfo[v] = ROMdata[v] #quad, poly, weight\n #set defaults, then replace them if they're asked for\n for v in self.axisName:\n if v not in self.gridInfo:\n self.gridInfo[v]={'poly': 'DEFAULT', 'quad': 'DEFAULT', 'weight': '1'}\n #establish all the right names for the desired types\n for varName,dat in self.gridInfo.items():\n if dat['poly'] == 'DEFAULT':\n dat['poly'] = self.dists[varName].preferredPolynomials\n if dat['quad'] == 'DEFAULT':\n dat['quad'] = self.dists[varName].preferredQuadrature\n polyType=dat['poly']\n subType = None\n distr = self.dists[varName]\n if polyType == 'Legendre':\n if distr.type == 'Uniform':\n quadType=dat['quad']\n else:\n quadType='CDF'\n subType=dat['quad']\n if subType not in ['Legendre', 'ClenshawCurtis']:\n self.raiseAnError(IOError, f'Quadrature {subType} not compatible with Legendre polys for {distr.type} for variable {varName}!')\n else:\n quadType=dat['quad']\n if quadType not in distr.compatibleQuadrature:\n self.raiseAnError(IOError, f'Quadrature type \"{quadType}\" is not compatible with variable \"{varName}\" distribution \"{distr.type}\"')\n\n quad = Quadratures.factory.returnInstance(quadType, Subtype=subType)\n quad.initialize(distr)\n self.quadDict[varName]=quad\n\n poly = OrthoPolynomials.factory.returnInstance(polyType)\n poly.initialize(quad)\n self.polyDict[varName] = poly\n\n self.importanceDict[varName] = float(dat['weight'])", "def buildQ(self):\r\n\r\n print 'Building Q ...'\r\n\r\n self.y = T.matrix('y')\r\n\r\n mlp = MLP(activations=self.hyper['q_activs'],\r\n dims=self.hyper['q_dims'],\r\n weights_init=self.hyper['q_W_init'],\r\n biases_init=Constant(0))\r\n\r\n q_parameters = mlp.apply(self.y)\r\n mlp.initialize()\r\n\r\n # self.qxgy_mu.shape == (minibatch size, num of dimension of x)\r\n self.qxgy_mu = q_parameters[:,:self.hyper['x_dim']]\r\n\r\n # self.qxgy_var.shape == (minibatch size, num of dimension of x)\r\n self.qxgy_var = T.exp( q_parameters[:,self.hyper['x_dim']:2*self.hyper['x_dim']] )\r\n\r\n # self.qwgy_mu.shape == (minibatch size, num of dimension of w)\r\n self.qwgy_mu = q_parameters[:,2*self.hyper['x_dim']:2*self.hyper['x_dim']+self.hyper['w_dim']]\r\n\r\n # self.qwgy_var.shape == (minibatch size, num of dimension of w)\r\n self.qwgy_var = T.exp( q_parameters[:,2*self.hyper['x_dim']+self.hyper['w_dim']:] )\r\n\r\n\r\n #---Will be useful to compute samples from q(x|y)---#\r\n #self.eps_x.shape == (minibatch size, # of x samples , # of dimension of x)\r\n self.eps_x = self.srng.normal((self.qxgy_mu.shape[0] ,self.hyper['L_x'] ,self.hyper['x_dim']))\r\n\r\n #self.x corresponds roughly to the function g(\\epsilon,y) (see reparametrization trick in Kingma 2014)\r\n #self.x.shape == (minibatch size, # of x samples , # of dimension of x)\r\n self.x = self.qxgy_mu.dimshuffle(0,'x',1) + T.sqrt(self.qxgy_var).dimshuffle(0,'x',1)*self.eps_x\r\n\r\n #---Will be useful to compute samples from q(w|y)---#\r\n #self.eps_w.shape == (minibatch size, # of w samples , # of dimension of w)\r\n self.eps_w = self.srng.normal((self.qwgy_mu.shape[0] ,self.hyper['L_w'] ,self.hyper['w_dim']))\r\n\r\n #self.w corresponds roughly to the function g(\\epsilon,y) (see reparametrization trick in Kingma 2014)\r\n #self.w.shape == (minibatch size, # of w samples , # of dimension of w)\r\n self.w = self.qwgy_mu.dimshuffle(0,'x',1) + 
T.sqrt(self.qwgy_var).dimshuffle(0,'x',1)*self.eps_w\r\n\r\n\r\n #---Building the log density q(x|y)---#\r\n little_num = 10**(-32)\r\n inside_exp = -T.sum((self.x - self.qxgy_mu.dimshuffle(0,'x',1))**2/(2*self.qxgy_var.dimshuffle(0,'x',1)), axis=2)\r\n norm_cst = (2*np.pi)**(-self.hyper['x_dim']/2.)*T.exp(T.sum(T.log(self.qxgy_var), axis=1))**(-1/2.)\r\n\r\n # shape == (minibatch size, # of x samples)\r\n qxgy = norm_cst.dimshuffle(0,'x')*T.exp(inside_exp)\r\n\r\n # shape == (minibatch size, # of x samples)\r\n self.log_qxgy = T.log(qxgy + little_num)", "def build(self,input_shape):\n\n self.w = self.add_weight(shape=(input_shape[-1],self.units),\n initializer='random_normal',\n trainable=True)\n self.b = self.add_weight(shape=(self.units,),\n initializer='random_normal',\n trainable=True)", "def _set_points_and_weights(self):\n if hasattr(self, 'corr'):\n corr = self.corr\n else:\n corr = None\n self.quadrature = Quadrature(parameters=self.parameters, basis=self.basis, \\\n points=self.inputs, mesh=self.mesh, corr=corr)\n quadrature_points, quadrature_weights = self.quadrature.get_points_and_weights()\n if self.subsampling_algorithm_name is not None:\n P = self.get_poly(quadrature_points)\n W = np.mat( np.diag(np.sqrt(quadrature_weights)))\n A = W * P.T\n self.A = A\n self.P = P\n mm, nn = A.shape\n m_refined = int(np.round(self.sampling_ratio * nn))\n z = self.subsampling_algorithm_function(A, m_refined)\n self._quadrature_points = quadrature_points[z,:]\n self._quadrature_weights = quadrature_weights[z] / np.sum(quadrature_weights[z])\n else:\n self._quadrature_points = quadrature_points\n self._quadrature_weights = quadrature_weights\n P = self.get_poly(quadrature_points)\n W = np.mat( np.diag(np.sqrt(quadrature_weights)))\n A = W * P.T\n self.A = A\n self.P = P", "def Q_term(\n omega1, # vorticity-1 component\n omega2, # vorticity-2 component\n omega3, # vorticity-3 component\n s11, # strain rate-11 component\n s12, # strain rate-12 component\n s13, # strain rate-13 component\n s22, # strain rate-22 component\n s23, # strain rate-23 component\n s33): # strain rate-33 component\n #---------------------------------------------------------------------#\n # Numerator and denominator #\n #---------------------------------------------------------------------#\n num = omega1*s11*omega1 + omega1*s12*omega2 + omega1*s13*omega3 +\\\n omega2*s12*omega1 + omega2*s22*omega2 + omega2*s23*omega3+\\\n omega3*s13*omega1 + omega3*s23*omega2 + omega3*s33*omega3\n den1 = omega1*omega1 + omega2*omega2 + omega3*omega3\n den2 = (s11*s11 + s12*s12 + s13*s13 + s12*s12 + s22*s22 + s23*s23 +\\\n s13*s13 + s23*s23 + s33*s33)**0.5\n den = ((2.0/3.0)**0.5)* den1 * den2\n #---------------------------------------------------------------------#\n # Q calculation #\n #---------------------------------------------------------------------#\n Q = num/den\n\n return Q", "def generateEqns(\n self, Simplify=False, Lambdify=True, FloatingBase=False,\n backend=\"numpy\"\n ):\n self.joint_syms = OrderedDict()\n self.global_syms = {}\n self.global_syms[\"Jname2q\"] = {}\n self.global_syms[\"q2Jname\"] = {}\n _Lname2parentJname, _Jname2parentJname = self._preprocess_heirarchy(\n FloatingBase\n )\n self.global_syms[\"Lname2parentJname\"] = _Lname2parentJname\n self.global_syms[\"Jname2parentJname\"] = _Jname2parentJname\n\n # record the number of degrees of freedom\n degrees_of_freedom = sum(\n [self.Joints[jnt][\"type\"] != \"fixed\" for jnt in self.Joints]\n )\n self.global_syms[\"dof\"] = degrees_of_freedom\n\n # joint 
positions q\n self.global_syms[\"q\"] = [\n sp.Symbol(f\"{self.sym_prefix}q{j}\")\n for j in range(degrees_of_freedom)\n ]\n\n # joint velocities dq\n self.global_syms[\"dq\"] = [\n sp.Symbol(f\"{self.sym_prefix}dq{j}\")\n for j in range(degrees_of_freedom)\n ]\n\n # joint user forces tau\n self.global_syms[\"qTau\"] = [\n sp.Symbol(f\"{self.sym_prefix}qTau{j}\")\n for j in range(degrees_of_freedom)\n ]\n\n # [x,y,z] translations (meaning relative to useage)\n self.global_syms[\"xyz\"] = [\n sp.Symbol(f\"{self.sym_prefix}x\"),\n sp.Symbol(f\"{self.sym_prefix}y\"),\n sp.Symbol(f\"{self.sym_prefix}z\"),\n ]\n zero_xyz = [(s, 0) for s in self.global_syms[\"xyz\"]]\n\n # [Wx,Wy,Wz] rotations (meaning relative to useage)\n self.global_syms[\"Wxyz\"] = [\n sp.Symbol(f\"{self.sym_prefix}Wx\"),\n sp.Symbol(f\"{self.sym_prefix}Wy\"),\n sp.Symbol(f\"{self.sym_prefix}Wz\"),\n ]\n zero_Wxyz = [(s, 0) for s in self.global_syms[\"Wxyz\"]]\n\n # translational and rotational accelerations [Ax,Ay,Az,AWx,AWy,AWz]\n # (meaning relative to useage)\n self.global_syms[\"extAccel\"] = [\n sp.Symbol(f\"{self.sym_prefix}Ax\"),\n sp.Symbol(f\"{self.sym_prefix}Ay\"),\n sp.Symbol(f\"{self.sym_prefix}Az\"),\n sp.Symbol(f\"{self.sym_prefix}AWx\"),\n sp.Symbol(f\"{self.sym_prefix}AWy\"),\n sp.Symbol(f\"{self.sym_prefix}AWz\"),\n ]\n\n #\n # create terms for each joint/link combo in the local isolated\n # reference frame (terms that need no other connected joint terms)\n #\n q_indx = 0\n for j_name in self.Joints:\n joint = self.Joints[j_name]\n if joint[\"child\"] not in self.Links:\n raise RuntimeError(\n f'child ({joint[\"child\"]}) of joint({j_name})'\n ' did not exist. Must create a link with this name.')\n clink = self.Links[joint[\"child\"]]\n joint_type = joint[\"type\"]\n\n # initialize an eqn dict for this joint (and link)\n self.joint_syms[j_name] = {}\n E = self.joint_syms[j_name]\n\n # joint (and link) mass\n E[\"mass\"] = clink[\"mass\"]\n\n # joint (and link) specific inertia matrix\n Inertia = sp.Matrix(clink[\"inertia\"])\n if Inertia.shape == (3, 3):\n E[\"M\"] = sp.Matrix(\n [\n [clink[\"mass\"], 0, 0, 0, 0, 0],\n [0, clink[\"mass\"], 0, 0, 0, 0],\n [0, 0, clink[\"mass\"], 0, 0, 0],\n [0, 0, 0, Inertia[0, 0], Inertia[0, 1], Inertia[0, 2]],\n [0, 0, 0, Inertia[1, 0], Inertia[1, 1], Inertia[1, 2]],\n [0, 0, 0, Inertia[2, 0], Inertia[2, 1], Inertia[2, 2]],\n ]\n )\n elif Inertia.shape == (6, 6):\n E[\"M\"] = Inertia\n else:\n raise ValueError(\n f\"inertia shape must be 3x3 or 6x6, not {Inertia.shape}\")\n\n # re-record (for convenience) the local q and dq, joint and joint\n # velocity terms, in their joint symbol containers\n if joint_type == \"fixed\":\n E[\"q\"] = 0\n E[\"dq\"] = 0\n E[\"qTau\"] = 0\n else:\n E[\"q\"] = self.global_syms[\"q\"][q_indx]\n E[\"dq\"] = self.global_syms[\"dq\"][q_indx]\n E[\"qTau\"] = self.global_syms[\"qTau\"][q_indx]\n q_indx += 1\n self.global_syms[\"q2Jname\"][E[\"q\"]] = j_name\n self.global_syms[\"Jname2q\"][j_name] = E[\"q\"]\n\n # process each joint type and apply the relevant q to a rpy,xyz\n # transform\n E[\"q_rpy\"] = sp.Matrix([0, 0, 0])\n E[\"q_xyz\"] = sp.Matrix([0, 0, 0])\n if joint_type == \"revolute\" or joint_type == \"continuous\":\n E[\"q_rpy\"] = E[\"q\"] * sp.Matrix(joint[\"axis_xyz\"])\n elif joint_type == \"prismatic\":\n E[\"q_xyz\"] = E[\"q\"] * sp.Matrix(joint[\"axis_xyz\"])\n elif joint_type == \"fixed\":\n pass\n elif joint_type == \"floating\":\n raise ValueError(\n \"no direct floating joint support (should have been\" +\n \" replaced by 3 
prismatic, 3 continuous)\"\n )\n elif joint_type == \"planar\":\n raise ValueError(\n \"no direct planar joint support (should have been\" +\n \" replaced by 2 prismatic)\"\n )\n\n # creating homogeneous transformation matrix T, in joint and mass\n # spaces for various tranforms.\n #\n # The chain of transformations is diagramed as:\n # ... parent joint --> joint origin --> joint actuated --> ... etc.\n # actuated | |\n # --> parent link --> link\n #\n\n # parent joint's actuateed frame to joint's actuated frame\n E[\"Tlocal_joint\"] = rigmech.T(\n joint[\"origin_xyz\"], joint[\"origin_rpy\"]\n ) * rigmech.T(E[\"q_xyz\"], E[\"q_rpy\"])\n\n # joint's actuated frame to the child link's inertial frame\n E[\"T_joint2cLink\"] = rigmech.T(\n clink[\"origin_xyz\"], clink[\"origin_rpy\"])\n\n # parent joint's actuateed frame to child link's frame\n E[\"Tlocal_link\"] = E[\"Tlocal_joint\"] * E[\"T_joint2cLink\"]\n\n # inverse transformations\n E[\"Tlocal_joint_inv\"] = rigmech.T_inv(E[\"Tlocal_joint\"])\n E[\"Tlocal_link_inv\"] = rigmech.T_inv(E[\"Tlocal_link\"])\n\n print(f\"rigmech: Calculated {j_name} isolated.\")\n #\n # create non-isolated terms for each joint (terms that require\n # information about other connected joints)\n #\n\n for j_name in self.Joints:\n E = self.joint_syms[j_name]\n\n # T: transforms from base to joint or mass, for forward transform\n # calculations\n E[\"T_joint\"] = self.T_joint_chain(j_name)\n E[\"T_link\"] = E[\"T_joint\"] * E[\"T_joint2cLink\"]\n\n # T_inv: transforms for forward inverse transform calculations\n E[\"T_inv_joint\"] = rigmech.T_inv(E[\"T_joint\"])\n E[\"T_inv_link\"] = rigmech.T_inv(E[\"T_link\"])\n\n # xyz: translation from base to joint or link frame\n E[\"xyz_joint\"] = rigmech.applyTx(\n E[\"T_joint\"], sp.Matrix(self.global_syms[\"xyz\"]))\n E[\"xyz_link\"] = rigmech.applyTx(\n E[\"T_link\"], sp.Matrix(self.global_syms[\"xyz\"]))\n E[\"xyz_coj\"] = E[\"xyz_joint\"].subs(zero_xyz) # center of joint\n E[\"xyz_com\"] = E[\"xyz_link\"].subs(zero_xyz) # center of mass\n\n # Wxyz: rotation from base to joint or link frame\n E[\"W\"] = self.W_joint_chain(j_name)\n E[\"Wxyz_joint\"] = rigmech.applyTw(\n E[\"T_joint\"], E[\"W\"]+sp.Matrix(self.global_syms[\"Wxyz\"]))\n E[\"Wxyz_link\"] = rigmech.applyTw(\n E[\"T_link\"], E[\"W\"]+sp.Matrix(self.global_syms[\"Wxyz\"]))\n E[\"Wxyz_coj\"] = E[\"Wxyz_joint\"].subs(zero_Wxyz) # coj orientation\n E[\"Wxyz_com\"] = E[\"Wxyz_link\"].subs(zero_Wxyz) # com orientation\n\n # calculate the d[x(i) y(i) z(i) Wx(i) Wy(i) Wz(i)]/dq(j)\n # a.k.a. 
jacobian components for the current joint/link frame\n # (i) with respect to all the other joints (j) to form a\n # complete Jacobian matrix\n E[\"J_joint\"] = sp.Matrix()\n E[\"J_link\"] = sp.Matrix()\n for jnm in self.Joints:\n jnm_q = self.joint_syms[jnm][\"q\"]\n if jnm_q is not 0:\n\n # joints:\n dxyz_dq__joint = E[\"xyz_joint\"].diff(jnm_q)\n dWxyz_dq__joint = E[\"Wxyz_joint\"].diff(jnm_q)\n new_row = dxyz_dq__joint.col_join(dWxyz_dq__joint)\n E[\"J_joint\"] = E[\"J_joint\"].row_join(new_row)\n\n # links:\n dxyz_dq__link = E[\"xyz_link\"].diff(jnm_q)\n dWxyz_dq__link = E[\"Wxyz_link\"].diff(jnm_q)\n new_row = dxyz_dq__link.col_join(dWxyz_dq__link)\n E[\"J_link\"] = E[\"J_link\"].row_join(new_row)\n\n # evaluate the link frame Jacobian at xyz = [0,0,0] and\n # Wxyz = [0,0,0] to get the center of mass (COM) Jacobian\n E[\"J_com\"] = E[\"J_link\"].subs(zero_xyz + zero_Wxyz)\n # evaluate the joint frame Jacobian at xyz = [0,0,0] and\n # Wxyz = [0,0,0] to get the center of joint (COJ) Jacobian\n E[\"J_coj\"] = E[\"J_joint\"].subs(zero_xyz + zero_Wxyz)\n\n # Mq: joint space inertia matrix of single joint\n E[\"Mq\"] = E[\"J_com\"].T * E[\"M\"] * E[\"J_com\"]\n\n # qFext: joint space matrix of the forces due to external\n # accelerations (such as gravity) on single joint\n E[\"qFext\"] = E[\"J_com\"].T * E[\"M\"] * \\\n sp.Matrix(self.global_syms[\"extAccel\"])\n\n print(f\"rigmech: Calculated {j_name} non-isolated.\")\n\n #\n # create terms common to entire mechanism\n #\n\n # Mq: joint space inertia matrix of entire mechanism\n self.global_syms[\"Mq\"] = sp.zeros(degrees_of_freedom)\n for j_name in self.Joints:\n self.global_syms[\"Mq\"] += self.joint_syms[j_name][\"Mq\"]\n\n # qFext: joint space matrix of the forces due to external\n # accelerations (such as gravity) on entire mechanism\n self.global_syms[\"qFext\"] = sp.zeros(degrees_of_freedom, 1)\n for j_name in self.Joints:\n self.global_syms[\"qFext\"] += self.joint_syms[j_name][\"qFext\"]\n\n # qFrict: joint friction in a convenient list\n self.global_syms[\"qFrict\"] = [\n self.Joints[jnt][\"friction\"]\n for jnt in self.Joints\n if not self.joint_syms[jnt][\"q\"] is 0\n ]\n\n # xyz_com: xyz center of mass of entire mechanism\n total_mass = 0.0\n weighted_mass = sp.Matrix([0, 0, 0])\n for j_name in self.Joints:\n E = self.joint_syms[j_name]\n total_mass += E[\"mass\"]\n weighted_mass += E[\"xyz_com\"] * E[\"mass\"]\n self.global_syms[\"xyz_com\"] = weighted_mass / total_mass\n self.global_syms[\"mass\"] = total_mass\n\n # Cq(q,dq) joint space Coriolis matrix (coriolis and centrifugal terms)\n # of entire mechanism\n i_max, j_max = self.global_syms[\"Mq\"].shape\n Mq = self.global_syms[\"Mq\"]\n q = self.global_syms[\"q\"]\n dq = self.global_syms[\"dq\"]\n Cq = sp.zeros(i_max, j_max)\n for k in range(len(q)):\n for i in range(i_max):\n for j in range(i_max):\n if not dq[k] is 0:\n dmij_dqk = 0 if q[k] is 0 else Mq[i, j].diff(q[k])\n dmik_dqj = 0 if q[j] is 0 else Mq[i, k].diff(q[j])\n dmkj_dqi = 0 if q[i] is 0 else Mq[k, j].diff(q[i])\n Cq[i, j] += (dmij_dqk + dmik_dqj - dmkj_dqi) * dq[k]\n Cq = 0.5 * Cq\n self.global_syms[\"Cq\"] = Cq\n\n # forces due to coriolis matrix in joint space\n self.global_syms[\"qFCoriolis\"] = Cq * sp.Matrix(dq)\n\n print(f\"rigmech: Calculated global_syms.\")\n\n if Simplify:\n print(f\"rigmech: starting simplify()\")\n self.simplify()\n\n if Lambdify:\n print(f\"rigmech: starting lambdify()\")\n self.lambdify(backend)\n\n self.global_syms[\"limits_upper\"] = \\\n np.array([\n [jnt.get('limit_upper', 
np.Inf)]\n for jnt in self.Joints.values()])\n self.global_syms[\"limits_lower\"] = \\\n np.array([\n [jnt.get('limit_lower', np.NINF)]\n for jnt in self.Joints.values()])\n\n print(f\"rigmech: done\")\n\n return self.joint_syms, self.global_syms", "def _get_J(self, omega, y):\n x = y[:-1]\n newt_lambda = y[-1]\n J = np.zeros([len(x)+1, len(x)+1])\n J[:-1, :-1] = omega + newt_lambda*np.diagflat(1/(x**2))\n J[:-1, -1] = -1/x.ravel()\n J[-1, :-1] = 1\n return J", "def omega ( self ) :\n from GaudiConfUtils.ConfigurableGenerators import DaVinci__N3BodyDecays \n ##\n pre_omega = self.make_selection (\n ## the unique tag \n 'PreOmega' ,\n ## algorithm type to be used\n DaVinci__N3BodyDecays ,\n ## input selections \n [ self.pions () , self.pi0 () ] ,\n ##\n DecayDescriptor = \" omega(782) -> pi+ pi- pi0\" ,\n ## \n Combination12Cut = \"\"\" ( AM < 1 * GeV ) &\n ( ACHI2DOCA(1,2) < 12 ) \n \"\"\" ,\n ## \n CombinationCut = \"\"\"\n ( APT > %s ) & ( ADAMASS ( 'omega(782)' ) < 100 * MeV )\n \"\"\" % ( 0.9 * self['OMEGA_PT'] ),\n ##\n MotherCut = \"\"\"\n ( PT > %s ) &\n ( chi2vx < 9 )\n \"\"\" % self['OMEGA_PT']\n )\n \n from GaudiConfUtils.ConfigurableGenerators import Pi0Veto__Tagger2g\n ## \n return self.make_selection (\n 'Omega' ,\n Pi0Veto__Tagger2g ,\n [ pre_omega ] ,\n MassWindow = 25 * MeV ,\n MassChi2 = -1 ,\n ExtraInfoIndex = 25019 ## unique ! \n )", "def makeenv(self):\n eps=np.ones((self.nx,self.ny))*const.epsilon_0\n mu=np.ones((self.nx,self.ny))*const.mu_0\n\n eps[:20,:] *= self.q #adself.ds a space of higher permittivity \n eps[-20:,:] *= self.q #adself.ds a space of higher permittivity \n eps[:,:20] *= self.q #adself.ds a space of higher permittivity \n eps[:,-20:] *= self.q #adself.ds a space of higher permittivity \n #mu[:20,:] /= self.q #adself.ds a space of higher permittivity \n #mu[-20:,:] /= self.q #adself.ds a space of higher permittivity \n #mu[:,:20] /= self.q #adself.ds a space of higher permittivity \n #mu[:,-20:] /= self.q #adself.ds a space of higher permittivity \n\n return eps, mu", "def omega_cyclotron(q, B, mass):\n return q * cgs.e * B / (mass * cgs.c)", "def do_quadrature(self, row, col):\n D = self._packet.get_dimension()\n eps = self._packet.get_eps()\n N = self._packet.get_number_components()\n # Main part of the integrand\n factor = (eps**D * self._weights * self._values[row * N + col]).reshape((-1,))\n # Sum up matrices over all quadrature nodes\n M = einsum(\"k,ik,jk\", factor, conjugate(self._bases[row]), self._bases[col])\n return M", "def allocate_constants(self):\n\n ##########################\n ##### D2Q25 parameters####\n ##########################\n t0 = (4./45.)*(4 + np.sqrt(10))\n t1 = (3./80.)*(8 - np.sqrt(10))\n t3 = (1./720.)*(16 - 5*np.sqrt(10))\n\n w_list = []\n cx_list = []\n cy_list = []\n\n # Mag 0\n cx_list += [0]\n cy_list += [0]\n w_list += [t0*t0]\n\n # Mag 1\n cx_list += [0, 0, 1, -1]\n cy_list += [1, -1, 0, 0]\n w_list += 4*[t0*t1]\n\n # Mag sqrt(2)\n cx_list += [1, 1, -1, -1]\n cy_list += [1, -1, 1, -1]\n w_list += 4*[t1*t1]\n\n # Mag 3\n cx_list += [3, -3, 0, 0]\n cy_list += [0, 0, 3, -3]\n w_list += 4*[t0*t3]\n\n # Mag sqrt(10)\n cx_list += [1, 1, -1, -1, 3, 3, -3, -3]\n cy_list += [3, -3, 3, -3, 1, -1, 1, -1]\n w_list += 8*[t1*t3]\n\n # Mag sqrt(18)\n cx_list += [3, 3, -3, -3]\n cy_list += [3, -3, 3, -3]\n w_list += 4*[t3 * t3]\n\n # Now send everything to disk\n w = np.array(w_list, order='F', dtype=num_type) # weights for directions\n cx = np.array(cx_list, order='F', dtype=int_type) # direction vector for the x direction\n cy 
= np.array(cy_list, order='F', dtype=int_type) # direction vector for the y direction\n\n self.cs = num_type(np.sqrt(1. - np.sqrt(2./5.))) # Speed of sound on the lattice\n self.num_jumpers = int_type(w.shape[0]) # Number of jumpers: should be 25\n\n self.w = cl.Buffer(self.context, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, hostbuf=w)\n self.cx = cl.Buffer(self.context, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, hostbuf=cx)\n self.cy = cl.Buffer(self.context, cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR, hostbuf=cy)", "def _build_proj_equation(free_dims, bound_dims, output_dims):\n import string\n\n _CHR_IDX = string.ascii_lowercase\n input_str = \"\"\n kernel_str = \"\"\n output_str = \"\"\n bias_axes = \"\"\n letter_offset = 0\n for i in range(free_dims):\n char = _CHR_IDX[i + letter_offset]\n input_str += char\n output_str += char\n\n letter_offset += free_dims\n for i in range(bound_dims):\n char = _CHR_IDX[i + letter_offset]\n input_str += char\n kernel_str += char\n\n letter_offset += bound_dims\n for i in range(output_dims):\n char = _CHR_IDX[i + letter_offset]\n kernel_str += char\n output_str += char\n bias_axes += char\n equation = \"%s,%s->%s\" % (input_str, kernel_str, output_str)\n\n return equation, bias_axes, len(output_str)", "def Bo_Bosol_calc(self):\n self.Bosol = (self.g*self.alpha * self.srflx ) \n #ZEROS FOR T3W APPLICATION\n self.Bo = np.zeros([self.b.shape[0]])", "def test_quadrature_grid(self):\n L = 2\n M = 2\n N = 0\n NFP = 1\n\n grid_quad = QuadratureGrid(L, M, N, NFP)\n\n roots, weights = special.js_roots(3, 2, 2)\n\n quadrature_nodes = np.stack(\n [\n np.array([roots[0]] * 5 + [roots[1]] * 5 + [roots[2]] * 5),\n np.array(\n [0, 2 * np.pi / 5, 4 * np.pi / 5, 6 * np.pi / 5, 8 * np.pi / 5] * 3\n ),\n np.zeros(15),\n ]\n ).T\n\n np.testing.assert_allclose(grid_quad.spacing.prod(axis=1), grid_quad.weights)\n np.testing.assert_allclose(grid_quad.nodes, quadrature_nodes)", "def _build_proj_equation(free_dims, bound_dims, output_dims):\n input_str = \"\"\n kernel_str = \"\"\n output_str = \"\"\n bias_axes = \"\"\n letter_offset = 0\n for i in range(free_dims):\n char = _CHR_IDX[i + letter_offset]\n input_str += char\n output_str += char\n\n letter_offset += free_dims\n for i in range(bound_dims):\n char = _CHR_IDX[i + letter_offset]\n input_str += char\n kernel_str += char\n\n letter_offset += bound_dims\n for i in range(output_dims):\n char = _CHR_IDX[i + letter_offset]\n kernel_str += char\n output_str += char\n bias_axes += char\n equation = \"%s,%s->%s\" % (input_str, kernel_str, output_str)\n\n return equation, bias_axes, len(output_str)", "def W(self):\n if not self.isVaild():\n pass\n return self.Wq() + 1.0/self.muy", "def __init__(self, p, q):\n self.p = p\n self.q = q\n # biais des unités d’entrée) -> dim (1xp)\n self.a = np.zeros((1, self.p))\n # biais des unités de sortie -> dim (1xq)\n self.b = np.zeros((1, self.q))\n # initialisés aléatoirement suivant une loi normale centrée, de variance égale à 0.01\n self.W = np.random.normal(loc=0, scale=0.1, size=(self.p, self.q))", "def generateRHS(T, sigma, qdx):\n\n b = T[1:-1]*1./sigma\n # Consider Dirichlet BC\n b[0] += T[0]\n # Consider Neumann BC\n b[-1] += qdx\n\n return b", "def _buildWeights(self):\r\n # Compute the spatial tree\r\n kd = spatial.cKDTree(self.XYin)\r\n \r\n # Perform query on all of the points in the grid\r\n dist,self.ind=kd.query(self.XYout,distance_upper_bound=self.maxdist,k=self.NNear)\r\n \r\n self.Nc = np.size(self.ind,axis=0)\r\n print '%d interpolation 
points.'%self.Nc\r\n # Now loop through and get the weights for each point\r\n self.W = np.zeros((self.NNear,self.Nc))\r\n\r\n # Print percentages\r\n p0=0\r\n pstep=5\r\n for ii in range(0,self.Nc):\r\n \r\n if self.verbose:\r\n pfinish = float(ii)/float(self.Nc)*100.0\r\n if pfinish> p0:\r\n print '%3.1f %% complete...'%pfinish\r\n p0+=pstep\r\n \r\n W = self.getWeights(dist[ii,:],self.XYin[self.ind[ii,:],0],self.XYin[self.ind[ii,:],1])\r\n self.W[:,ii] = W.T", "def _calculateOmegaOpt(N, gluonDOF, delta, incidentGaugeField, targetAdjointWilsonLine):\n\n # 2,2 is for the 2 dimensions, x and y\n omega = np.zeros((N, N, 2, 2, gluonDOF), dtype='complex') # 2 is for two dimensions, x and y\n\n derivs = [_x_deriv, _y_deriv]\n\n for i in range(N):\n for j in range(N):\n for k in range(gluonDOF):\n for l in range(2): # 2 is number of dimensions\n for n in range(2): # 2 is number of dimensions\n omega[i,j,l,n,k] = np.sum(np.array([derivs[l](incidentGaugeField[:,:,m], i, j, N, delta) * derivs[n](targetAdjointWilsonLine[:,:,k,m], i, j, N, delta) for m in range(gluonDOF)]))\n\n return omega" ]
[ "0.6052389", "0.5725959", "0.5695479", "0.5676664", "0.5578913", "0.5474302", "0.5401909", "0.5374855", "0.53578025", "0.5349752", "0.53495365", "0.5322237", "0.53042483", "0.5294827", "0.5287967", "0.52565825", "0.52493024", "0.5245003", "0.52133757", "0.5203506", "0.51762134", "0.51582706", "0.5133974", "0.5127998", "0.51276493", "0.5119619", "0.50985813", "0.5093262", "0.50863916", "0.507429" ]
0.74617565
0
Initializes a list of Log Levels
def init(cls, levels: List[str]) -> List[Level]: return [cls(lvl, val) for val, lvl in enumerate(levels)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _setup_log_levels(self):\n for logger_name, level in self.named_levels.items():\n logger = logging.getLogger(logger_name)\n logger.setLevel(level)\n LOG.info(\"Set %s to use logging level %s\", logger_name, level)", "def test_level_values(self) -> None:\n self.assertEqual(LogLevels.NOTSET, 0)\n self.assertEqual(LogLevels.DEBUG, 10)\n self.assertEqual(LogLevels.VERBOSE, 15)\n self.assertEqual(LogLevels.INFO, 20)\n self.assertEqual(LogLevels.NOTICE, 25)\n self.assertEqual(LogLevels.WARNING, 30)\n self.assertEqual(LogLevels.SUCCESS, 35)\n self.assertEqual(LogLevels.ERROR, 40)\n self.assertEqual(LogLevels.CRITICAL, 50)", "def getLevels():", "def __init__(self, level_log):\n self.logger = Logger(self.__class__.__name__, level_log).get()\n\n self.bank_holidays.append([2018, 5, 7])\n self.bank_holidays.append([2018, 6, 4])\n self.bank_holidays.append([2018, 8, 6])\n self.bank_holidays.append([2018, 10, 29])\n self.bank_holidays.append([2018, 12, 25])\n self.bank_holidays.append([2018, 12, 26])\n self.bank_holidays.append([2018, 12, 27])", "def init(level):\n Log.chosen_level = level\n logging.basicConfig(\n format=\"%(levelname)s\\t%(name)s\\t%(asctime)s\\t%(message)s\",\n level=level)", "def testLoggerLevels(self):\n logging = Logger()\n for level in range(len(logging.LEVELS)):\n testString = \"Test logging level\"\n logging.setLogLevel(level)\n logging.log(level, testString)", "def __init__(self, name, log_level):\n self._logger = logging.getLogger(str(name))\n if log_level not in [10, 20, 30, 40]:\n self._logger.setLevel(logging.INFO)\n self.write(\"LOGGER_INVALID_LEVEL\")\n else:\n self._logger.setLevel(log_level)", "def __init__(self, level=logging.DEBUG):\n\t\tself.level = level", "def __init__(self, logger, level):\n self.logger = logger\n self.level = level", "def __init__(self, logger, level):\n self.logger = logger\n self.level = level", "def __init__(self, logger, level):\n self.logger = logger\n self.level = level", "def levels(self):\n raise NotImplementedError(\"Subclasses sohuld implement levels\")", "def __init__(self, logger, level):\n\t\tself.logger = logger\n\t\tself.level = level", "def __init__(self, logger, level):\n self.logger = logger\n self.level = level", "def log_init(level_name: str) -> None:\n fallback_log_level = \"INFO\"\n ot_log_level = level_name.upper()\n if ot_log_level not in logging._nameToLevel:\n sys.stderr.write(\n f\"OT Log Level {ot_log_level} not found. 
\"\n f\"Defaulting to {fallback_log_level}\\n\"\n )\n ot_log_level = fallback_log_level\n level_value = logging._nameToLevel[ot_log_level]\n logging_config = _config(ARCHITECTURE, level_value)\n dictConfig(logging_config)", "def init(log_name, log_level):\n global _logger\n\n assert _logger is None\n _logger = LogLevelObserver(log_name, log_level)\n _logger.start()", "def initialize(context, level):\n if not Log.initialized:\n Log.logger = logging.getLogger(context)\n Log.initialized = True\n logging.basicConfig(\n filename=CONST.APP_LOG_FILENAME,\n format=CONST.APP_LOG_FORMAT,\n datefmt='%Y-%m-%d %H:%M:%S'\n )\n Log.logger.setLevel(level)\n Log.logger.log(50, 'Logging initialised, level={}'.format(level))\n return Log.logger", "def _logging_levels(self, level):\n levels = {\n \"info\": logging.INFO,\n \"error\": logging.ERROR,\n \"debug\": logging.DEBUG,\n \"warning\": logging.WARNING,\n \"critical\": logging.CRITICAL,\n }\n return levels[level]", "def set_default_for_default_log_levels():\n\n extra_log_level_defaults = [\n 'dogpile=INFO',\n 'routes=INFO',\n 'keystone.common._memcache_pool=INFO',\n ]\n\n def find_default_log_levels_opt():\n for opt in log.log_opts:\n if opt.dest == 'default_log_levels':\n return opt\n\n opt = find_default_log_levels_opt()\n opt.default.extend(extra_log_level_defaults)", "def log_all_levels(logger_instance):\n for log_level in list(new_loglevel_dict.keys()) + list(standard_loglevel_dict.keys()):\n getattr(logger_instance, log_level.lower())('test ' + log_level)", "def init_logger(self):\n\n if self.args.log_level:\n log_level = getattr(logging, self.args.log_level)\n if coloredlogs:\n coloredlogs.install(level=log_level, fmt=LOG_FMT)\n else:\n logging.basicConfig(level=log_level)\n ch = logging.StreamHandler()\n formatter = logging.Formatter(LOG_FMT)\n ch.setFormatter(formatter)\n elif coloredlogs:\n coloredlogs.install(level='INFO', fmt=LOG_FMT)\n\n if coloredlogs:\n effective_level = coloredlogs.get_level()\n else:\n effective_level = logger.getEffectiveLevel()\n\n # make sure warning and error display at any effective level\n if effective_level > logging.WARNING:\n self.warning = logger.critical\n else:\n self.warning = logger.warning\n\n if effective_level > logging.ERROR:\n self.error = logger.critical\n else:\n self.error = logger.error\n\n self.info = logger.info\n self.debug = logger.debug\n self.exception = logger.exception\n self.critical = logger.critical", "def reset_logger():\n for log in ALL_LEVELS:\n setattr(vlog.Vlog, _LOG_MAPPING[log].__name__, _LOG_MAPPING[log])", "def initialize(self, log_level: int = 0):\n # Only the first accessing thread will call initialize:\n self._lock().count_up(lambda: self._initialize(log_level))", "def level_names(self):\n return list(self._levels)", "def init_logging(log_level):\n logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s', level=getattr(logging, log_level.upper(), None))", "def Levels(self):\n raise NotImplementedError(\"Subclass must implement abstract method\")", "def __set_level(self,L):\n assert isinstance(L,level)\n self.__level = L", "def createLevelMap(self):\n for a in self.hierarchy.iterkeys():\n self.lvl = 0\n self.calcLevel(a)\n if self.lvl > self.levelMap.highestLevel: self.levelMap.highestLevel = self.lvl\n self.levelMap.addLevelData(AgentName=a, Level=self.lvl)", "def createLevelMap(self):\n\t\tfor a in self.hierarchy.iterkeys():\n\t\t\tself.lvl = 0\n\t\t\tself.calcLevel(a)\n\t\t\tif self.lvl > self.levelMap.highestLevel: self.levelMap.highestLevel = 
self.lvl\n\t\t\tself.levelMap.addLevelData(AgentName=a, Level=self.lvl)", "def levels(self):\n return list(self._levels.values())" ]
[ "0.6628986", "0.657911", "0.65455437", "0.64682525", "0.63555413", "0.62843335", "0.62470645", "0.61197793", "0.59739", "0.59739", "0.59739", "0.59697783", "0.59558034", "0.59521854", "0.59476656", "0.59133816", "0.58976674", "0.5889272", "0.58495504", "0.5845259", "0.58044326", "0.5796314", "0.5775065", "0.573272", "0.57254565", "0.57068974", "0.5698169", "0.56753284", "0.5669662", "0.56538326" ]
0.778308
0
Given a nonempty array of integers arr and an integer k, return the sum of the elements with at most two digits among the first k elements of arr.
def add_elements(arr, k): return sum(elem for elem in arr[:k] if len(str(elem)) <= 2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def twoSumLessThanK(nums, k):\n nums.sort()\n start = 0\n end = len(nums) - 1\n max_sum = -1\n while start < end:\n curr_sum = nums[start] + nums[end]\n if curr_sum < k:\n max_sum = max(curr_sum, max_sum)\n if curr_sum >= k:\n end -= 1\n else:\n start += 1\n\n return max_sum", "def find_subarrays(nums, k):\n res = pre_sum = 0\n dic = {0: 1}\n for i in nums:\n pre_sum += i\n res += dic.get(pre_sum - k, 0)\n dic[pre_sum] = dic.get(pre_sum, 0) + 1\n return res", "def numSubarrayProductLessThanK(self, nums: List[int], k: int) -> int:\n\n if not nums:\n return 0\n\n if k <= 1:\n return 0\n\n count = 0\n lo = 0\n product = 1\n for hi in range(len(nums)):\n product *= nums[hi]\n while product >= k:\n product /= nums[lo]\n lo += 1\n count += hi - lo + 1\n return count", "def findKthPositive(self, arr: 'List[int]', k: int) -> int:\n for elem in arr:\n if k < elem:\n return k\n k += 1\n return k", "def check_through_arr(arr, k):\n i = 0\n j = 1\n count = 0\n while j < len(arr):\n if arr[i] + k > arr[j]:\n j += 1\n elif arr[i] + k == arr[j]:\n count += 1\n i += 1\n j += 1\n else:\n i += 1\n\n return count", "def find_maximal_subarray_sum(nums: List[int], k: int) -> int:\n maximal_subarray_sum = nums[0]\n numb_of_elements = len(nums)\n\n for i in range(0, numb_of_elements - k + 1, 1):\n temp_max = nums[i]\n temp_sum = nums[i]\n for j in range(i + 1, i + k):\n temp_sum += nums[j]\n if temp_sum > temp_max:\n temp_max = temp_sum\n if temp_max > maximal_subarray_sum:\n maximal_subarray_sum = temp_max\n\n return maximal_subarray_sum", "def kth_largest(arr: list, k: int):\n # Do not search if k is larger than total number of elements\n if k > len(arr):\n raise IndexError\n # Count all numbers\n nums = Counter(arr)\n # Go from the largest to smaller ones\n for key in sorted(nums, reverse=True):\n if nums[key] >= k:\n return key\n else:\n k -= nums[key]", "def fn(arr, k):\n ans = []\n for i, x in enumerate(arr): \n while ans and ans[-1] < x and len(ans) + len(arr) - i > k: ans.pop()\n if len(ans) < k: ans.append(x)\n return ans", "def find_max_in_array(arr, k):\r\n print(\" Amazon interview question\")\r\n arr[:] = sorted(arr)\r\n return ((arr[len(arr)-k]))", "def find_pair_with_sum_1(arr, k):\n n = len(arr)\n\n def find_pivot(arr):\n start = 0\n end = len(arr) - 1\n\n if arr[end] > arr[start]:\n return 0\n\n while end >= start:\n mid = start + (end-start)//2\n\n if arr[mid] > arr[mid+1]:\n return mid+1\n if arr[mid] < arr[mid-1]:\n return mid\n\n if arr[mid] > arr[start]:\n start = mid+1\n else:\n end = mid-1\n\n smallest_index = find_pivot(arr)\n print(\"Pivot is at \", smallest_index)\n largest_index = smallest_index-1\n\n while smallest_index != largest_index:\n curr_sum = arr[smallest_index] + arr[largest_index]\n\n if curr_sum == k:\n return (arr[smallest_index], arr[largest_index])\n\n if curr_sum > k:\n largest_index = (largest_index - 1) % n\n else:\n smallest_index = (smallest_index + 1) % n\n\n return \"Not Found\"", "def findKthLargest(self, nums: List[int], k: int) -> int:\n return sorted(nums)[-k]", "def find_max_average_sub(nums, k):\n\n\n\tresult = sum(nums[:k]) / k\n\tcurrent = result\n\n\tfor i in range(1, len(nums) - k + 1):\n\n\t\tcurrent = (current * k - nums[i - 1] +\\\n\t\t nums[i + k - 1]) / k\n\n\t\tif result < current:\n\t\t\tresult = current\n\n\n\treturn result", "def arrayMaxConsecutiveSum(inputArray, k):\n if k == 1:\n return max(inputArray)\n \n sub = inputArray[0:k]\n largest = sum(sub)\n result = largest\n \n for val in inputArray[k:]:\n largest -= sub[0]\n 
sub.remove(sub[0])\n sub.append(val)\n largest += val\n \n if largest > result:\n result = largest\n\n return result", "def count(arr, k):\n dp = [[None]*(k+1) for _ in range(len(arr)+1)]\n for i in range(len(dp)):\n dp[i][0] = 1\n for i in range(1, len(dp[0])):\n dp[0][i] = 0\n for a in dp:\n print(a)\n for i in range(1, len(dp)):\n for j in range(1, len(dp[0])):\n if arr[i-1] <= j:\n dp[i][j] = dp[i-1][j-arr[i-1]] + dp[i-1][j]\n else:\n dp[i][j] = dp[i-1][j]\n for a in dp:\n print(a)\n return dp[-1][-1]", "def solution2(nums, K):\n s = 0\n sum_til = []\n for n in nums:\n s += n\n sum_til.append(s)\n\n l = len(nums)\n for i in range(l):\n for j in range(i+1, l):\n sum_ij = sum_til[j] if i == 0 else sum_til[j] - sum_til[i-1]\n if K != 0 and sum_ij % K == 0:\n return True\n if K == 0 and sum_ij == 0:\n return True\n return False", "def maximum_value(arr: [int], k: int) -> [int]:\n m = []\n res = []\n for i in range(len(arr)):\n if len(m) == k:\n res.append(max(m))\n if len(m) == k:\n m[i%k] = arr[i]\n else:\n m.append(arr[i])\n res.append(max(m))\n return res", "def top_k_frequent_elements(nums, k):\r\n freq_dict = {}\r\n for elem in nums:\r\n freq_dict[elem] = freq_dict.get(elem, 0) + 1\r\n \r\n return sorted(freq_dict.keys(), key= lambda x: freq_dict[x], reverse=True)[:k]", "def rank(self, k, arr):\n\n # arr must be sorted\n if not(arr[0] < arr[len(arr)//2] < arr[len(arr)-1]):\n raise ValueError(\"Array must be sorted\")\n\n lo = 0\n hi = len(arr) - 1\n\n while lo <= hi:\n mid = lo + (hi - lo) // 2\n\n if k < arr[mid]:\n hi = mid - 1\n elif k > arr[mid]:\n lo = mid + 1\n else:\n return mid\n\n return -1", "def find_maximal_subarray_sum_deque(nums: List[int], k: int) -> int:\n if k == 1:\n return max(nums)\n\n q = deque()\n q.append(nums[0])\n curr_sum = nums[0]\n max_sum = curr_sum\n for x in nums[1:]:\n first = 0\n if len(q) == k:\n first = q.popleft()\n if curr_sum + x - first >= curr_sum:\n q.append(x)\n curr_sum += x - first\n else:\n q.clear()\n q.append(x)\n if curr_sum > max_sum:\n max_sum = curr_sum\n curr_sum = x\n if curr_sum > max_sum:\n max_sum = curr_sum\n return max_sum", "def maxResult(self, nums: List[int], k: int) -> int:\n # Solution 1 - 964 ms\n # Solution 2 - 864 ms\n n = len(nums)\n if n == 0:\n return 0\n if n == 1:\n return nums[0]\n\n dp = [0] * n\n dp[0] = nums[0]\n max_sum = dp[0]\n max_sum_pointer = 0\n for i in range(1, n):\n if max_sum_pointer >= i - k:\n if max_sum < dp[i - 1] and i > 0:\n max_sum = dp[i - 1]\n max_sum_pointer = i - 1\n else:\n if i - k > 0:\n max_sum = dp[i - k]\n max_sum_pointer = i - k\n for p in range(i - k, i):\n if max_sum <= dp[p]:\n max_sum = dp[p]\n max_sum_pointer = p\n\n dp[i] = max_sum + nums[i]\n\n dp[-1] = max_sum + nums[-1]\n return dp[-1]", "def arrayMaxConsecutiveSum(inputArray, k):\n\n max_sum = 0\n \n for i in range(k):\n max_sum += inputArray[i]\n \n new_sum = max_sum\n \n for i in range(len(inputArray)-k):\n \n new_sum = new_sum - inputArray[i] + inputArray[i + k]\n \n if new_sum > max_sum:\n max_sum = new_sum\n \n return max_sum", "def hit_rate_at_k(rs, k):\n if k < 1 or k > len(rs[0]):\n raise ValueError('k value must be >=1 and < Max Rank')\n hits = 0\n for r in rs:\n if np.sum(r[:k]) > 0: hits += 1\n\n return hits / len(rs)", "def maxSumOfThreeSubarrays(self, nums: List[int], k: int) -> List[int]:\n\n n = len(nums)\n if n < 3 * k or k == 0:\n return 0\n\n prefix_sum = [0]\n for num in nums:\n prefix_sum.append(prefix_sum[-1] + num)\n\n left = [0] * n\n left_i = [0] * n\n right = [0] * (n + 1) # add one to right (for 
case of k == 1)\n right_i = [0] * (n + 1)\n\n for i in range(k - 1, n):\n window = prefix_sum[i + 1] - prefix_sum[i + 1 - k]\n if window > left[i - 1]: # > cause we prefex left start\n left[i] = window\n left_i[i] = i - (k - 1)\n else:\n left[i] = left[i - 1]\n left_i[i] = left_i[i - 1]\n\n for i in reversed(range(n - k + 1)):\n window = prefix_sum[i + k] - prefix_sum[i]\n if window >= right[i + 1]: # >= cause we prefex left start\n right[i] = window\n right_i[i] = i\n else:\n right[i] = right[i + 1]\n right_i[i] = right_i[i + 1]\n\n max_sum = 0\n a, b, c = 0, 0, 0\n for i in range(k, n - 2 * k + 1):\n curr_sum = prefix_sum[i + k] - prefix_sum[i] + left[i - 1] + right[i + k]\n if curr_sum > max_sum:\n max_sum = curr_sum\n a, b, c = left_i[i - 1], i, right_i[i + k]\n\n return [a, b, c]", "def first_larger(array, k):\n\t\n\t# check if array is None, empty,...\n\tif array is None or len(array) == 0:\n\t\treturn False\n\n\tif array[-1] <= k:\n\t\treturn False\n\n\tbegin = 0\n\tend = len(array)\n\n\twhile begin + 1 < end:\n\t\tmed = begin + (end-begin) / 2\n\t\tif array[med] == k: \n\t\t\twhile array[med] == k:\n\t\t\t\tmed += 1\n\t\t\treturn med\n\t\telif array[med] > k:\n\t\t\tend = med\n\t\telse:\n\t\t\tbegin = med\n\n\treturn end", "def get_kth_ugly_number(k):\n count = 0; i = 0\n while count < k:\n i += 1\n if is_ugly(i):\n count += 1\n return i", "def find_pairs(arr, k): \n num_dict = {}\n final_arr = []\n for num in arr:\n x = num + k\n num_dict[x] = num\n for num in num_dict:\n if num in arr:\n final_arr.append([num, num_dict[num]])\n\n return final_arr", "def sort_k_messed_array(arr, k):\n\n if k == 0:\n return arr\n\n for i in range(len(arr)):\n min_index = find_min_index(arr, i, i + k)\n arr[i], arr[min_index] = arr[min_index], arr[i]\n\n return arr", "def largestSubarray(self, nums: List[int], k: int) -> List[int]:\n\n if k == 1:\n return [max(nums)]\n\n hash_map = {}\n for i, n in enumerate(nums):\n hash_map[n] = i\n\n candidates = nums[: len(nums) - k + 1]\n print(candidates)\n mx = max(candidates)\n mx_idx = hash_map[mx]\n op = nums[mx_idx : mx_idx + k]\n return op", "def three_sum(int_array, k):\n\tint_dict = {}\n\tint_set = set(int_array)\n\n\tfor i, num in enumerate(int_array):\n\t\tint_dict.setdefault(num, []).append(i)\n\n\tfor i in range(len(int_array)):\n\t\tfor j in range(i+1, len(int_array)):\n\t\t\tdifference = int_array[i] + int_array[j]\n\t\t\tif k - difference in int_set:\n\t\t\t\treturn i, j, int_dict[k - difference][0]\n\n\treturn None, None, None", "def kth_smallest_different_sizes(arr1, arr2, k):\n if not arr1:\n return arr2[k - 1]\n elif not arr2:\n return arr1[k - 1]\n\n if k == 1:\n return min(arr1[0], arr2[0])\n\n i = min(len(arr1), k // 2)\n j = min(len(arr2), k // 2)\n if arr1[i - 1] < arr2[j - 1]:\n return kth_smallest(arr1[i:], arr2, k - i)\n return kth_smallest(arr1, arr2[j:], k - j)" ]
[ "0.7111014", "0.7106602", "0.6709087", "0.6570492", "0.65060997", "0.6502326", "0.64653206", "0.64385146", "0.6366034", "0.63578564", "0.63147587", "0.62667817", "0.6192361", "0.6190565", "0.6170776", "0.6143347", "0.61372757", "0.6136915", "0.61089677", "0.6067554", "0.60127693", "0.5999773", "0.59966165", "0.59828854", "0.5927162", "0.58691514", "0.5811501", "0.5749162", "0.57489526", "0.5732334" ]
0.7140078
0
_dqmHarvesting_ DQM Harvesting for RelVal MC production
def dqmHarvesting(self, datasetName, runNumber, globalTag, **args): options = defaultOptions options.scenario = "pp" options.step = "HARVESTING:validationHarvesting+dqmHarvesting" options.isMC = True options.isData = False options.beamspot = None options.eventcontent = None options.name = "EDMtoMEConvert" options.conditions = "FrontierConditions_GlobalTag,%s" % globalTag options.arguments = "" options.evt_type = "" options.filein = [] process = cms.Process("HARVESTING", self.eras) process.source = cms.Source("PoolSource") configBuilder = ConfigBuilder(options, process = process) configBuilder.prepare() # # customise process for particular job # process.source.processingMode = cms.untracked.string('RunsAndLumis') process.source.fileNames = cms.untracked(cms.vstring()) process.maxEvents.input = -1 process.dqmSaver.workflow = datasetName return process
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ADM_SM_QCD(nf):\n\n adm_qqp_qqp = np.array([[0, 0, 0, 0, 0, 12, 0, 0],\n [0, 0, 0, 0, 12, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 12],\n [0, 0, 0, 0, 0, 0, 12, 0],\n [0, 8/3, 0, 0, - 19/3, 5, 0, 0],\n [8/3, 0, 0, 0, 5, - 9, 0, 0],\n [0, 0, 0, 8/3, 0, 0, - 23/3, 5],\n [0, 0, 8/3, 0, 0, 0, 5, - 23/3]])\n\n adm_qqp_qqpp = np.array([[0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 4/3, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 4/3, 0],\n [0, 0, 0, 0, 0, 0, 0, 0]])\n\n adm_qpq_qppq = np.array([[0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 4/3, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 4/3]])\n\n adm_qqp_qppq = np.array([[0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 4/3, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 4/3],\n [0, 0, 0, 0, 0, 0, 0, 0]])\n\n adm_qpq_qqpp = np.array([[0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 4/3, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 4/3, 0]])\n\n adm_q_q = np.array([[4, 4, 0, - 28/3],\n [0, 0, 0, 44/3],\n [0, 0, 44/9, 0],\n [5/3, 13/3, 0, - 106/9]])\n\n adm_qqp_q = np.array([[0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 4/3],\n [0, 0, 0, 0],\n [0, 0, 4/9, 0],\n [0, 0, 0, 0]])\n\n\n adm_qpq_q = np.array([[0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 0, 4/3],\n [0, 0, 0, 0],\n [0, 0, 0, 0],\n [0, 0, 4/9, 0]])\n\n adm_q_qqp = np.array([[0, 0, 0, 0, 8/3, 0, 0, 0],\n [0, 0, 0, 0, 8/3, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 8/3, 0],\n [0, 0, 0, 0, 20/9, 0, 0, 0]])\n\n adm_q_qpq = np.array([[0, 0, 0, 0, 8/3, 0, 0, 0],\n [0, 0, 0, 0, 8/3, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 8/3],\n [0, 0, 0, 0, 20/9, 0, 0, 0]])\n\n adm_ud = np.hstack((adm_qqp_qqp, adm_qqp_qqpp, adm_qqp_qqpp, adm_qqp_qqpp, adm_qpq_qqpp, adm_qpq_qqpp,\\\n adm_qpq_qqpp, np.zeros((8, 24)), adm_qqp_q, adm_qpq_q, np.zeros((8,12))))\n\n adm_us = np.hstack((adm_qqp_qqpp, adm_qqp_qqp, adm_qqp_qqpp, adm_qqp_qqpp, adm_qpq_qppq, np.zeros((8,16)),\\\n adm_qpq_qqpp, adm_qpq_qqpp, np.zeros((8, 8)), adm_qqp_q, np.zeros((8,4)), adm_qpq_q, np.zeros((8,8))))\n\n adm_uc = np.hstack((adm_qqp_qqpp, adm_qqp_qqpp, adm_qqp_qqp, adm_qqp_qqpp, np.zeros((8,8)), adm_qpq_qppq,\\\n np.zeros((8,8)), adm_qpq_qppq, np.zeros((8, 8)), adm_qpq_qqpp, adm_qqp_q, np.zeros((8,8)), adm_qpq_q, np.zeros((8,4))))\n\n adm_ub = np.hstack((adm_qqp_qqpp, adm_qqp_qqpp, adm_qqp_qqpp, adm_qqp_qqp, np.zeros((8,16)), adm_qpq_qppq,\\\n np.zeros((8,8)), adm_qpq_qppq, adm_qpq_qppq, adm_qqp_q, np.zeros((8,12)), adm_qpq_q))\n\n adm_ds = np.hstack((adm_qqp_qppq, adm_qpq_qppq, np.zeros((8,16)), adm_qqp_qqp, adm_qqp_qqpp, adm_qqp_qqpp,\\\n adm_qpq_qqpp, adm_qpq_qqpp, np.zeros((8,8)), np.zeros((8,4)), adm_qqp_q, adm_qpq_q, np.zeros((8,8))))\n\n adm_dc = np.hstack((adm_qqp_qppq, np.zeros((8,8)), adm_qpq_qppq, np.zeros((8,8)), adm_qqp_qqpp, adm_qqp_qqp, adm_qqp_qqpp,\\\n adm_qpq_qppq, np.zeros((8,8)), adm_qpq_qqpp, np.zeros((8,4)), adm_qqp_q, np.zeros((8,4)), adm_qpq_q, np.zeros((8,4))))\n\n adm_db = np.hstack((adm_qqp_qppq, np.zeros((8,16)), adm_qpq_qppq, adm_qqp_qqpp, adm_qqp_qqpp, adm_qqp_qqp,\\\n np.zeros((8,8)), adm_qpq_qppq, adm_qpq_qppq, np.zeros((8,4)), adm_qqp_q, np.zeros((8,8)), 
adm_qpq_q))\n\n adm_sc = np.hstack((np.zeros((8,8)), adm_qqp_qppq, adm_qpq_qppq, np.zeros((8,8)), adm_qqp_qppq, adm_qpq_qppq, np.zeros((8,8)),\\\n adm_qqp_qqp, adm_qqp_qqpp, adm_qpq_qqpp, np.zeros((8,8)), adm_qqp_q, adm_qpq_q, np.zeros((8,4))))\n\n adm_sb = np.hstack((np.zeros((8,8)), adm_qqp_qppq, np.zeros((8,8)), adm_qpq_qppq, adm_qqp_qppq, np.zeros((8,8)), adm_qpq_qppq,\\\n adm_qqp_qqpp, adm_qqp_qqp, adm_qpq_qppq, np.zeros((8,8)), adm_qqp_q, np.zeros((8,4)), adm_qpq_q))\n\n adm_cb = np.hstack((np.zeros((8,16)), adm_qqp_qppq, adm_qpq_qppq, np.zeros((8,8)), adm_qqp_qppq, adm_qpq_qppq,\\\n adm_qqp_qppq, adm_qpq_qppq, adm_qqp_qqp, np.zeros((8,12)), adm_qqp_q, adm_qpq_q))\n\n adm_u = np.hstack((adm_q_qqp, adm_q_qqp, adm_q_qqp, adm_q_qqp, np.zeros((4,48)), adm_q_q, np.zeros((4,16))))\n\n adm_d = np.hstack((adm_q_qpq, np.zeros((4,24)), adm_q_qqp, adm_q_qqp, adm_q_qqp, np.zeros((4,24)), np.zeros((4,4)), adm_q_q, np.zeros((4,12))))\n\n adm_s = np.hstack((np.zeros((4,8)), adm_q_qpq, np.zeros((4,16)), adm_q_qpq, np.zeros((4,16)), adm_q_qqp, adm_q_qqp, np.zeros((4,8)),\\\n np.zeros((4,8)), adm_q_q, np.zeros((4,8))))\n\n adm_c = np.hstack((np.zeros((4,16)), adm_q_qpq, np.zeros((4,16)), adm_q_qpq, np.zeros((4,8)), adm_q_qpq, np.zeros((4,8)), adm_q_qqp,\\\n np.zeros((4,12)), adm_q_q, np.zeros((4,4))))\n\n adm_b = np.hstack((np.zeros((4,24)), adm_q_qpq, np.zeros((4,16)), adm_q_qpq, np.zeros((4,8)), adm_q_qpq, adm_q_qpq, np.zeros((4,16)), adm_q_q))\n\n\n adm = np.vstack((adm_ud, adm_us, adm_uc, adm_ub, adm_ds, adm_dc, adm_db, adm_sc, adm_sb, adm_cb, adm_u, adm_d, adm_s, adm_c, adm_b))\n\n if nf == 5:\n return adm\n elif nf == 4:\n return np.delete(np.delete(adm, np.r_[np.s_[24:32], np.s_[48:56], np.s_[64:80], np.s_[96:100]], 0),\\\n np.r_[np.s_[24:32], np.s_[48:56], np.s_[64:80], np.s_[96:100]], 1)\n else:\n raise Exception(\"nf has to be 4 or 5\")", "def test_build_dmp_query1(self):\n # with dummy values\n patient_id = \"foo\"\n bait_set = \"bar\"\n dmp_query = build_dmp_query(patient_id, bait_set)\n expected_query = Q(metadata__cmo_assay=\"\") & Q(metadata__patient__cmo=\"foo\") & Q(metadata__type=\"N\")\n # (AND: ('filemetadata__metadata__cmo_assay', ''), ('filemetadata__metadata__patient__cmo', 'foo'), ('filemetadata__metadata__type', 'N'))\n self.assertEqual(dmp_query, expected_query)\n\n # with dummy CMO-ID style patient ID\n patient_id = \"C-foo\"\n bait_set = \"bar\"\n dmp_query = build_dmp_query(patient_id, bait_set)\n expected_query = Q(metadata__cmo_assay=\"\") & Q(metadata__patient__cmo=\"foo\") & Q(metadata__type=\"N\")\n self.assertEqual(dmp_query, expected_query)\n\n # dummy CMO-ID style patient ID and partially matching bait_set impact341\n patient_id = \"C-foo1\"\n bait_set = \"IMPACT341_foo\"\n dmp_query = build_dmp_query(patient_id, bait_set)\n expected_query = Q(metadata__cmo_assay=\"IMPACT341\") & Q(metadata__patient__cmo=\"foo1\") & Q(metadata__type=\"N\")\n self.assertEqual(dmp_query, expected_query)", "def test_dq_rules(self,DQ):\r\n pass", "def emm(dataset):\r\n\r\n ####################### CONFIGURE THIS ##############################\r\n\r\n #Define subgroup\r\n #subgroup = dataset[(dataset['dvce_type'] == 'Tablet')]\r\n subgroup = dataset[(dataset['os_timezone'].str.contains(\"Asia\") & (dataset['os_name'].str.contains(\"iPhone\")))]\r\n\r\n #Define target 1\r\n target1 = 'revenue'\r\n\r\n #Define target 2\r\n target2 = 'new_buttons'\r\n\r\n #####################################################################\r\n\r\n logging.info(\"Exceptional Model Mining. 
(Two targets)\")\r\n\r\n lengthDataset = len(dataset)\r\n logging.debug('Examples of the dataset {}'.format(lengthDataset)) \r\n logging.debug('Examples of subgroup: {} ({:.2f}%)'.format(len(subgroup), (len(subgroup)/lengthDataset) * 100))\r\n correlationTargets = phi_coefficient (dataset,target1,target2)\r\n logging.debug('Correlation of the two targets: {:.2f}'.format(correlationTargets))\r\n \r\n evaluate(QualityMeasure.SCD,ModelClass.PhiCoefficient,dataset,subgroup,target1,target2)", "def startDQM(run, startLumi, daq, dqmRunKey, ecalIn, esIn, logFile):\n\n logFile.write('Processing run', run)\n\n if dqmRunKey == 'cosmic_run':\n workflowBase = 'Cosmics'\n elif dqmRunKey == 'pp_run':\n workflowBase = 'Protons'\n elif dqmRunKey == 'hi_run':\n workflowBase = 'HeavyIons'\n else:\n workflowBase = 'All'\n\n procs = {}\n\n if daq == 'central':\n# commonOptions = 'runNumber={run} runInputDir={inputDir} workflow=/{dataset}/{period}/CentralDAQ'.format(run = run, inputDir = '/tmp/onlineDQM', dataset = workflowBase, period = config.period)\n\n# if ecalIn:\n# ecalOptions = 'environment=PrivLive outputPath={outputPath} verbosity={verbosity}'.format(outputPath = config.tmpoutdir, verbosity = VERBOSITY)\n#\n# log = open(config.logdir + '/ecal_dqm_sourceclient-privlive_cfg.log', 'a')\n# log.write('\\n\\n\\n')\n# command = 'source $HOME/DQM/cmssw.sh; exec cmsRun {conf} {common} {ecal} {spec}'.format(conf = config.workdir + '/ecalConfigBuilder.py', common = commonOptions, ecal = ecalOptions, spec = 'cfgType=Physics')\n# proc = subprocess.Popen(command, shell = True, stdout = log, stderr = subprocess.STDOUT)\n# logFile.write(command)\n# procs['Physics'] = (proc, log)\n \n# log = open(config.logdir + '/ecalcalib_dqm_sourceclient-privlive_cfg.log', 'a')\n# log.write('\\n\\n\\n')\n# command = 'source $HOME/DQM/cmssw.sh; exec cmsRun {conf} {common} {ecal} {spec}'.format(conf = config.workdir + '/ecalConfigBuilder.py', common = commonOptions, ecal = ecalOptions, spec = 'cfgType=Calibration')\n# proc = subprocess.Popen(command, shell = True, stdout = log, stderr = subprocess.STDOUT)\n# logFile.write(command)\n# procs['Calibration'] = (proc, log)\n\n# if esIn:\n# log = open(config.logdir + '/es_dqm_sourceclient-privlive_cfg.log', 'a')\n# log.write('\\n\\n\\n')\n# command = 'source $HOME/DQM/cmssw.sh; exec cmsRun {conf} {common}'.format(conf = config.workdir + '/es_dqm_sourceclient-privlive_cfg.py', common = commonOptions)\n# proc = subprocess.Popen(command, shell = True, stdout = log, stderr = subprocess.STDOUT)\n# logFile.write(command)\n# procs['ES'] = (proc, log)\n\n elif daq == 'minidaq':\n if not os.path.isdir('/dqmminidaq/run%d' % run):\n logFile.write('DQM stream was not produced')\n return {}\n\n commonOptions = 'runNumber={run} runInputDir={inputDir} workflow=/{dataset}/{period}/MiniDAQ'.format(run = run, inputDir = '/dqmminidaq', dataset = workflowBase, period = config.period)\n\n if ecalIn:\n \n ecalOptions = 'environment=PrivLive outputPath={outputPath} verbosity={verbosity}'.format(outputPath = config.tmpoutdir, verbosity = VERBOSITY)\n \n log = open(config.logdir + '/ecalcalib_dqm_sourceclient-privlive_cfg.log', 'a')\n log.write('\\n\\n\\n')\n command = 'source $HOME/DQM/cmssw.sh; exec cmsRun {conf} {common} {ecal} {spec}'.format(conf = config.workdir + '/ecalConfigBuilder.py', common = commonOptions, ecal = ecalOptions, spec = 'cfgType=CalibrationStandalone')\n proc = subprocess.Popen(command, shell = True, stdout = log, stderr = subprocess.STDOUT)\n logFile.write(command)\n procs['Calibration'] = 
(proc, log)\n\n if esIn:\n log = open(config.logdir + '/es_dqm_sourceclient-privlive_cfg.log', 'a')\n log.write('\\n\\n\\n')\n command = 'source $HOME/DQM/cmssw.sh; exec cmsRun {conf} {common}'.format(conf = config.workdir + '/es_dqm_sourceclient-privlive_cfg.py', common = commonOptions)\n proc = subprocess.Popen(command, shell = True, stdout = log, stderr = subprocess.STDOUT)\n logFile.write(command)\n procs['ES'] = (proc, log)\n\n logFile.write('Running configurations:', sorted(procs.keys()))\n\n return procs", "def test_describe_diagnostics():\n\n m = pyqg.QGModel(1)\n m.describe_diagnostics()", "def gen_910AQ_SO3H():\r\n q_smiles_base = {}\r\n q_smiles_mid = {}\r\n\r\n q_smiles_base['9,10AQ'] = 'O=C1C2C=CC=CC2C(=O)C2=C1C=CC=C2'\r\n q_smiles_base['9,10AQ,1-OH'] = 'OS(=O)(=O)C1=CC=CC2C1C(=O)C1=C(C=CC=C1)C2=O'\r\n q_smiles_base['9,10AQ,2-OH'] = 'OS(=O)(=O)C1=CC2C(C=C1)C(=O)C1=C(C=CC=C1)C2=O'\r\n q_smiles_base['9,10AQ,Full-OH'] = 'OS(=O)(=O)C1=C(C(=C(C2C1C(=O)C1=C(C2=O)C(=C(C(=C1S(O)(=O)=O)S(O)(=O)=O)S(O)(=O)=O)S(O)(=O)=O)S(O)(=O)=O)S(O)(=O)=O)S(O)(=O)=O'\r\n\r\n q_smiles_mid['9,10AQ'] = 'O=C1C2C=CC=CC2C(=O)C2=C1C=CC=C2'\r\n q_smiles_mid['9,10AQ,1-OH'] = 'OS(=O)(=O)C1=CC=CC2C1C(=O)C1=C(C=CC=C1)C2=O'\r\n q_smiles_mid['9,10AQ,2-OH'] = 'OS(=O)(=O)C1=CC2C(C=C1)C(=O)C1=C(C=CC=C1)C2=O'\r\n q_smiles_mid['9,10AQ,1,2-OH'] = 'OS(=O)(=O)C1=C(C2C(C=C1)C(=O)C1=C(C=CC=C1)C2=O)S(O)(=O)=O'\r\n q_smiles_mid['9,10AQ,Full-OH'] = 'OS(=O)(=O)C1=C(C(=C(C2C1C(=O)C1=C(C2=O)C(=C(C(=C1S(O)(=O)=O)S(O)(=O)=O)S(O)(=O)=O)S(O)(=O)=O)S(O)(=O)=O)S(O)(=O)=O)S(O)(=O)=O'\r\n\r\n return q_smiles_base, q_smiles_mid", "def ADM_QED(nf):\n Qu = 2/3\n Qd = -1/3\n Qe = -1\n nc = 3\n gamma_QED = np.array([[8/3*Qu*Qu*nc, 8/3*Qu*Qd*nc, 8/3*Qu*Qd*nc, 8/3*Qu*Qu*nc, 8/3*Qu*Qd*nc, 8/3*Qu*Qe*nc, 8/3*Qu*Qe*nc, 8/3*Qu*Qe*nc],\n [8/3*Qd*Qu*nc, 8/3*Qd*Qd*nc, 8/3*Qd*Qd*nc, 8/3*Qd*Qu*nc, 8/3*Qd*Qd*nc, 8/3*Qd*Qe*nc, 8/3*Qd*Qe*nc, 8/3*Qd*Qe*nc],\n [8/3*Qd*Qu*nc, 8/3*Qd*Qd*nc, 8/3*Qd*Qd*nc, 8/3*Qd*Qu*nc, 8/3*Qd*Qd*nc, 8/3*Qd*Qe*nc, 8/3*Qd*Qe*nc, 8/3*Qd*Qe*nc],\n [8/3*Qu*Qu*nc, 8/3*Qu*Qd*nc, 8/3*Qu*Qd*nc, 8/3*Qu*Qu*nc, 8/3*Qu*Qd*nc, 8/3*Qu*Qe*nc, 8/3*Qu*Qe*nc, 8/3*Qu*Qe*nc],\n [8/3*Qd*Qu*nc, 8/3*Qd*Qd*nc, 8/3*Qd*Qd*nc, 8/3*Qd*Qu*nc, 8/3*Qd*Qd*nc, 8/3*Qd*Qe*nc, 8/3*Qd*Qe*nc, 8/3*Qd*Qe*nc],\n [8/3*Qe*Qu, 8/3*Qe*Qd, 8/3*Qe*Qd, 8/3*Qe*Qu, 8/3*Qe*Qd, 8/3*Qe*Qe, 8/3*Qe*Qe, 8/3*Qe*Qe],\n [8/3*Qe*Qu, 8/3*Qe*Qd, 8/3*Qe*Qd, 8/3*Qe*Qu, 8/3*Qe*Qd, 8/3*Qe*Qe, 8/3*Qe*Qe, 8/3*Qe*Qe],\n [8/3*Qe*Qu, 8/3*Qe*Qd, 8/3*Qe*Qd, 8/3*Qe*Qu, 8/3*Qe*Qd, 8/3*Qe*Qe, 8/3*Qe*Qe, 8/3*Qe*Qe]])\n gamma_QED_1 = np.zeros((2,154))\n gamma_QED_2 = np.hstack((np.zeros((8,2)),gamma_QED,np.zeros((8,144))))\n gamma_QED_3 = np.hstack((np.zeros((8,10)),gamma_QED,np.zeros((8,136))))\n gamma_QED_4 = np.zeros((136,154))\n gamma_QED = np.vstack((gamma_QED_1, gamma_QED_2, gamma_QED_3, gamma_QED_4))\n\n if nf == 5:\n return gamma_QED\n elif nf == 4:\n return np.delete(np.delete(gamma_QED, [6, 14, 22, 30, 42, 50, 58, 66, 74, 82, 94, 102, 110, 118, 126, 134, 142, 150], 0)\\\n , [6, 14, 22, 30, 42, 50, 58, 66, 74, 82, 94, 102, 110, 118, 126, 134, 142, 150], 1)\n elif nf == 3:\n return np.delete(np.delete(gamma_QED, [5,6, 13,14, 21,22, 29,30, 41,42, 49,50, 57,58, 65,66, 73,74, 81,82,\\\n 93,94, 101,102, 109,110, 117,118, 125,126, 133,134, 141,142, 149,150], 0)\\\n , [5,6, 13,14, 21,22, 29,30, 41,42, 49,50, 57,58, 65,66, 73,74, 81,82,\\\n 93,94, 101,102, 109,110, 117,118, 125,126, 133,134, 141,142, 149,150], 1)\n else:\n raise Exception(\"nf has to be 3, 4 or 5\")", "def SystemID_spec_match(q,dt=.1):\n\n 
Dim = q.shape[1]\n\n k_opt,D_opt = np.zeros(Dim),np.zeros(Dim)\n\n # loop over dimensions\n for j in range(0,Dim):\n print('identifying system #'+str(j+1)+'via spectral match')\n print(50*'-')\n # first we optimize in q-space\n k_opt[j],D_opt[j]=Optimize_Sqq(q[:,j],dt)\n\n Sys_Params={'k': k_opt, 'beta':D_opt/k_opt,'D':D_opt}\n \n return Sys_Params", "def addMeds(self):\n if self.pid in Med.meds: \n for m in Med.meds[self.pid]:\n\n # build the fills, setting some defaults\n subs = {\n 'qunit': {'qunit': '{tab}'},\n 'pbm': {'pbm': 'T00000000001011'},\n 'ncpdp': {'ncpdp': '5235235'},\n 'pharm_org': {'pharm_org': 'CVS #588'},\n 'pharm_co': {'pharm_co': 'Australia'},\n 'pharm_ci': {'pharm_ci': 'Wonder City'},\n 'pharm_pc': {'pharm_pc': '5555'},\n 'pharm_st': {'pharm_st': '111 Lake Drive'},\n 'pharm_re': {'pharm_re': 'West Australia'},\n 'prov_dea': {'prov_dea': '325555555'},\n 'prov_npi': {'prov_npi': '5235235'},\n 'prov_email': {'prov_email': '[email protected]'},\n 'prov_fn': {'prov_fn': 'Joshua'},\n 'prov_ln': {'prov_ln': 'Mandel'},\n 'prov_tel': {'prov_tel': '1-234-567-8910'},\n }\n fills_str = ''\n for f in Refill.refill_list(self.pid, m.rxn):\n self._set_default_attrs(f, subs)\n fills_str = '\\n'.join([fills_str, FULFILLMENT.sub({\n 'date': f.date,\n 'days': f.days,\n 'pbm': f.pbm,\n 'ncpdp': f.ncpdp,\n 'pharm_org': f.pharm_org,\n 'pharm_co': f.pharm_co,\n 'pharm_ci': f.pharm_ci,\n 'pharm_pc': f.pharm_pc,\n 'pharm_st': f.pharm_st,\n 'pharm_re': f.pharm_re,\n 'prov_dea': f.prov_dea,\n 'prov_npi': f.prov_npi,\n 'prov_email': f.prov_email,\n 'prov_fn': f.prov_fn,\n 'prov_ln': f.prov_ln,\n 'prov_tel': f.prov_tel,\n 'quantity': f.q,\n 'quantityUnits': f.qunit}).done()])\n\n # build the med, setting some defaults\n subs = {\n 'qtt': {'qtt': 30, 'qttunit': '{tab}'},\n 'freq': {'freq':2, 'frequnit': '/d'},\n 'prov': {'prov': 'Derived by prescription', 'prov_id': 'prescription'},\n 'end': {'end': '2010-04-09'},\n }\n self._set_default_attrs(m, subs)\n med_data = {\n 'name': m.name,\n 'rxnorm': m.rxn,\n 'endDate': m.end,\n 'frequencyValue': m.freq,\n 'frequencyUnits': m.frequnit,\n 'instructions': m.sig,\n 'provenance': m.prov,\n 'provenance_id': m.prov_id,\n 'quantityValue': m.qtt,\n 'quantityUnits': m.qttunit,\n 'startDate': m.start,\n }\n med_str = MEDICATION.sub(med_data).sub({'fills':fills_str}, escape=False).done() \n self.data.append(SDMX.sub({'models':med_str}, escape=False).done())", "def load_oqmd(\n self,\n space=None,\n search={},\n exclude={},\n stable=False,\n fit=\"standard\",\n total=False,\n ):\n from qmpy.materials.formation_energy import FormationEnergy\n from qmpy.materials.element import Element\n\n logger.debug(\"Loading Phases from the OQMD\")\n data = FormationEnergy.objects.all()\n ##data = data.filter(entry__id=F('entry__duplicate_of__id'))\n\n if fit:\n data = data.filter(fit=fit)\n else:\n total = True\n\n if stable:\n data = data.filter(stability__lte=0)\n\n if search:\n data = data.filter(**search)\n\n if exclude:\n data = data.exclude(**exclude)\n\n if space:\n ## Query phase space using element_list\n dim = len(space) + 1\n\n element_q_lst = [\n Q(composition__element_list__contains=s + \"_\") for s in space\n ]\n combined_q = reduce(operator.or_, element_q_lst)\n combined_q = reduce(\n operator.and_, [combined_q, Q(composition__ntypes__lt=dim)]\n )\n\n exclude_element_q_lst = [\n Q(composition__element_list__contains=e.symbol + \"_\")\n for e in Element.objects.exclude(symbol__in=space)\n ]\n combined_q_not = reduce(operator.or_, exclude_element_q_lst)\n\n data = 
data.filter(combined_q).exclude(combined_q_not)\n\n ## The following is old method (will be removed in future)\n # space_qs = Element.objects.exclude(symbol__in=space)\n # data = data.filter(composition__element_set__in=space)\n # data = data.exclude(composition__element_set__in=space_qs)\n\n data = data.distinct()\n columns = [\n \"id\",\n \"composition_id\",\n \"stability\",\n \"calculation__input__spacegroup\",\n ]\n if total:\n columns.append(\"calculation__energy_pa\")\n else:\n columns.append(\"delta_e\")\n\n values = data.values(*columns)\n\n for row in values:\n if total:\n energy = row[\"calculation__energy_pa\"]\n else:\n energy = row[\"delta_e\"]\n try:\n phase = Phase(\n energy=energy,\n composition=parse_comp(row[\"composition_id\"]),\n description=row[\"calculation__input__spacegroup\"],\n stability=row[\"stability\"],\n per_atom=True,\n total=total,\n )\n phase.id = row[\"id\"]\n self.add_phase(phase)\n except TypeError:\n raise PhaseError(\n \"Something went wrong with Formation object\\\n {}. No composition?\".format(\n row[\"id\"]\n )\n )", "def quality(self): \n\n subsetInt = [int(s) for s in self.subset.split() if s.isdigit()]\n columnNames = [] \n for i in range(len(subsetInt)):\n if subsetInt[i] == 1:\n columnNames.append(self.varNames[i])\n\n #qualityBand number of subset\n q = columnNames.index('Quality') \n\n if subsetInt[self.qualityBand] == 1:\n dataCount = self.subset.count('1')\n QC = np.repeat(self.DC[:,q].reshape((self.DC.shape[0],1)), dataCount-1, axis = 1)\n if self.dataset == 'MOD09A1.005' or self.dataset == 'MOD13Q1.005':\n QC = np.uint16(QC)\n else:\n QC = np.uint8(QC)\n\n QCm = QC & 1 #flips DCm mask\n DCm = np.delete(self.DC, q, 1) #looks good\n \n DCm = np.ma.masked_where(QCm == 1, DCm)\n DCm = np.ma.masked_where(DCm == 9999.0, DCm) \n \n if len(self.tiles) > 1:\n obs = self.observations/len(self.tiles)\n if len(self.tiles) == 1:\n obs = self.observations/2\n \n outArray = np.empty(shape = (self.rows*self.columns*obs, 0))\n for b in range(0, self.DC.shape[1]-1):\n cfull = DCm[:,b].reshape((self.observations, self.rows, self.columns))\n b16 = np.empty(shape = (self.rows*self.columns*obs, 0))\n for band in range(0,cfull.shape[0],2):\n c16 = np.ma.mean(cfull[band:band+1,:,:], axis=0)\n c16f = np.ma.filled(c16, 9999.0).astype(float).reshape((self.rows*self.columns))\n b16 = np.append(b16, c16f)\n outArray = np.append(outArray, b16.reshape((obs*self.rows*self.columns, 1)), axis = 1)\n \n self.finalDC = outArray\n \n np.save(str(self.directory) + '/' + self.dataset + '.npy', self.finalDC)\n del outArray, QC, DCm\n\n outfile = str(self.directory) + '/' + self.dataset + '.txt'\n f = open(outfile, 'w')\n for name in columnNames:\n if name != 'Quality':\n f.write(name + '\\n')\n var = [a for a in columnNames if not a.startswith('Quality')]\n logger.log('SUCCESS', 'The final 16-day interval quality-masked matrix was created successfully. This matrix has dimensions %d rows by %d columns. 
Datasets included in the matrix are %s' % (self.finalDC.shape[0], self.finalDC.shape[1], var))\n \n \n if subsetInt[self.qualityBand] != 1:\n cleanDC = np.delete(self.DC, q, 1)\n \n \n if len(self.tiles) > 1:\n obs = self.observations/len(self.tiles)\n if len(self.tiles) == 1:\n obs = self.observations/2\n \n outArray = np.empty(shape = (self.rows*self.columns*obs, 0))\n for b in range(cleanDC.shape[1]):\n cfull = cleanDC[:,b].reshape((self.observations, self.rows, self.columns))\n b16 = np.empty(shape=(self.rows*self.columns*obs))\n for band in range(cfull.shape[0]):\n c16 = np.mean(cfull[band:band+1,:,:], axis=0)\n band16 = np.append(b16, c16, axis=0)\n outArray = np.append(outArray, b16.reshape((obs*self.rows*self.columns, 1)), axis = 1)\n\n np.save(self.directory + '/' + self.dataset + '.npy', self.finalDC)\n del cleanDC, outArray\n \n outfile = self.directory + '/' + self.dataset + '.txt'\n f = open(outfile, 'w')\n for name in columnNames:\n if name != 'Quality':\n f.write(str(name) + ' \\n')\n var = [a for a in columnNames if not a.startswith('Quality')]\n logger.log('SUCCESS', 'The final 16-day interval matrix was created successfully. A quality mask was not applied, though remaining no data values are set at 9999. This matrix has dimensions %d rows by %d columns. Datasets included in the matrix are %s' % (self.finalDC.shape[0], self.finalDC.shape[1], var))", "def test_calculate_mil_hdbk_217f_part_stress():\n ATTRIBUTES['hazard_rate_method_id'] = 2\n ATTRIBUTES['environment_active_id'] = 3\n ATTRIBUTES['subcategory_id'] = 1\n ATTRIBUTES['type_id'] = 1\n ATTRIBUTES['specification_id'] = 1\n ATTRIBUTES['temperature_active'] = 32.0\n ATTRIBUTES['quality_id'] = 2\n ATTRIBUTES['insert_id'] = 1\n ATTRIBUTES['contact_gauge'] = 20\n ATTRIBUTES['current_operating'] = 2\n ATTRIBUTES['n_cycles'] = 2\n ATTRIBUTES['n_active_pins'] = 20\n\n _attributes, _msg = Connection.calculate_217f_part_stress(**ATTRIBUTES)\n\n assert isinstance(_attributes, dict)\n assert _msg == ''\n assert pytest.approx(_attributes['voltage_ratio'], 0.67)\n assert pytest.approx(_attributes['lambda_b'], 0.07944039)\n assert pytest.approx(_attributes['piCV'], 0.3617763)\n assert _attributes['piQ'] == 10.0\n assert pytest.approx(_attributes['hazard_rate_active'], 1.005887691)\n assert pytest.approx(_attributes['temperature_rise'], 2.3072012)\n assert pytest.approx(_attributes['lambda_b'], 0.0006338549)\n assert _attributes['piK'] == 2.0\n assert pytest.approx(_attributes['piP'], 4.0062301)\n assert _attributes['piE'] == 21.0\n assert pytest.approx(_attributes['hazard_rate_active'], 0.1066535)", "def qdd(self):\n return self._qdd", "def usefulquantities(dffin):\n dffin['log_length_box'] = np.log(dffin['length_box_um'])\n dffin['time_min']=dffin['time_sec']/60\n dffin['pred_length_box_um'] = np.exp(dffin['pred_log_length'])\n dffin['unique_id'] = dffin['cell']+dffin['time_sec'].apply(lambda x:str(x))\n dffin['cv_gr']= dffin.groupby('cell')['pred_growth_rate'].transform(lambda x:\\\n np.std(x)/np.mean(x))\n dffin['std_gr']= dffin.groupby('cell')['pred_growth_rate'].transform(lambda x: np.std(x))\n dffin['mean_gr'] = dffin.groupby('cell')['pred_growth_rate'].transform(lambda x: np.mean(x))\n dffin['mean_len'] = dffin.groupby('cell')['pred_length_box_um'].transform(lambda x: np.mean(x))\n dffin['norm_pred_growth_rate'] = (dffin['pred_growth_rate']-dffin.groupby('cell')['pred_growth_rate'].transform(lambda\\\n x: np.mean(x)))/dffin.groupby('cell')['pred_growth_rate'].transform(lambda x: np.mean(x))\n dffin = 
rl.genalogy(dffin,'parent_cell') #Create genealogy\n dffin = rl.genalogy(dffin,'g_parent_cell')\n dffin = rl.genalogy(dffin,'g_g_parent_cell')\n dffin = dffin.set_index('unique_id')\n qq= dffin.groupby('cell').apply(lambda x: (x['pred_length_box_um']-x['pred_length_box_um'].iloc[0])/(x['pred_length_box_um'].iloc[-1]-x['pred_length_box_um'].iloc[0])).rename('add_len')\n jj= dffin.groupby('cell').apply(lambda x: (x['time_sec']-x['time_sec'].iloc[0])/(x['time_sec'].iloc[-1]-x['time_sec'].iloc[0])).rename('cell_cycle')\n return pd.concat([dffin, qq.reset_index().set_index('unique_id')['add_len'], jj.reset_index().set_index('unique_id')['cell_cycle']], axis=1, join='inner')", "def create_dqcr_dqch_dqid(runnumber, data, pmtdb_data=None):\n # If no pmtdb data has been given, warn the user that\n # she/he is missing out on some information.\n if pmtdb_data is None:\n print \"\"\"chstools: Warning: no pmtdb information was given.\n DQCH bits 0 (cable) and 1 (resistor) will be set to\n 0 by default.\n Check the README for details on how to contact the\n SQL pmtdb for this information.\n \"\"\"\n # Get the mtc, xl3 and fec information from the run configuration data\n rows = data['rows']\n for value in rows:\n mtc = value['doc']['mtc']\n xl3s = value['doc']['xl3s']\n fecs = value['doc']['fec32_card']\n # Create the arrays that will hold the dqid, dqcr and dqch info\n dqid = [0x0 for i in range(19 * 96)]\n dqcr = [0x0 for i in range(19 * 16)]\n dqch = [0x0 for i in range(19 * 16 * 32)]\n # Now loop over crate/card/channel and fill the DQID, DQCR and DQCH words\n for crate in range(0, 19):\n for card in range(0, 16):\n dqcr_word = 0\n if len(fecs.get(str(crate))[str(card)]) > 0:\n dqid[(crate * 96) +\n (card * 6)] = hex(int(fecs[str(crate)]\n [str(card)]\n ['mother_board']\n ['mother_board_id'],\n 16))\n dqid[(crate * 96) + (card * 6) + 1] = hex(int(\"0x0\", 16))\n for db in range(0, 4):\n dqid[(crate * 96) +\n (card * 6) +\n 2 + db] = hex(int(fecs[str(crate)]\n [str(card)]\n ['daughter_board']\n [str(db)]\n ['daughter_board_id'],\n 16))\n else:\n # The DQIDs will be zero in this case\n print \"chstools: Warning: no FEC info for crate/card \" +\\\n str(crate) + \"/\" + str(card)\n # Time for DQCR!\n # 0 CRATE Crate present present(0), not present(1)\n # SNO+ : this is now replaced with: XL3 communicating.\n # FIXME (Freija): not available in ORCA database for so I am just\n # checking to see if there is XL3 info present... this might just\n # be always the case though.\n xl3com = len(xl3s.get(str(crate))) == 0\n dqcr_word |= (xl3com << 0)\n # 1 MB MB present(0), not present(1)\n # If the board id cannot be read, assume board\n # not present\n # Just check if ID is not \"0x0\"\n if len(fecs.get(str(crate))[str(card)]) > 0:\n mbPresent = (fecs.get(str(crate))[str(card)]['mother_board']\n ['mother_board_id'] == \"0\")\n else:\n mbPresent = 1\n dqcr_word |= (mbPresent << 1)\n # 2 PMTIC PMTIC present(0), not present(1)\n # If the board id cannot be read, assume board\n # not present\n # FIXME (Freija) PMTIC ID is not available. 
For now, leave as\n # present = hardcoded for now\n pmticPresent = 0\n dqcr_word |= (pmticPresent << 2)\n # 3 DAQ DAQ readout (eCPU) online(0), offline(1)\n # SNO+ - Crate is in Normal Mode (0->normal, 1-> not-normal).\n # Modes: 1=init, 2=normal, 3:cgt\n # (no one seems to know what this last one means?)\n slotNormal = ((xl3s.get(str(crate))[\"xl3_mode\"]) == \"2\")\n dqcr_word |= (slotNormal << 3)\n # 4 DC Daughter cards all present(0),\n # 4 bit mask of present DC\n # Channel i associated with DC at bit DC + i/8\n # If the board id cannot be read, assume not present\n for db in range(0, 4):\n if len(fecs.get(str(crate))[str(card)]) > 0:\n dbPresent = (fecs.get(str(crate))[str(card)]\n [\"daughter_board\"]\n [str(db)]\n [\"daughter_board_id\"] == \"0\")\n else:\n dbPresent = 1\n dqcr_word |= (mbPresent << 4 + db)\n # 9 GT GT mask for crate, i.e., is this crate receiving\n # global triggers? yes = 0, no =1\n # The GT mask is now a bitmask\n crMask = (1 << crate)\n gtMask = not(crMask & int(mtc[\"gt_mask\"]))\n # FIXME (Freija) The GT mask is not being written out correctly\n # Default to always masked in for now\n # dqcr_word |= (gtMask << 9)\n dqcr_word |= (0 << 9)\n # 10 CR_ONLINE Crate on-line (i.e., is being read out by ECPU)\n # New definition: is the crate initialized.\n # FIXME (Freija): this is not available in configuration file.\n dqcr_word |= (0 << 10)\n # 12 RELAY HV relays all on(0), 4 bit mask of relays on\n # The status of the HV relays come in two words:\n # hv_relay_high_mask and hv_relay_low_mask\n # So cards 0-7 are defined in the low and 8-15 in the high mask.\n relay_word = 0\n if card < 8:\n # As a reminder: '0' means that the relay is closed...\n relay_word = int((xl3s.get(str(crate))[\"hv_relay_low_mask\"]) >>\n (card * 4)) & 0xf\n relay_word = ~relay_word & 0xf\n else:\n relay_word = int((xl3s.get(str(crate))[\"hv_relay_high_mask\"]) >>\n ((card - 8) * 4)) & 0xf\n relay_word = ~relay_word & 0xf\n dqcr_word |= (relay_word << 12)\n # 8 SLOT_OP OR of bits 0-7, 12-15.\n # This is deemed to mean, 'slot operational',\n # an OR of crate, slot, db, etc operational.\n # FIXME (Freija) I do not see why an entire slot is un-operational\n # if one HV relay is off? Or when a DB is missing?\n # In any case, this just comes down to checking if dqcr_word is\n # zero at this point\n slotOp = not(dqcr_word == 0)\n dqcr_word |= (slotOp << 8)\n # 16 HV HV for this card. 12 bits (0-4095)\n # This is now in the xl3 information as hv_voltage_read_value_a or\n # hv_voltage_read_value_b. 
The xl3 value is not in ADC counts but\n # in actual Volts so do a conversion:\n readHV = int(4095.0 * xl3s.get(str(crate))\n [\"hv_voltage_read_value_a\"] / 3000)\n dqcr_word |= (readHV << 16)\n # 11 CR_HV or of bit 0 (SNO crate present) and bits 16-31.\n crate_hv = not((dqcr_word & 0x1) | readHV > 0)\n dqcr_word |= (crate_hv << 11)\n # Now time for DQCH = channel-dependent status word...\n # We can only get this info if the FEC card object exists\n for ch in range(0, 32):\n # Find out which DB this channel is on\n db_index = int(ch / 8)\n # Find out which channel this is on the DB in question\n db_channel = ch % 8\n dqch_word = 0\n if pmtdb_data is not None:\n # 0: cable status (from PMTDB)\n # The cable status is defined (for now) as the 'or' of the\n # pulledCable, BadCable columns from the pmtdb table\n # The bit will be set if the cable is considered bad.\n cable_status = pmtdb_data[(crate, card, ch)][0] |\\\n pmtdb_data[(crate, card, ch)][1]\n dqch_word |= (cable_status << 0)\n # 1: pulled resistor (from PMTDB)\n # The resistor status is defined as the rPulled column\n # from the pmtdb.\n # The bit will be set if the resistor is pulled.\n resistor_status = pmtdb_data[(crate, card, ch)][2]\n dqch_word |= (resistor_status << 1)\n if len(fecs.get(str(crate))[str(card)]) > 0:\n # 5, 6, 7, 8, 9: All zero, meaning present & operating\n # 2 : sequencer\n # FIXME (Freija) This seems to be always 1 (disabled),\n # will set to 0 by default for now.\n # Uncomment the next lines when this is fixed...\n # sequencer = not(int(fecs[str(crate)][str(card)]\n # [\"mother_board\"]\n # [\"sequencer_mask\"]) >> ch) & 0x1\n sequencer = 0\n dqch_word |= (sequencer << 2)\n # 3: N100 enabled\n # FIXME (Freija) This seems to be always 1 (disabled),\n # will set to 0 by default for now.\n # Uncomment the next line when this is fixed...\n # n100 = not( int(fecCards.get(str(crate))[str(card)]\n # [\"mother_board\"]\n # [\"trigger_100ns_mask\"]) >>\n # ch) & 0x1;\n n100 = 0\n dqch_word |= (n100 << 3)\n # 4: N20 enabled\n # FIXME (Freija) This seems to be always 1 (disabled),\n # will set to 0 by default for now.\n # Uncomment the next line when this is fixed...\n # n20 = not(int(fecs.get(str(crate))[str(card)]\n # [\"mother_board\"]\n # [\"trigger_20ns_mask\"]) >>\n # ch) & 0x1;\n n20 = 0\n dqch_word |= (n20 << 4)\n # 10: Bad (or of bits 0-9)\n bad = ((dqch_word & 0x511) > 0)\n dqch_word |= (bad << 10)\n # 16-23: vthr\n vthr = int(fecs.get(str(crate))[str(card)]\n [\"daughter_board\"]\n [str(db_index)]\n [\"vt\"]\n [str(db_channel)])\n dqch_word |= ((vthr & 0xff) << 16)\n # 24-31: vthr zero\n vthr_zero = int(fecs.get(str(crate))[str(card)]\n [\"daughter_board\"]\n [str(db_index)]\n [\"vt_zero\"]\n [str(db_channel)])\n dqch_word |= ((vthr_zero & 0xff) << 24)\n else:\n dqch_word = 0x0\n dqch[(crate * 16 * 32) + (card * 32) + ch] = dqch_word\n dqcr[(crate * 16) + card] = dqcr_word\n return dqcr, dqch, dqid", "def __init__(\n self, ctx, coll_j_range=Range('J', 0, Symbol('Jmax') + 1),\n coll_m_range=Range('M'),\n coll_j_dumms=tuple(\n Symbol('J{}'.format(i)) for i in range(1, 30)\n ),\n coll_m_dumms=tuple(\n Symbol('M{}'.format(i)) for i in range(1, 30)\n ),\n tilde_range=Range(r'\\tilde{Q}', 0, Symbol('Ntilde')),\n form_tilde=form_tilde,\n m_range=Range('m'), form_m=form_m, **kwargs\n ):\n super().__init__(ctx, **kwargs)\n\n # Convenient names for quantum number access functions inside drudge\n # scripts.\n self.set_name(\n n_=NOf, NOf=NOf, l_=LOf, LOf=LOf, j_=JOf, JOf=JOf,\n tilde_=TildeOf, TildeOf=TildeOf, m_=MOf, 
MOf=MOf,\n pi_=PiOf, PiOf=PiOf\n )\n\n self.coll_j_range = coll_j_range\n self.coll_m_range = coll_m_range\n self.coll_j_dumms = coll_j_dumms\n self.coll_m_dumms = coll_m_dumms\n self.set_dumms(coll_j_range, coll_j_dumms)\n self.set_dumms(coll_m_range, coll_m_dumms)\n\n self.tilde_range = tilde_range\n self.form_tilde = form_tilde\n self.tilde_dumms = tuple(form_tilde(i) for i in self.qp_dumms)\n self.set_dumms(tilde_range, self.tilde_dumms)\n\n self.m_range = m_range\n self.form_m = form_m\n self.m_dumms = tuple(form_m(i) for i in self.qp_dumms)\n self.set_dumms(m_range, self.m_dumms)\n\n self.add_resolver_for_dumms()\n\n # Add utility about CG coefficients and related things.\n self.set_name(\n CG=CG, Wigner3j=Wigner3j, Wigner6j=Wigner6j, Wigner9j=Wigner9j\n )\n\n self._am_sum_simplifiers = BCastVar(self.ctx, {\n # TODO: Add more simplifications here.\n 2: [_sum_2_3j_to_delta],\n 5: [_sum_4_3j_to_6j]\n })\n self.set_tensor_method('simplify_am', self.simplify_am)\n\n # All expressions for J/j, for merging of simple terms with factors in\n # J/j-hat style.\n self._j_exprs = frozenset(itertools.chain(self.coll_j_dumms, (\n JOf(i) for i in self.tilde_dumms\n )))\n\n # For angular momentum coupling.\n self.set_tensor_method('do_amc', self.do_amc)\n\n # Special simplification routines.\n self.set_tensor_method('simplify_pono', self.simplify_pono)\n self.set_tensor_method('deep_simplify', self.deep_simplify)\n self.set_tensor_method('merge_j', self.merge_j)", "def test_get_diagnose(self):\n self._build_sample_graph()\n # Adding cycle a -> d -> a\n self.skill_graph.add_prerequisite(self.sa.id, self.sd.id)\n # Adding singleton\n sg = self.skill_graph.add(Skill.build('g', ''))\n skill_map = SkillMap.load(self.course)\n result = SkillMapMetrics(skill_map).diagnose()\n expected = {\n 'cycles': [[self.sd.id, self.sa.id]],\n 'singletons': [sg.id],\n 'long_chains': []\n }\n self.assertEqual(result, expected)", "def quality_checks(ds):\n parameters = ['barometric_pressure', 'relative_humidity', 'air_temperature', 'longwave_irradiance',\n 'precipitation', 'shortwave_irradiance', 'sea_surface_temperature', 'sea_surface_conductivity',\n 'sea_surface_salinity', 'eastward_wind_velocity', 'northward_wind_velocity']\n for p in parameters:\n # The primary failure mode of the METBK is to repeat the last value it received from a sensor.\n # Use the IOOS QARTOD flat line test to identify these cases (consider it suspect if it repeats\n # for 20+ minutes and failed if it repeats for 35+ minutes).\n flags = qartod.flat_line_test(ds[p].values, ds['time'].values, 1200, 2100, 0.00001)\n\n # The secondary failure mode occurs when the METBK logger sets values to a NaN if no sensor data is available.\n # In the case of the sea surface conductivity and temperature data, different values are used to represent\n # missing data. Specifically, the values are set to a 0.0 and -5.0, respectively. In either case, (NaNs or\n # 0.0 and -5.0) set the QC flag to 9 to indicate \"Missing\" data, and then convert the 0.0 and -5.0 values to\n # a NaN to avoid propagating false numbers into subsequent calculations (e.g. 
salinity or heat flux).\n if p == 'sea_surface_temperature':\n m = ds[p] < -4.0 # use a floating point value just above -5\n flags[m] = 9\n ds[p][m] = np.nan\n ds['sea_surface_salinity'][m] = np.nan\n elif p == 'sea_surface_conductivity':\n m = ds[p] < 0.5 # use a floating point value just above 0\n flags[m] = 9\n ds[p][m] = np.nan\n ds['sea_surface_salinity'][m] = np.nan\n else:\n m = np.isnan(ds[p])\n flags[m] = 9\n\n # add the qc_flags to the dataset, rolling up the results into a single value\n qc_summary = p + '_qc_summary_flag'\n if qc_summary in ds.variables:\n # add the new test results to the existing QC summary results\n qc = ds[qc_summary]\n flags = np.array([flags, qc.values])\n ds[qc_summary] = ('time', flags.max(axis=0, initial=1))\n else:\n # create a new QC summary variable\n ds[qc_summary] = ('time', flags)\n\n # set up the attributes for the new variable\n ds[qc_summary].attrs = dict({\n 'long_name': '%s QC Summary Flag' % ds[p].attrs['long_name'],\n 'standard_name': 'aggregate_quality_flag',\n 'comment': ('Summary quality flag combining the results of the instrument-specific quality tests with '\n 'existing OOI QC tests, if available, to create a single QARTOD style aggregate quality flag'),\n 'flag_values': np.array([1, 2, 3, 4, 9]),\n 'flag_meanings': 'pass not_evaluated suspect_or_of_high_interest fail missing'\n })", "def PrepareMCOutput(vrs):\n\n vrs['nUniq'] = len(vrs[\"codes\"].keys())\n\n vrs[\"Rd\"]= InitReport(vrs)\n\n vrs[\"Rd\"][\"EstimateDiversity_d\"] = EstimateDiversity(vrs)\n \n vrs[\"Rd\"][\"Estimate_Bias_d\"] = EstimateBias(vrs)\n\n vrs[\"fastq_fp\"] = vrs[\"fastq_fp\"]\n\n return vrs[\"Rd\"]", "def test_dq_1_conftest(dq_1):\n assert dq_1._dll.head.data == 9", "def total_sdram_requirements(self):", "def test_get_qasms_noname(self):\n q_program = QuantumProgram()\n qr = q_program.create_quantum_register(size=3)\n cr = q_program.create_classical_register(size=3)\n qc1 = q_program.create_circuit(qregisters=[qr], cregisters=[cr])\n qc2 = q_program.create_circuit(qregisters=[qr], cregisters=[cr])\n qc1.h(qr[0])\n qc1.cx(qr[0], qr[1])\n qc1.cx(qr[1], qr[2])\n qc1.measure(qr[0], cr[0])\n qc1.measure(qr[1], cr[1])\n qc1.measure(qr[2], cr[2])\n qc2.h(qr)\n qc2.measure(qr[0], cr[0])\n qc2.measure(qr[1], cr[1])\n qc2.measure(qr[2], cr[2])\n results = dict(zip(q_program.get_circuit_names(), q_program.get_qasms()))\n qr_name_len = len(qr.name)\n cr_name_len = len(cr.name)\n self.assertEqual(len(results[qc1.name]), qr_name_len * 9 + cr_name_len * 4 + 147)\n self.assertEqual(len(results[qc2.name]), qr_name_len * 7 + cr_name_len * 4 + 137)", "def DM(self):", "def test_parse_hsp_details(self):\n for query in self.result:\n # should check integers in next version.\n first_hsp = self.result[query][0][0]\n self.assertEqual(first_hsp[\"QUERY ID\"], 1)\n self.assertEqual(first_hsp[\"BIT_SCORE\"], \"1023.46\")\n self.assertEqual(first_hsp[\"SCORE\"], \"2645\")\n self.assertEqual(first_hsp[\"E_VALUE\"], \"0.333\")\n self.assertEqual(first_hsp[\"QUERY_START\"], \"4\")\n self.assertEqual(first_hsp[\"QUERY_END\"], \"18\")\n self.assertEqual(first_hsp[\"QUERY_ALIGN\"], \"ELEPHANTTHISISAHITTIGER\")\n self.assertEqual(first_hsp[\"MIDLINE_ALIGN\"], \"ORCA-WHALE\")\n self.assertEqual(first_hsp[\"SUBJECT_ALIGN\"], \"SEALSTHIS---HIT--GER\")\n self.assertEqual(first_hsp[\"SUBJECT_START\"], \"5\")\n self.assertEqual(first_hsp[\"SUBJECT_END\"], \"19\")\n self.assertEqual(first_hsp[\"PERCENT_IDENTITY\"], \"55\")\n self.assertEqual(first_hsp[\"POSITIVE\"], \"555\")\n 
self.assertEqual(first_hsp[\"GAP_OPENINGS\"], 0)\n self.assertEqual(first_hsp[\"ALIGNMENT_LENGTH\"], \"14\")\n\n gap_hsp = self.result[query][0][1]\n self.assertEqual(gap_hsp[\"GAP_OPENINGS\"], \"33\")", "def ADT_QCD_LEPTON():\n\n # As input for the quark-mass ratios, we use the quark masses at MZ and the lepton masses\n ip = Num_input()\n\n mu = ip.mu_at_MZ\n md = ip.md_at_MZ\n ms = ip.ms_at_MZ\n me = ip.me\n mmu = ip.mmu\n mtau = ip.mtau\n\n # Create the ADT:\n\n gamma_hat_P63eu_Q81u = np.hstack((-16 * me**2/mu**2, np.zeros(5)))\n gamma_hat_P63muu_Q81u = np.hstack((np.zeros(1), -16 * mmu**2/mu**2, np.zeros(4)))\n gamma_hat_P63tauu_Q81u = np.hstack((np.zeros(2), -16 * mtau**2/mu**2, np.zeros(3)))\n\n gamma_hat_P63ed_Q81d = np.hstack((-16 * me**2/md**2, np.zeros(5)))\n gamma_hat_P63mud_Q81d = np.hstack((np.zeros(1), -16 * mmu**2/md**2, np.zeros(4)))\n gamma_hat_P63taud_Q81d = np.hstack((np.zeros(2), -16 * mtau**2/md**2, np.zeros(3)))\n\n gamma_hat_P63es_Q81s = np.hstack((-16 * me**2/ms**2, np.zeros(5)))\n gamma_hat_P63mus_Q81s = np.hstack((np.zeros(1), -16 * mmu**2/ms**2, np.zeros(4)))\n gamma_hat_P63taus_Q81s = np.hstack((np.zeros(2), -16 * mtau**2/ms**2, np.zeros(3)))\n\n\n\n gamma_hat_P63eu_Q82u = np.hstack((np.zeros(3), -16 * me**2/mu**2, np.zeros(2)))\n gamma_hat_P63muu_Q82u = np.hstack((np.zeros(4), -16 * mmu**2/mu**2, np.zeros(1)))\n gamma_hat_P63tauu_Q82u = np.hstack((np.zeros(5), -16 * mtau**2/mu**2))\n\n gamma_hat_P63ed_Q82d = np.hstack((np.zeros(3), -16 * me**2/md**2, np.zeros(2)))\n gamma_hat_P63mud_Q82d = np.hstack((np.zeros(4), -16 * mmu**2/md**2, np.zeros(1)))\n gamma_hat_P63taud_Q82d = np.hstack((np.zeros(5), -16 * mtau**2/md**2))\n\n gamma_hat_P63es_Q82s = np.hstack((np.zeros(3), -16 * me**2/ms**2, np.zeros(2)))\n gamma_hat_P63mus_Q82s = np.hstack((np.zeros(4), -16 * mmu**2/ms**2, np.zeros(1)))\n gamma_hat_P63taus_Q82s = np.hstack((np.zeros(5), -16 * mtau**2/ms**2))\n\n\n\n gamma_hat_P62ue_Q83u = np.hstack((-16 * me**2/mu**2, np.zeros(5)))\n gamma_hat_P62umu_Q83u = np.hstack((np.zeros(1), -16 * mmu**2/mu**2, np.zeros(4)))\n gamma_hat_P62utau_Q83u = np.hstack((np.zeros(2), -16 * mtau**2/mu**2, np.zeros(3)))\n\n gamma_hat_P62de_Q83d = np.hstack((-16 * me**2/md**2, np.zeros(5)))\n gamma_hat_P62dmu_Q83d = np.hstack((np.zeros(1), -16 * mmu**2/md**2, np.zeros(4)))\n gamma_hat_P62dtau_Q83d = np.hstack((np.zeros(2), -16 * mtau**2/md**2, np.zeros(3)))\n\n gamma_hat_P62se_Q83s = np.hstack((-16 * me**2/ms**2, np.zeros(5)))\n gamma_hat_P62smu_Q83s = np.hstack((np.zeros(1), -16 * mmu**2/ms**2, np.zeros(4)))\n gamma_hat_P62stau_Q83s = np.hstack((np.zeros(2), -16 * mtau**2/ms**2, np.zeros(3)))\n\n\n\n gamma_hat_P62ue_Q84u = np.hstack((np.zeros(3), -16 * me**2/mu**2, np.zeros(2)))\n gamma_hat_P62umu_Q84u = np.hstack((np.zeros(4), -16 * mmu**2/mu**2, np.zeros(1)))\n gamma_hat_P62utau_Q84u = np.hstack((np.zeros(5), -16 * mtau**2/mu**2))\n\n gamma_hat_P62de_Q84d = np.hstack((np.zeros(3), -16 * me**2/md**2, np.zeros(2)))\n gamma_hat_P62dmu_Q84d = np.hstack((np.zeros(4), -16 * mmu**2/md**2, np.zeros(1)))\n gamma_hat_P62dtau_Q84d = np.hstack((np.zeros(5), -16 * mtau**2/md**2))\n\n gamma_hat_P62se_Q84s = np.hstack((np.zeros(3), -16 * me**2/ms**2, np.zeros(2)))\n gamma_hat_P62smu_Q84s = np.hstack((np.zeros(4), -16 * mmu**2/ms**2, np.zeros(1)))\n gamma_hat_P62stau_Q84s = np.hstack((np.zeros(5), -16 * mtau**2/ms**2))\n\n\n\n gamma_hat_Q81u = np.vstack((gamma_hat_P63eu_Q81u, gamma_hat_P63muu_Q81u, gamma_hat_P63tauu_Q81u, np.zeros((15,6))))\n gamma_hat_Q81d = np.vstack((np.zeros((3,6)), 
gamma_hat_P63ed_Q81d, gamma_hat_P63mud_Q81d, gamma_hat_P63taud_Q81d, np.zeros((12,6))))\n gamma_hat_Q81s = np.vstack((np.zeros((6,6)), gamma_hat_P63es_Q81s, gamma_hat_P63mus_Q81s, gamma_hat_P63taus_Q81s, np.zeros((9,6))))\n\n gamma_hat_Q82u = np.vstack((gamma_hat_P63eu_Q82u, gamma_hat_P63muu_Q82u, gamma_hat_P63tauu_Q82u, np.zeros((15,6))))\n gamma_hat_Q82d = np.vstack((np.zeros((3,6)), gamma_hat_P63ed_Q82d, gamma_hat_P63mud_Q82d, gamma_hat_P63taud_Q82d, np.zeros((12,6))))\n gamma_hat_Q82s = np.vstack((np.zeros((6,6)), gamma_hat_P63es_Q82s, gamma_hat_P63mus_Q82s, gamma_hat_P63taus_Q82s, np.zeros((9,6))))\n\n gamma_hat_Q83u = np.vstack((np.zeros((9,6)), gamma_hat_P62ue_Q83u, gamma_hat_P62umu_Q83u, gamma_hat_P62utau_Q83u, np.zeros((6,6))))\n gamma_hat_Q83d = np.vstack((np.zeros((12,6)), gamma_hat_P62de_Q83d, gamma_hat_P62dmu_Q83d, gamma_hat_P62dtau_Q83d, np.zeros((3,6))))\n gamma_hat_Q83s = np.vstack((np.zeros((15,6)), gamma_hat_P62se_Q83s, gamma_hat_P62smu_Q83s, gamma_hat_P62stau_Q83s))\n\n gamma_hat_Q84u = np.vstack((np.zeros((9,6)), gamma_hat_P62ue_Q84u, gamma_hat_P62umu_Q84u, gamma_hat_P62utau_Q84u, np.zeros((6,6))))\n gamma_hat_Q84d = np.vstack((np.zeros((12,6)), gamma_hat_P62de_Q84d, gamma_hat_P62dmu_Q84d, gamma_hat_P62dtau_Q84d, np.zeros((3,6))))\n gamma_hat_Q84s = np.vstack((np.zeros((15,6)), gamma_hat_P62se_Q84s, gamma_hat_P62smu_Q84s, gamma_hat_P62stau_Q84s))\n\n\n\n\n gamma_hat = np.array([gamma_hat_Q81u, gamma_hat_Q81d, gamma_hat_Q81s, gamma_hat_Q82u, gamma_hat_Q82d, gamma_hat_Q82s,\n gamma_hat_Q83u, gamma_hat_Q83d, gamma_hat_Q83s, gamma_hat_Q84u, gamma_hat_Q84d, gamma_hat_Q84s])\n\n\n # Return the tensor\n\n # tensor, zeile, spalte\n\n return gamma_hat", "def qd(self):\n return self._qd", "def __init__(self, meas, verb,pvsr, default_site,delete_created_measurements,pvsr_default_conf_check_cycle,pvsr_meas_types):\r\n \r\n logging.info(\"adding capability: {0}\".format(meas[\"name\"]))\r\n \r\n self._verb=verb\r\n if verb==mplane.model.VERB_QUERY:\r\n cap = mplane.model.Capability(label=meas[\"name\"]+\"-query\", when = \"past ... now / 15s\", verb=mplane.model.VERB_QUERY)\r\n elif verb==mplane.model.VERB_MEASURE:\r\n cap = mplane.model.Capability(label=meas[\"name\"]+\"-measure\", when = \"now ... 
future / 15s\", verb=mplane.model.VERB_MEASURE)\r\n else:\r\n raise ValueError(\"Verb is not supported: {0}\".format(verb))\r\n cap.add_result_column(\"time\")\r\n \r\n self._mplane2uda={}\r\n self._uda_name2uda = {}\r\n \r\n self._pvsr_default_conf_check_cycle=pvsr_default_conf_check_cycle\r\n \r\n try:\r\n for k in sorted(meas[\"types\"].keys()):\r\n if \"first\" in meas[\"types\"][k]:\r\n logging.debug(\" result colum: {0}\".format(meas[\"types\"][k][\"first\"]))\r\n cap.add_result_column(meas[\"types\"][k][\"first\"])\r\n if \"second\" in meas[\"types\"][k]:\r\n logging.debug(\" result colum: {0}\".format(meas[\"types\"][k][\"second\"]))\r\n cap.add_result_column(meas[\"types\"][k][\"second\"])\r\n \r\n if \"PropertyType\" in pvsr_meas_types[k]:\r\n for i in range(len(pvsr_meas_types[k][\"PropertyType\"])):\r\n self._uda_name2uda[pvsr_meas_types[k][\"PropertyType\"][i][\"Name\"]]=pvsr_meas_types[k][\"PropertyType\"][i]\r\n \r\n if \"index_mplane_name\" in meas:\r\n logging.debug(\" parameter: {0}\".format(meas[\"index_mplane_name\"]))\r\n cap.add_parameter(meas[\"index_mplane_name\"])\r\n \r\n if \"mplane_constants\" in meas:\r\n for k,v in sorted(meas[\"mplane_constants\"].items()):\r\n logging.debug(\" parameter: {0} with value {1}\".format(k,v))\r\n cap.add_parameter(k,v)\r\n \r\n if \"uda_name2mplane_name\" in meas:\r\n for k,v in sorted(meas[\"uda_name2mplane_name\"].items()):\r\n if k in self._uda_name2uda:\r\n logging.debug(\" parameter: {0}\".format(v))\r\n cap.add_parameter(v)\r\n self._mplane2uda[v]=k\r\n else:\r\n logging.error(\" unknown UDA: {0}\".format(v))\r\n except Exception as e:\r\n logging.critical(\"Error during capability creation: {0}\".format(e))\r\n raise e\r\n\r\n super(PvsrService, self).__init__(cap)\r\n \r\n self._pvsr = pvsr\r\n self._meas = meas\r\n self._default_site = default_site\r\n self._delete_created_measurements = delete_created_measurements\r\n self._pvsr_meas_types = pvsr_meas_types", "def optimize_dcr(dg):\n # files to consider. fixme: right now only works with one file\n sto = lh5.Store()\n lh5_dir = os.path.expandvars(dg.config['lh5_dir'])\n raw_list = lh5_dir + dg.fileDB['raw_path'] + '/' + dg.fileDB['raw_file']\n f_raw = raw_list.values[0] \n \n tb_raw = 'ORSIS3302DecoderForEnergy/raw/'\n tb_data = sto.read_object(tb_raw, f_raw)\n \n cycle = dg.fileDB['cycle'].values[0]\n f_results = f'./temp_{cycle}.h5'\n \n write_output = True\n \n # adjust dsp config \n with open('opt_dcr.json') as f:\n dsp_config = json.load(f, object_pairs_hook=OrderedDict)\n # pprint(dsp_config)\n # exit()\n \n # set dcr parameters\n # rise, flat, dcr_tstart = 200, 1000, 'tp_0+1.5*us' # default\n # dcr_rise, dcr_flat, dcr_tstart = 100, 3000, 'tp_0+3*us' # best so far?\n dcr_rise, dcr_flat, dcr_tstart = 100, 2500, 'tp_0+1*us'\n dsp_config['processors']['dcr_raw']['args'][1] = dcr_rise\n dsp_config['processors']['dcr_raw']['args'][2] = dcr_flat\n dsp_config['processors']['dcr_raw']['args'][3] = dcr_tstart\n \n # set trap energy parameters\n # ene_rise, ene_flat = \"2*us\", \"1*us\" # best? 
from optimize_trap\n ene_rise, ene_flat = \"10*us\", \"5*us\"\n dsp_config['processors']['wf_trap']['args'][1] = ene_rise\n dsp_config['processors']['wf_trap']['args'][2] = ene_flat\n \n # adjust pole-zero constant\n dsp_config['processors']['wf_pz']['defaults']['db.pz.tau'] = '64.4*us'\n # dsp_config['processors']['wf_pz']['defaults']['db.pz.tau'] = '50*us'\n # dsp_config['processors']['wf_pz']['defaults']['db.pz.tau'] = '100*us'\n \n # run dsp\n print('Running DSP ...')\n t_start = time.time()\n pc, tb_out = build_processing_chain(tb_data, dsp_config, verbosity=1)\n pc.execute()\n t_elap = (time.time() - t_start)/60\n print(f'Done. Elapsed: {t_elap:.2f} min')\n \n df_out = tb_out.get_dataframe()\n \n if write_output:\n df_out.to_hdf(f_results, key='opt_dcr')\n print('Wrote output file:', f_results)", "def __init__(self) -> None:\n self.path_config = '/home/equipment/EQ-scripts/equipment.conf'\n self.configParse()\n self.request_devices = \"\"\"With arm_address as (SELECT av.obj_id device_id,\n av.value_raw house_id\n FROM os_usr.dev_attr_values av\n WHERE av.attr_id = 3),\n swithes as (SELECT device_type_id\n FROM os_eqm.device_types\n WHERE device_class IN\n (\n SELECT device_class_id\n FROM os_eqm.device_classes\n WHERE guid IN\n (\n SELECT obj_guid\n FROM os_lib.objects_in_nav_categories\n WHERE nav_cat_id in\n (\n SELECT nav_cat_id\n FROM nav_categories\n WHERE guid = '75C0F3733B084DBDAC604167D298B2F5'\n )\n )\n ))\n SELECT d.device_id,\n na.net_address,\n dt.name,\n trim(os_usr.ertel_utils.get_prop_str(d.device_id,'MAC_ADRES_USTROJSTVA')) \n mac_sw\n FROM os_usr.geo_addresses ga,\n os_eqm.net_addresses na,\n arm_address arm ,\n device_types dt,\n devices d,\n swithes sw\n WHERE arm.house_id = ga.house_id\n and arm.device_id = d.device_id\n and na.device_id = d.device_id and na.is_management = '1'\n AND dt.device_type_id = d.device_type\n and dt.device_type_id in sw.device_type_id\n and ga.unified_house_id = '<house_id>'\n \"\"\"\n self.request_adresses = \"\"\"SELECT av.obj_id device_id, av.value_raw house_id\n FROM os_usr.dev_attr_values av \n WHERE av.attr_id = 2 AND av.VALUE_RAW LIKE '%<house>%'\"\"\"" ]
[ "0.5650388", "0.5577619", "0.55206364", "0.53261775", "0.53095275", "0.5304296", "0.5297168", "0.52906555", "0.52465874", "0.52195776", "0.51906115", "0.51801974", "0.51726836", "0.51407194", "0.5120269", "0.5100831", "0.5087694", "0.50870574", "0.5077208", "0.5037777", "0.50333977", "0.5000341", "0.49937353", "0.49884233", "0.49643183", "0.49461633", "0.49445945", "0.4940466", "0.49197516", "0.49052942" ]
0.57871085
0
create polygon for corresponding mesh code
def create_polygon(meshcode):
    lat1,lon1 = ju.to_meshpoint(meshcode,0,0)
    lat2,lon2 = ju.to_meshpoint(meshcode,1,1)
    poly_text = 'POLYGON (('+str(lon1)+' '+str(lat1)+','+str(lon1)+' '+str(lat2)+','+str(lon2)+' '+str(lat2)+','+str(lon2)+' '+str(lat1)+','+str(lon1)+' '+str(lat1)+'))'
    return poly_text
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generatePolygons():", "def _createpoly(self):\n return self.cv.create_polygon((0, 0, 0, 0, 0, 0), fill=\"\", outline=\"\")", "def make_polygon(*coords):\n global GEOMETRY_SURF, POLYGONS,col\n if len(coords) < 3:\n print(\"Warning: Invalid polygon passed, ignoring...\")\n return\n start = coords[0]\n prev = coords[0]\n for coord in coords:\n POLYGONS |= {coord}\n line = Boundary(prev[0],prev[1],coord[0],coord[1]) #add segment to WALL list\n prev = coord\n line = Boundary(prev[0], prev[1],start[0],start[1])\n #now draw poly\n pygame.draw.polygon(GEOMETRY_SURF,col[\"SHAPECOL\"], coords)\n return", "def generatePolygons(self, *args, **kwargs): \n return 'var PloneMapPolygons = [' + \\\n ''.join([\"{ 'id': '%s', 'path' : %s,'title':'%s'},\" % (object.id, object.polygon, object.Title()) \n for object in self.context.objectValues() \n if hasattr(object, 'polygon') and len(object.polygon) > 0 ])[:-1] \\\n + '];'", "def polygonal(resolution, in_vertices, out_vertices_list = None):\n in_vertices = [Point(in_vertices[k,0],in_vertices[k,1]) for k in range(in_vertices.shape[0])] \n\n domain = mshr.Polygon(in_vertices) # https://bitbucket.org/fenics-project/mshr/wiki/API/Polygon\n # Create polygon defined by the given vertices. Vertices must be in counter-clockwise order and free of self-intersections.\n \n if(out_vertices_list is not None):\n for out_vertices in out_vertices_list:\n out_vertices = [Point(out_vertices[k,0],out_vertices[k,1]) for k in range(out_vertices.shape[0])]\n domain -= mshr.Polygon(out_vertices)\n \n mesh=mshr.generate_mesh(domain, resolution)\n\n # TODO : add refined mesh\n # if(refine_mesh):\n # d = mesh.topology().dim()\n \n # class To_refine(SubDomain):\n # def inside(self, x, on_boundary):\n # return x[1]<=0 and x[1]>= -l_mot/2-h_grid-l_vacuum/4\n\n # to_refine = To_refine()\n # marker = MeshFunction(\"bool\", mesh, d, False)\n # to_refine.mark(marker, True)\n # mesh = refine(mesh,marker)\n\n return mesh", "def create_partition(mesh,polygons,enforce_exact=False):", "def create_polygon(self, vertices, style=None, parent=None):\n d = 'M %f %f L' % (vertices[0].x, vertices[0].y)\n for p in vertices[1:]:\n d = d + ' %f,%f' % (p.x, p.y)\n if vertices[0] != vertices[-1]:\n d = d + ' %f,%f' % (vertices[0].x, vertices[0].y)\n attrs = {'d': d}\n return self.create_path(attrs, style, parent)", "def to_shapely_polygon(self):\n # load shapely lazily, which makes the dependency more optional\n import shapely.geometry\n return shapely.geometry.Polygon([(point[0], point[1]) for point in self.exterior])", "def _proc_polygon(self, tokens, filled):\n\n pts = [(p[\"x\"], p[\"y\"]) for p in tokens[\"points\"]]\n component = Polygon(pen=self.pen, points=pts, filled=filled)\n\n return component", "def from_polyfile(name):\n\n from anuga.utilities.numerical_tools import anglediff\n from math import pi\n import os.path\n root, ext = os.path.splitext(name)\n\n if ext == 'poly':\n filename = name\n else:\n filename = name + '.poly'\n\n\n fid = open(filename)\n\n points = [] #x, y\n values = [] #z\n ##vertex_values = [] #Repeated z\n triangles = [] #v0, v1, v2\n\n lines = fid.readlines()\n\n keyword = lines[0].strip()\n msg = 'First line in .poly file must contain the keyword: POINTS'\n assert keyword == 'POINTS', msg\n\n offending = 0\n i = 1\n while keyword == 'POINTS':\n line = lines[i].strip()\n i += 1\n\n if line == 'POLYS':\n keyword = line\n break\n\n fields = line.split(':')\n assert int(fields[0]) == i-1, 'Point indices not consecutive'\n\n #Split the three floats\n xyz = 
fields[1].split()\n\n x = float(xyz[0])\n y = float(xyz[1])\n z = float(xyz[2])\n\n points.append([x, y])\n values.append(z)\n\n\n k = i\n while keyword == 'POLYS':\n line = lines[i].strip()\n i += 1\n\n if line == 'END':\n keyword = line\n break\n\n\n fields = line.split(':')\n assert int(fields[0]) == i-k, 'Poly indices not consecutive'\n\n #Split the three indices\n vvv = fields[1].split()\n\n i0 = int(vvv[0])-1\n i1 = int(vvv[1])-1\n i2 = int(vvv[2])-1\n\n #Check for and exclude degenerate areas\n x0 = points[i0][0]\n y0 = points[i0][1]\n x1 = points[i1][0]\n y1 = points[i1][1]\n x2 = points[i2][0]\n y2 = points[i2][1]\n\n area = abs((x1*y0-x0*y1)+(x2*y1-x1*y2)+(x0*y2-x2*y0))/2\n if area > 0:\n\n #Ensure that points are arranged in counter clock-wise order\n v0 = [x1-x0, y1-y0]\n v1 = [x2-x1, y2-y1]\n v2 = [x0-x2, y0-y2]\n\n a0 = anglediff(v1, v0)\n a1 = anglediff(v2, v1)\n a2 = anglediff(v0, v2)\n\n\n if a0 < pi and a1 < pi and a2 < pi:\n #all is well\n j0 = i0\n j1 = i1\n j2 = i2\n else:\n #Swap two vertices\n j0 = i1\n j1 = i0\n j2 = i2\n\n triangles.append([j0, j1, j2])\n ##vertex_values.append([values[j0], values[j1], values[j2]])\n else:\n offending +=1\n\n log.critical('Removed %d offending triangles out of %d'\n % (offending, len(lines)))\n return points, triangles, values", "def give_polygon(vertices, points):\n polygon = np.zeros((len(vertices), 2))\n for i, vertex in enumerate(vertices):\n polygon[i] = points[vertex]\n # End point of a polygon equals to start point\n polygon = polygon.tolist()\n if polygon[-1] != polygon[0]:\n polygon.append(polygon[0])\n return polygon", "def to_poly_file(self, filename):\n\n def getinsidepoint(pts):\n direct = (pts[0] + pts[1] + pts[2]) / 3 - pts[0]\n return pts[0] + 0.001 * direct\n\n if self.dim == 2:\n self.leaveonlyphysicalsurfaces()\n if self.dim == 3:\n self.leaveonlyphysicalvolumes()\n\n # write nodes\n nodes = []\n map = {}\n for x in self.d0.values():\n assert isinstance(x, point)\n nodes.append(x.getxyz())\n map[x.getn()] = len(nodes)\n\n\n s = \"# nodes\\n%d %d 0 0\\n\" % (len(nodes), self.dim)\n if self.dim == 2:\n ptstr = \" %d %f %f\\n\"\n ptstr2 = \" %d %f %f %d\\n\"\n else:\n ptstr = \" %d %f %f %f\\n\"\n ptstr2 = \" %d %f %f %f %d\\n\"\n\n for n, x in enumerate(nodes):\n s += ptstr % tuple([n + 1] + list(x[:self.dim]))\n\n # facets\n # first write external polygon, then hole polygons and then point in each\n # hole polygon\n facets = []\n if self.dim == 2:\n\n hole_pts = []\n regions=[]\n for x2 in self.d2.values():\n assert isinstance(x2, surface)\n for x1 in x2.getlines():\n assert isinstance(x1, line)\n p = [map[y.getn()] for y in x1.getpoints()]\n bc = self.getBCnum(x1.getn())\n facets.append((p, bc))\n\n for hole in x2.getholepoints():\n hole_pts.append(hole.getxyz())\n\n # regions\n for x in self.phys2.values():\n assert isinstance(x, physicalsurface)\n for x2 in x.getsurfaces():\n if not x2.is_hole:\n regions.append(x2.getinsidepoint().getxyz() + [x.getn()])\n\n # number of facets, boundary markers=yes\n s += \"# segments\\n%d 1\\n\" % len(facets)\n for ii, (p, bc) in enumerate(facets):\n # number of corners, corner 1, corner 2, ...\n s += \" %d %s %d\\n\" % (ii + 1, ' '.join([str(ii) for ii in p]), bc)\n # holes\n s += \"# holes\\n%d\\n\" % len(hole_pts)\n for ii, x0 in enumerate(hole_pts):\n # number of corners, corner 1, corner 2, ...\n s += \" %d %s\\n\" % (ii + 1, ' '.join([str(ii) for ii in x0]))\n # regions\n s += \"# regions\\n%d\\n\" % len(regions)\n for ii, x0 in enumerate(regions):\n s += \" %d %f %f 
%d\\n\" % tuple([ii + 1] + x0)\n\n if self.dim == 3:\n\n for x in self.d2.values():\n assert isinstance(x, surface)\n p = [map[y.getn()] for y in x.getpoints()]\n h = []\n pts = []\n for hole in x.getholepoints():\n h.append([map[y.getn()] for y in hole])\n pts.append(getinsidepoint(hole).getxyz())\n bc = self.getBCnum(x.getn())\n facets.append((p, bc, h, pts))\n # number of facets, boundary markers=yes\n s += \"# segments\\n%d 1\\n\" % len(facets)\n for p, bc, h, holes in facets:\n # number of polygons, # of holes, boundary marker\n s += \" %d %d %d\\n\" % (1 + len(h), len(h), bc)\n # number of corners, corner 1, corner 2, ...\n s += \" %d %s\\n\" % (len(p), ' '.join([str(ii) for ii in p]))\n for x in h:\n # number of corners, corner 1, corner 2, ...\n s += \" %d %s\\n\" % (len(x), ' '.join([str(ii) for ii in p]))\n for i, pt in enumerate(holes):\n # hole #, x, y, z\n s += ptstr % tuple([i + 1] + list(pt))\n\n # volume holes\n s += \"# holes\\n0\\n\"\n # regions\n regions=[]\n for x in self.phys3.values():\n assert isinstance(x, physicalvolume)\n for v in x.getvolumes():\n regions.append(v.getinsidepoint().getxyz()+[x.getn()])\n s += \"# regions\\n%d\\n\" % len(regions)\n for i, x in enumerate(regions):\n s += ptstr2 % tuple([i + 1] + list(x))\n\n open(filename, \"w\").write(s)", "def decorate_scene():\n make_polygon( (100,100),(120,140),(270,70) )\n make_polygon( (300,10), (300,550), (340,452),(380,300), (330,50))\n make_polygon( (200,450), (100,450), (100,500), (200,500) )\n make_polygon( (130,320), (150,300), (140,280) )\n return", "def hex_to_polygon(hexid):\n list_of_coords_list=h3.h3_to_geo_boundary(hexid,geo_json=True)\n return Polygon([tuple(i) for i in list_of_coords_list])", "def regular_polygon(self, n, field = QQ):\n npi = 3.14159265359\n verts = []\n for i in range(n):\n t = 2*npi*i/n\n verts.append([sin(t),cos(t)])\n verts = [[field(RDF(x)) for x in y] for y in verts]\n return Polyhedron(vertices = verts, field = field)", "def polygon(self, pointlist, cls=None, style=None, attrs=None):\n payload = self._meta.make_payload(cls, style, attrs)\n pts_str = ' '.join('%s,%s' % (x, y) for x, y in pointlist)\n self.elements.append(\"\"\"<polygon points=\"%s\" %s/>\"\"\" % (pts_str, payload))\n return self", "def polygon(self, pointlist, cls=None, style=None, attrs=None):\n payload = self._meta.make_payload(cls, style, attrs)\n pts_str = ' '.join('%s,%s' % (x, y) for x, y in pointlist)\n self.elements.append(\"\"\"<polygon points=\"%s\" %s/>\"\"\" % (pts_str, payload))\n return self", "def regular_polygon(sides, radius, height):\n global _cmds\n _cmds = \"}\\n\\n\" + _cmds\n for wedge in range(sides):\n p1 = _cart(radius, wedge*360/sides)\n p2 = _cart(radius, (wedge+1)*360/sides)\n triangle([0, 0], p1, p2, height)\n _cmds = \"union(){\\n\" + _cmds", "def _create_main_shape(self):\n\n a, b = gc( self.size/2,\n self._ZERO_DEGREES - self.angle,\n self._180_DEGREES + self.angle)\n self.wafer_points = zip(a,b)\n self.wafer_polygon = gdspy.Polygon(self.wafer_points, self.WAFER_LAYER)\n self.cell.add(self.wafer_polygon)", "def create_new_polygon(self, coords, **options):\n\n if 'outline' not in options:\n options['outline'] = self.variables.foreground_color\n if 'width' not in options:\n options['width'] = self.variables.poly_border_width\n if 'fill' not in options:\n options['fill'] = ''\n\n shape_id = self.create_polygon(*coords, **options)\n self.variables.vector_objects[str(shape_id)] = VectorObject(SHAPE_TYPES.POLYGON, options)\n self.variables.shape_ids.append(shape_id)\n 
self.set_shape_pixel_coords_from_canvas_coords(shape_id, coords)\n self.variables.current_shape_id = shape_id\n return shape_id", "def polygon_from_str(line):\r\n polygon_points = [float(o) for o in line.split(',')[:8]]\r\n polygon_points = np.array(polygon_points).reshape(4, 2)\r\n polygon = Polygon(polygon_points).convex_hull\r\n return polygon", "def produce_polygon(polygon_ordered_coordinates: List, zoom: int, plot_polygon: bool = False) -> Path:\n polygon_tile_points = []\n for item in polygon_ordered_coordinates:\n polygon_tile_points += [Utility.get_tile(*item, zoom)]\n polygon_tile_points += [polygon_tile_points[0]]\n polygon = Path(polygon_tile_points)\n if plot_polygon:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n patch = patches.PathPatch(polygon, facecolor='orange', lw=2)\n ax.add_patch(patch)\n ax.set_xlim(min(polygon_tile_points, key = lambda item: item[0])[0], max(polygon_tile_points, key = lambda item: item[0])[0])\n ax.set_ylim(min(polygon_tile_points, key = lambda item: item[1])[1], max(polygon_tile_points, key = lambda item: item[1])[1])\n plt.show()\n return polygon", "def construct_polygon(self, polygon_longs: List, polygon_lats: List) -> gpd.GeoDataFrame:\n\n polygon_geom = Polygon(zip(polygon_longs, polygon_lats))\n\n crs = {'init': 'epsg:4326'}\n polygon = gpd.GeoDataFrame(index=[0], crs=crs, geometry=[polygon_geom])\n\n polygon.to_file(filename=f'{self.polygon_path}/polygon_{self.postfix}.geojson', driver='GeoJSON')\n polygon.to_file(filename=f'{self.polygon_path}/polygon_{self.postfix}.shp', driver=\"ESRI Shapefile\")\n\n self.monitor.info(\"-> Created area polygon.\")\n return polygon", "def _getshapepoly(self, polygon, compound=False):\n if self._resizemode == \"user\" or compound:\n t11, t12, t21, t22 = self._shapetrafo\n elif self._resizemode == \"auto\":\n l = max(1, self._pensize/5.0)\n t11, t12, t21, t22 = l, 0, 0, l\n elif self._resizemode == \"noresize\":\n return polygon\n return tuple((t11*x + t12*y, t21*x + t22*y) for (x, y) in polygon)", "def get_mesh_boundary(triangles):\n # Create edges and sort each vertices on each edge.\n edge0 = triangles[:,0:2]\n edge1 = triangles[:,1:3]\n edge2 = triangles.take((0,2), axis=1)\n edges = np.concatenate((edge0, edge1, edge2), axis=0)\n edge_sort = np.sort(edges, axis=1)\n\n # Get unique edges that are only present once.\n (uniq, uniq_ids, counts) = np.unique(edge_sort, axis=0, return_index=True, return_counts=True)\n edge_inds = np.arange(edge_sort.shape[0], dtype=int)\n outer_edge_ids = edge_inds[np.in1d(edge_inds, uniq_ids[counts==1])]\n outer_edges = edge_sort[outer_edge_ids,:]\n num_outer_edges = outer_edges.shape[0]\n\n # Assume we need to close the polygon.\n num_outer_verts = num_outer_edges + 1\n\n # Loop over outer edges and use traversal method to get ordered vertices.\n v_start = outer_edges[0,0]\n v_end = outer_edges[0,1]\n vert_inds = -1*np.ones(num_outer_verts, dtype=int)\n vert_inds[0] = v_start\n vert_inds[1] = v_end\n vert_num = 2\n outer_edges[0,:] = -1\n for edge_num in range(1,num_outer_edges):\n edge_inds_next = np.where(outer_edges == v_end)\n if (edge_inds_next[0].shape[0] < 1):\n msg = \"Next edge not found for vertex %d\" % v_end\n raise ValueError(msg)\n edge_ind_next = edge_inds_next[0][0]\n vert_ind_next = 0\n if (edge_inds_next[1][0] == 0):\n vert_ind_next = 1\n vert_inds[vert_num] = outer_edges[edge_ind_next, vert_ind_next]\n outer_edges[edge_ind_next, :] = -1\n v_end = vert_inds[vert_num]\n vert_num += 1\n\n return vert_inds", "def polygon_from_str(line):\n # remove possible 
utf-8 BOM\n if line.startswith('\\xef\\xbb\\xbf'):\n line = line[3:]\n polygon_points = [float(o) for o in line.split(',')[:8]]\n polygon_points = np.array(polygon_points).reshape(4, 2)\n polygon = Polygon(polygon_points).convex_hull\n return polygon", "def generate(pts):\n cmds.polyCreateFacet(name=\"shirt\", p=points)\n cmds.polyTriangulate()\n cmds.polySubdivideFacet(dv=SUBDIVISIONS)\n cmds.polyTriangulate()", "def define_polygon(cls, polygon):\n \n num_obj = cls()\n num_obj.coord = [np.array(polygon)]\n return num_obj", "def generate_mesh(\n poly_coords: np.ndarray,\n hole_coords: Optional[List[np.ndarray]] = None,\n min_points: Optional[int] = None,\n max_edge_length: Optional[float] = None,\n convex_hull: bool = False,\n boundary: Optional[np.ndarray] = None,\n preserve_boundary: bool = False,\n min_angle: float = 32.5,\n **kwargs,\n) -> Tuple[np.ndarray, np.ndarray]:\n poly_coords = ensure_unique(poly_coords)\n if hole_coords is None:\n hole_coords = []\n hole_coords = [ensure_unique(coords) for coords in hole_coords]\n # Facets is a shape (m, 2) array of edge indices.\n # coords[facets] is a shape (m, 2, 2) array of edge coordinates:\n # [(x0, y0), (x1, y1)]\n coords = np.concatenate([poly_coords] + hole_coords, axis=0)\n xmin = coords[:, 0].min()\n dx = np.ptp(coords[:, 0])\n ymin = coords[:, 1].min()\n dy = np.ptp(coords[:, 1])\n r0 = np.array([[xmin, ymin]]) + np.array([[dx, dy]]) / 2\n # Center the coordinates at (0, 0) to avoid floating point issues.\n coords = coords - r0\n indices = np.arange(len(poly_coords), dtype=int)\n if convex_hull:\n if boundary is not None:\n raise ValueError(\n \"Cannot have both boundary is not None and convex_hull = True.\"\n )\n facets = spatial.ConvexHull(coords).simplices\n else:\n if boundary is not None:\n boundary = list(map(tuple, ensure_unique(boundary - r0)))\n indices = [i for i in indices if tuple(coords[i]) in boundary]\n facets = np.array([indices, np.roll(indices, -1)]).T\n # Create facets for the holes.\n for hole in hole_coords:\n hole_indices = np.arange(\n indices[-1] + 1, indices[-1] + 1 + len(hole), dtype=int\n )\n hole_facets = np.array([hole_indices, np.roll(hole_indices, -1)]).T\n indices = np.concatenate([indices, hole_indices], axis=0)\n facets = np.concatenate([facets, hole_facets], axis=0)\n\n mesh_info = triangle.MeshInfo()\n mesh_info.set_points(coords)\n mesh_info.set_facets(facets)\n if hole_coords:\n # Triangle allows you to set holes by specifying a single point\n # that lies in each hole. 
Here we use the centroid of the hole.\n holes = [\n np.array(Polygon(hole).centroid.coords[0]) - r0.squeeze()\n for hole in hole_coords\n ]\n mesh_info.set_holes(holes)\n\n kwargs = kwargs.copy()\n kwargs[\"allow_boundary_steiner\"] = not preserve_boundary\n if \"min_angle\" not in kwargs:\n kwargs[\"min_angle\"] = min_angle\n\n mesh = triangle.build(mesh_info=mesh_info, **kwargs)\n points = np.array(mesh.points) + r0\n triangles = np.array(mesh.elements)\n if min_points is None and (max_edge_length is None or max_edge_length <= 0):\n return points, triangles\n\n kwargs[\"max_volume\"] = dx * dy / 100\n i = 1\n if min_points is None:\n min_points = 0\n if max_edge_length is None or max_edge_length <= 0:\n max_edge_length = np.inf\n max_length = get_edge_lengths(points, triangles).max()\n while (len(points) < min_points) or (max_length > max_edge_length):\n mesh = triangle.build(mesh_info=mesh_info, **kwargs)\n points = np.array(mesh.points) + r0\n triangles = np.array(mesh.elements)\n edges, is_boundary = get_edges(triangles)\n if preserve_boundary:\n # Only constrain the length of interior edges, i.e. edges not on the boundary.\n edges = edges[~is_boundary]\n edge_lengths = np.linalg.norm(np.diff(points[edges], axis=1), axis=2)\n max_length = edge_lengths.max()\n logger.debug(\n f\"Iteration {i}: Made mesh with {len(points)} points and \"\n f\"{len(triangles)} triangles with maximum interior edge length: \"\n f\"{max_length:.2e}. Target maximum edge length: {max_edge_length:.2e}.\"\n )\n if np.isfinite(max_edge_length):\n kwargs[\"max_volume\"] *= min(0.98, np.sqrt(max_edge_length / max_length))\n else:\n kwargs[\"max_volume\"] *= 0.98\n i += 1\n return points, triangles", "def draw_polygon(self, *points, color=DEFAULT.color):" ]
[ "0.75163543", "0.69011664", "0.6727274", "0.66169375", "0.65865767", "0.65797776", "0.65755314", "0.6541206", "0.6536048", "0.65110767", "0.64228475", "0.6391254", "0.6383122", "0.637157", "0.63180137", "0.6238032", "0.6238032", "0.6224582", "0.61890024", "0.61714065", "0.60916203", "0.60621446", "0.6050022", "0.6044781", "0.6043386", "0.60413194", "0.6039378", "0.6010953", "0.6010471", "0.5977901" ]
0.80410415
0
Downloads a new file and replaces the current
def downloadAndReplaceFile(file_path, download_url):
    file = urllib.request.urlopen(download_url)
    with open(file_path, 'wb') as output:
        output.write(file.read())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def download_url(self, new_url):\n self._download_url = new_url\n\n if not self.download_filename:\n lang_code = \"\"\n if self.language_code:\n lang_code = self.language_code\n self.download_filename = lang_code + \"_\" + new_url.split(\"/\")[-1]", "def downloadfile(self):\n req = requests.get(self.url, stream=True)\n mdsha256 = hashlib.sha256()\n with gzip.open(self.file_path, \"wb\") as gfile:\n for line in req.iter_lines():\n if line:\n gfile.write(line + b\"\\n\")\n mdsha256.update(line + b\"\\n\")\n\n with open(self.sha_file_name, \"wb\") as sfile:\n sfile.write(mdsha256.digest())\n\n sha256 = mdsha256.digest()\n if self.sha256 != sha256:\n self.sha256 = sha256\n print(\"File updated!\")\n else:\n print(\"File not updated!\")", "def filedownload(source, destination):\n\n # Initiate the download\n urllib.request.urlretrieve(source, destination)", "def download(self, url):\n try:\n webFile = urllib.urlopen(url)\n localFile = open(self.workdir + \"/\" + url.split('/')[-1], 'w')\n localFile.write(webFile.read())\n webFile.close()\n localFile.close()\n except IOError:\n print(\"could not get url \" + url)", "def download_file(url, local_filename, update=False):\n if os.path.isfile(local_filename):\n if not update:\n return\n else:\n os.remove(local_filename)\n\n r = requests.get(url, stream=True)\n # http://stackoverflow.com/questions/15352668/download-and-decompress-gzipped-file-in-memory\n with open(local_filename, 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024):\n if chunk: # filter out keep-alive new chunks\n f.write(chunk)", "def download_filename(self, new_name):\n new_name = new_name.strip()\n\n if self._download_path and self.is_downloaded:\n self._move_self_to(new_name=new_name)\n\n self._download_filename = new_name\n self._update_full_path()", "def download(self, download_path):\n return", "def main(url, localfile):\n ph.download_file(url, localfile)", "def save_current_contents(url,update_file):\n r=requests.get(url)\n save_file=update_file+'.original'\n json.dump(r.json(), open(save_file,'w'))\n\n print \"\\nSaved contents of: \\n\\n\\t%s \\n\\nto \\n\\n\\t%s\\n\" % (url,save_file)", "def download(self, url: str, dest: PathLike, force: bool = False):", "def download_file(self, remote_file):\n remote_file.download()", "def download_to_filename(self, filename):\n copyfile(self.name, filename)", "def download_file_nowget(url, fn, cookiejar):\n\tprint \"Downloading %s -> %s\" % (url, fn)\n\turlfile = get_opener(cookiejar).open(url)\n\tchunk_sz = 1048576\n\tbytesread = 0\n\tf = open(fn, \"wb\")\n\n\twhile True:\n\t\tdata = urlfile.read(chunk_sz)\n\t\tif not data:\n\t\t\tprint \".\"\n\t\t\tbreak\n\n\t\tf.write(data)\n\t\tbytesread += len(data)\n\t\tprint \"\\r%d bytes read\" % bytesread,\n\t\tsys.stdout.flush()", "def _download(self):\n self._system.download(\"http://geant4.web.cern.ch/geant4/support/source/\" + self._tar_name)", "def downloadFile(self, base_url, file_name):\n url = os.path.join(base_url, file_name)\n req = urllib2.Request(url)\n try:\n f = urllib2.urlopen(req, timeout=self.timeout)\n local_file = open(os.path.join(self.config.get('PATHS', 'pdfdir'), file_name), \"w\")\n local_file.write(f.read())\n local_file.close()\n except Exception, err:\n print \"[ Failed ]\"\n print \"\\n***ERROR in downloadFile: %s\" % err\n sys.exit(0)", "def download_from_url(path, url):\n filename = url.split(\"/\")[-1]\n found_file = find_file(path, filename, max_depth=0)\n if found_file is None:\n filename = os.path.join(path, filename)\n logging.info(\"Downloading from 
%s to %s.\" % (url, filename))\n inprogress_filepath = filename + \".incomplete\"\n inprogress_filepath, _ = urllib.request.urlretrieve(\n url, inprogress_filepath, reporthook=download_report_hook)\n # Print newline to clear the carriage return from the download progress.\n print()\n tf.gfile.Rename(inprogress_filepath, filename)\n return filename\n else:\n logging.info(\"Already downloaded: %s (at %s).\" % (url, found_file))\n return found_file", "def download_file(self, url, path):\n print('\\tDownloading: ', path)\n with open(path, 'w') as outfile:\n try:\n response = self._http_client.get(url)\n outfile.write(response.text)\n finally:\n response.close()\n outfile.close()\n gc.collect()", "def _download_file(self, path, info=None):\n self._log.debug(\"Downloading file {!r}\".format(path))\n\n if info is None:\n info = self._git_show(path)\n\n # info *SHOULD* be a basestring\n if not isinstance(info, basestring):\n raise Exception(\"{!r} was not a file! (info was {!r})\".format(\n path,\n info\n ))\n\n dest_path = os.path.join(self._code_dir, path.replace(\"/\", os.path.sep))\n self._save_file(dest_path, info)", "def _update_full_path(self):\n if self.download_dir and self.download_filename:\n self._download_path = os.path.join(self.download_dir,\n self.download_filename)\n else:\n self._download_path = None", "def download_file(src, dst):\n subprocess.check_output(cmd_preamble + [\"cp\", f\"jot://{src}\", dst])", "def _download_backwards(self, date_str):\n self.url = f\"http://example.com/new/url/{date_str}\"\n self.html = self._download()", "def download(self, dest, overwrite=False):\n dest = os.path.abspath(dest)\n try:\n local = get_local(dest)\n except ValueError: # Nothing exists at dest, nothing to worry about.\n local = None\n else: # Something exists here.\n if local.hash() == self.hash: # Nothing to update.\n pdbox.info(\"%s and %s are identical\" % (self.uri, local.path))\n return\n if not overwrite:\n raise ValueError(\"%s already exists\" % local.path)\n\n # To avoid any weird overwriting behaviour in the case of errors, we'll\n # download to a different location first, then move to dest afterwards.\n tmp_dest = os.path.join(\n pdbox.TMP_DOWNLOAD_DIR,\n os.path.basename(dest),\n )\n while os.path.exists(tmp_dest): # Make sure the temp name is unique.\n tmp_dest += \"_\"\n\n if pdbox._args.get(\"dryrun\"):\n pdbox.info(\"Downloaded %s to %s\" % (self.uri, dest))\n return None\n\n # TODO: Progress bars.\n meta = execute(pdbox.dbx.files_download_to_file, tmp_dest, self.path)\n pdbox.debug(\"Metadata response: %s\" % meta)\n\n if not os.path.isdir(os.path.dirname(dest)):\n # Create the parent directories of dest.\n os.makedirs(os.path.dirname(dest))\n\n if not pdbox._args.get(\"dryrun\"):\n # os.rename overwrites files just fine, but not directories.\n if local and isinstance(local, LocalFolder):\n shutil.rmtree(local.path)\n # Move the file from the temp location to dest.\n os.rename(tmp_dest, dest)\n\n pdbox.info(\"Downloaded %s to %s\" % (self.uri, dest))\n return LocalFile(dest) # Return the newly created file.", "def download(filename):\n print \"Downloading\", filename\n file_content = urlopen(\n urljoin(URL_PATH, filename)\n )\n write_data_to_file(\n file_content.read(),\n os.path.join(\n '/tmp',\n filename\n )\n )", "def download(self):\n pass", "def download(self):\n pass", "def download_link(self): # pragma: no cover\n\n if PyFunceble.Check(self.file).is_url():\n # We get the destination.\n destination = self.file.split(\"/\")[-1]\n\n if self.file and 
self.autocontinue.is_empty():\n # The given file is an URL.\n\n if (\n not PyFunceble.path.isfile(destination)\n or PyFunceble.INTERN[\"counter\"][\"number\"][\"tested\"] == 0\n ):\n # The filename does not exist in the current directory\n # or the currently number of tested is equal to 0.\n\n # We download the content of the link.\n Download(self.file, destination).text()\n\n # We update the global file with the destination.\n self.file = destination", "def download():\n raise NotImplementedError", "def _download_from_url(self, url):\n target_file_name = self.dir + \"/\" + url.split('/')[-1].split('?')[0]\n urllib.urlretrieve (url, target_file_name)", "def _DownloadFile(self, url, local_filename = None, modifiers = \"\",\n force = False):\n try:\n if local_filename == None:\n local_filename = url.split('/')[-1]\n if os.path.isfile(local_filename) and not force:\n if self.verbose:\n print \"File at %s already exists.\" % local_filename\n return local_filename\n if self.dont_download:\n return local_filename\n webFile = urllib2.urlopen(url)\n localFile = open(local_filename, (\"w%s\" % modifiers))\n localFile.write(webFile.read())\n webFile.close()\n localFile.close()\n os.chmod(local_filename, 0777)\n except urllib2.HTTPError:\n return None\n except urllib2.URLError:\n print \"The url %s is malformed.\" % url\n return None\n return localFile.name", "def _download_file(self, artifact_path, local_path):\n full_path = self.base_artifact_path / artifact_path\n with self.managed_folder.get_file(str(full_path)) as remote_file:\n with open(local_path, \"wb\") as local_file:\n for line in remote_file:\n local_file.write(line)" ]
[ "0.675267", "0.6551912", "0.6546044", "0.65164804", "0.6515489", "0.6470142", "0.6406113", "0.6395002", "0.63866776", "0.6361258", "0.63526785", "0.63443387", "0.63276863", "0.63052297", "0.6251958", "0.6250347", "0.6234136", "0.6224093", "0.6222619", "0.6215353", "0.6196918", "0.6185508", "0.61783755", "0.6169981", "0.6169981", "0.6158316", "0.61445427", "0.6141752", "0.6131552", "0.6129969" ]
0.7659723
0
Check for updates by connecting to an endpoint which returns the latest versions
def checkForUpdates(self):\n    url = self.config.get_conf("Client", "versions-url")\n    try:\n        self._logger.info("Checking for updates...")\n        response = requests.get(url)\n        if 200 <= response.status_code <= 300:\n            data = response.json()[self.mission_name]\n            self.updateGrafana(data)\n            self.updateSubSystems(data)\n            self.updateVersions()\n        else:\n            self._logger.warning("Connection failed to version check endpoint %s", url)\n    except requests.ConnectionError:\n        self._logger.warning("Connection failed to version check endpoint %s", url)\n    except requests.Timeout:\n        self._logger.warning("Connection to version check endpoint %s timed out.", url)\n    except requests.RequestException:\n        self._logger.warning("Something went wrong with the version check %s request.", url)\n    except Exception as exc:\n        self._logger.warning("Something went wrong with version updating: %s", str(exc))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_updates(self):\n try:\n if not common.latest_version(version):\n self.update_notify()\n except:\n self.neterror()", "def check_for_updates():\n last_version = str(request.urlopen(__source__).read().decode(\"utf8\"))\n if str(open(__file__).read()) != last_version:\n log.warning(\"Theres new Version available!, Update from \" + __source__)\n else:\n log.info(\"No new updates!,You have the lastest version of this app.\")", "def check_update():\n try:\n raw_version = urllib.urlopen(VERSIONFILE)\n except IOError as e:\n print UPDATE_FAIL + \"can't fetch version file: \" + str(e)\n else:\n if raw_version.getcode() == 200:\n remote_version = raw_version.read().rstrip()\n if remote_version != VERSION:\n print(UPDATE_WARN + \"version \" + remote_version + \" is available, you have version \"\n + VERSION + \"\\n\\t\" + \"to update run: \" + UPDATECOMMAND)\n else:\n print UPDATE_FAIL + \"can't fetch version file\"", "def check_for_updates():\n try:\n import common.models\n except AppRegistryNotReady: # pragma: no cover\n # Apps not yet loaded!\n logger.info(\"Could not perform 'check_for_updates' - App registry not ready\")\n return\n\n interval = int(common.models.InvenTreeSetting.get_setting('INVENTREE_UPDATE_CHECK_INTERVAL', 7, cache=False))\n\n # Check if we should check for updates *today*\n if not check_daily_holdoff('check_for_updates', interval):\n return\n\n logger.info(\"Checking for InvenTree software updates\")\n\n headers = {}\n\n # If running within github actions, use authentication token\n if settings.TESTING:\n token = os.getenv('GITHUB_TOKEN', None)\n\n if token:\n headers['Authorization'] = f\"Bearer {token}\"\n\n response = requests.get(\n 'https://api.github.com/repos/inventree/inventree/releases/latest',\n headers=headers\n )\n\n if response.status_code != 200:\n raise ValueError(f'Unexpected status code from GitHub API: {response.status_code}') # pragma: no cover\n\n data = json.loads(response.text)\n\n tag = data.get('tag_name', None)\n\n if not tag:\n raise ValueError(\"'tag_name' missing from GitHub response\") # pragma: no cover\n\n match = re.match(r\"^.*(\\d+)\\.(\\d+)\\.(\\d+).*$\", tag)\n\n if len(match.groups()) != 3: # pragma: no cover\n logger.warning(f\"Version '{tag}' did not match expected pattern\")\n return\n\n latest_version = [int(x) for x in match.groups()]\n\n if len(latest_version) != 3:\n raise ValueError(f\"Version '{tag}' is not correct format\") # pragma: no cover\n\n logger.info(f\"Latest InvenTree version: '{tag}'\")\n\n # Save the version to the database\n common.models.InvenTreeSetting.set_setting(\n '_INVENTREE_LATEST_VERSION',\n tag,\n None\n )\n\n # Record that this task was successful\n record_task_success('check_for_updates')", "def check_for_updates(debug):\n # assert CLIENT_CONFIG.PUBLIC_KEY is not None\n client = Client(ClientConfig, refresh=True)\n appUpdate = client.update_check(ClientConfig.APP_NAME,\n cli3.__version__,\n channel='alpha')\n downloader = Downloader(client, appUpdate)\n update_dialog = UpdateDialog(downloader)\n status = UpdateStatus.NO_AVAILABLE_UPDATES\n if appUpdate:\n if debug or hasattr(sys, \"frozen\"):\n ret = update_dialog.exec()\n if ret == QDialog.Accepted:\n if debug:\n logger.debug('Extracting update and restarting...')\n time.sleep(10)\n else:\n appUpdate.extract_restart()\n status = UpdateStatus.EXTRACTING_UPDATE_AND_RESTARTING\n elif ret == QDialog.Rejected:\n status = UpdateStatus.UPDATE_IGNORED\n else:\n status = UpdateStatus.UPDATE_DOWNLOAD_FAILED\n else:\n status = 
UpdateStatus.UPDATE_AVAILABLE_BUT_APP_NOT_FROZEN\n return status", "def test_get_all_available_release_updates(self):\n self._ucr({\n 'repository/online/component/a': 'yes',\n 'repository/online/component/a/version': 'current',\n })\n self._uri({\n '%d.%d/maintained/%d.%d-%d/all/Packages.gz' % (MAJOR, MINOR + 1, MAJOR, MINOR + 1, 0): DATA,\n '%d.%d/maintained/component/%s/all/Packages.gz' % (MAJOR, MINOR + 1, 'a'): DATA,\n '%d.%d/maintained/%d.%d-%d/all/Packages.gz' % (MAJOR + 1, 0, MAJOR + 1, 0, 0): DATA,\n })\n versions, component = self.u.get_all_available_release_updates()\n self.assertEqual(['%d.%d-%d' % (MAJOR, MINOR + 1, 0)], versions)\n self.assertEqual('a', component)", "def test_version_check_update_available(self):\n output = self.run_command(\"selfupdate --check bennr01:selfupdate_test_future\", exitcode=0)\n self.assertIn(\"Target: bennr01:selfupdate_test_future\", output)\n self.assertNotIn(\"Target: ywangd:master\", output)\n self.assertNotIn(\"Already at latest version\", output)\n self.assertIn(\"New version available\", output)\n self.assertNotIn(\"Error: \", output)", "def query_releases(self,request):\n\n\t\t# ----------- DEBUG -----------------\n\t\tMODULE.info(\"updater/updates/query invoked with:\")\n\t\tpp = pprint.PrettyPrinter(indent=4)\n\t\tst = pp.pformat(request.options).split(\"\\n\")\n\t\tfor s in st:\n\t\t\t\tMODULE.info(\" << %s\" % s)\n\t\t# -----------------------------------\n\n\t\t# be as current as possible.\n\t\tself.uu.ucr_reinit()\n\t\tself.ucr.load()\n\n\t\tappliance_mode = self.ucr.is_true('server/appliance')\n\n\t\tresult = []\n\t\ttry:\n\t\t\trequest.status = SUCCESS\n\t\t\tavailable_versions, blocking_component = self.uu.get_all_available_release_updates()\n\t\t\tfor rel in available_versions:\n\t\t\t\tentry = {}\n\t\t\t\tentry['id'] = rel\n\t\t\t\tentry['label'] = 'UCS %s' % rel\n\t\t\t\tresult.append(entry)\n\t\t\t#\n\t\t\t# appliance_mode=no ; blocking_comp=no → add \"latest version\"\n\t\t\t# appliance_mode=no ; blocking_comp=yes → no \"latest version\"\n\t\t\t# appliance_mode=yes; blocking_comp=no → add \"latest version\"\n\t\t\t# appliance_mode=yes; blocking_comp=yes → add \"latest version\"\n\t\t\t#\n\t\t\tif len(result) and (appliance_mode or not blocking_component):\n\t\t\t\t# UniventionUpdater returns available version in ascending order, so\n\t\t\t\t# the last returned entry is the one to be flagged as 'latest' if there's\n\t\t\t\t# no blocking component.\n\t\t\t\tresult[-1]['label'] = '%s (%s)' % (result[-1]['label'],_('latest version'))\n\n\t\texcept Exception,ex:\n\t\t\trequest.status = FAILURE\n\t\t\tself.finished(request.id, [], str(ex))\n\t\t\treturn\n\n\t\t# ----------- DEBUG -----------------\n\t\tMODULE.info(\"updater/updates/query returns: %d entries\" % len(result))\n\t\tpp = pprint.PrettyPrinter(indent=4)\n\t\tst = pp.pformat(result).split(\"\\n\")\n\t\tfor s in st:\n\t\t\t\tMODULE.info(\" >> %s\" % s)\n\t\t# -----------------------------------\n\n\t\tself.finished(request.id,result)", "def download_updates_if_available(self):\n current_version = self.get_version(self.get_module_and_path(self._main_dir))\n latest_version = self.get_latest_version()\n\n print('Checking version... ')\n print('\\tCurrent version: ', current_version)\n print('\\tLatest version: ', latest_version)\n\n if not latest_version:\n return False\n\n if (not current_version) or (latest_version > current_version):\n print('Updating...')\n if not self.path_exists(self._module):\n os.mkdir(self._module)\n\n # Check if there's a botched download already. 
If next directory already exists remove it and tree.\n if self.path_exists(self.get_module_and_path('next')):\n self.rmtree(self.get_module_and_path('next')) # Remove the 'next' directory and contents.\n\n # Create the next directory and download the source files.\n os.mkdir(self.get_module_and_path('next'))\n self.download_all_files(self._github_repo + '/contents/' + self._main_dir, latest_version)\n\n # Last step is to write the .version file only if we have completed the download\n with open(self.get_module_and_path('next/.version'), 'w') as versionfile:\n versionfile.write(latest_version)\n versionfile.close()\n\n return True\n return False", "def checkNewVersionAvailable(self):\n try:\n current = open('docs/VERSION', 'r').read()\n \"\"\"\n Fix bug#13\n \"\"\"\n available = urllib2.urlopen('https://sourceforge.net/p/pytbull/code/ci/master/tree/docs/VERSION?format=raw', timeout=self.timeout).read()\n if current!=available:\n return available.split('\\n')[0]\n else:\n return 0\n except Exception, err:\n print \"***ERROR in checkNewVersionAvailable: %s\" % err\n print \"If you use a proxy, check your configuration.\"\n sys.exit()", "def test_release_update_available_PATCH(self):\n NEXT = '%d.%d-%d' % (MAJOR, MINOR, PATCH + 1)\n self._uri({\n '%d.%d/maintained/%s/all/Packages.gz' % (MAJOR, MINOR, NEXT): DATA,\n })\n next = self.u.release_update_available()\n self.assertEqual(NEXT, next)", "def test_up_to_date(self):\n last_public_release = get_pypi_version()\n self.assertFalse(update_available(last_public_release))", "def check_for_updates(package_name, latest_version_str, our_version_str=VERSION):\n our = dict()\n latest = dict()\n for version, suffix in ((our, our_version_str), (latest, latest_version_str)):\n for part in ['major', 'minor', 'patch']:\n version[part], _, suffix = suffix.partition('.')\n version[part] = int(version[part])\n version['suffix'] = suffix\n\n for part in ['major', 'minor', 'patch', 'suffix']:\n if latest[part] > our[part]:\n if part == 'major':\n sys.exit(messages['UpdateRequired'].format(package_name))\n else:\n print >> sys.stderr, messages['UpdateAvailable'].format(package_name)\n return", "def updates_available(self,request):\n\t\t# ----------- DEBUG -----------------\n\t\tMODULE.info(\"updater/updates/available invoked with:\")\n\t\tpp = pprint.PrettyPrinter(indent=4)\n\t\tst = pp.pformat(request.options).split(\"\\n\")\n\t\tfor s in st:\n\t\t\t\tMODULE.info(\" << %s\" % s)\n\t\t# -----------------------------------\n\t\tresult = False\n\t\twhat = 'starting'\n\t\ttry:\n\t\t\t# be as current as possible.\n\t\t\twhat = 'reinitializing UniventionUpdater'\n\t\t\tself.uu.ucr_reinit()\n\t\t\twhat = 'reloading registry'\n\t\t\tself.ucr.load()\n\n\t\t\twhat = 'checking update availability'\n\t\t\tresult = self.uu.component_update_available()\n\n\t\texcept Exception, ex:\n\t\t\ttyp = str(type(ex)).strip('<>')\n\t\t\tmsg = '[while %s] [%s] %s' % (what,typ,str(ex))\n\t\t\t# result['message'] = msg\n\t\t\t# result['status'] = 1\n\t\t\tMODULE.error(msg)\n\n\t\t# ----------- DEBUG -----------------\n\t\tMODULE.info(\"updater/updates/available returns:\")\n\t\tpp = pprint.PrettyPrinter(indent=4)\n\t\tst = pp.pformat(result).split(\"\\n\")\n\t\tfor s in st:\n\t\t\t\tMODULE.info(\" >> %s\" % s)\n\t\t# -----------------------------------\n\n\t\trequest.status = SUCCESS\n\t\tself.finished(request.id,result)", "async def fetch_data(self):\n url = URL_HASSIO_VERSION.format(self.upstream)\n try:\n _LOGGER.info(\"Fetch update data from %s\", url)\n with async_timeout.timeout(10, 
loop=self.loop):\n async with self.websession.get(url) as request:\n data = await request.json(content_type=None)\n\n except (aiohttp.ClientError, asyncio.TimeoutError, KeyError) as err:\n _LOGGER.warning(\"Can't fetch versions from %s -> %s\", url, err)\n return\n\n except json.JSONDecodeError as err:\n _LOGGER.warning(\"Can't parse versions from %s -> %s\", url, err)\n return\n\n # data valid?\n if not data:\n _LOGGER.warning(\"Invalid data from %s\", url)\n return\n\n # update versions\n self._data[ATTR_HOMEASSISTANT] = data.get('homeassistant')\n self._data[ATTR_HASSIO] = data.get('hassio')\n self.save()", "def checkForUpdates(cversion):\r\n \r\n # set a list of constant versions\r\n \r\n if MpGlobal.SAVED_VERSION == \"0.0.0.0\" :\r\n return;\r\n \r\n v1 = \"0.4.2.0\" # update songs in library to contain index values.\r\n \r\n # if any version compares are less than 0 then updates are required.\r\n update = versionCompare(cversion,v1) < 0;\r\n \r\n \r\n \r\n if update:\r\n print \"updates are required\"\r\n runUpdater(cversion);", "def checkVersion(self):\n try:\n respInfo = self._reqSession.get(self._host + \"/static/pythonSDKVersion.txt\")\n if respInfo.status_code != 200 or len(respInfo.text) > 20:\n return\n latestVersion = respInfo.text.strip()\n import eventregistry._version as _version\n currentVersion = _version.__version__\n for (latest, current) in zip(latestVersion.split(\".\"), currentVersion.split(\".\")):\n if int(latest) > int(current):\n logger.info(\"==============\\nYour version of the module is outdated, please update to the latest version\")\n logger.info(\"Your version is %s while the latest is %s\", currentVersion, latestVersion)\n logger.info(\"Update by calling: pip install --upgrade eventregistry\\n==============\")\n return\n # in case the server mistakenly has a lower version that the user has, don't report an error\n elif int(latest) < int(current):\n return\n except:\n pass", "def __get_updates(self, offset=0, timeout=10):\n method = 'getUpdates'\n params = {'timeout': timeout, 'offset': offset}\n resp = requests.get(self.api_url + method, params)\n try:\n return resp.json()['result']\n except KeyError:\n print('TimeoutError')\n sys.exit(1)", "def update_get():\n\n status, error = update.status.get()\n if error:\n return json_response.error(error), 200\n return json_response.success({'status': str(status)})", "def check_for_updates(appname, use_appimageupdate=True):\n z = Zap(appname)\n z.check_for_updates(use_appimageupdate=use_appimageupdate)", "def get_and_update_versions ():\n\n try:\n get_comp_versions (\"ACE\")\n get_comp_versions (\"TAO\")\n\n if opts.update:\n files = []\n files += update_version_files (\"ACE\")\n files += update_version_files (\"TAO\")\n files += create_changelog (\"ACE\")\n files += create_changelog (\"TAO\")\n files += update_spec_file ()\n files += update_debianbuild ()\n\n commit (files)\n\n except:\n print (\"Fatal error in get_and_update_versions.\")\n raise", "def test_do_upgrade(self):\n with self.with_config_update():\n result = self.runner.invoke(\n cli,\n [\n \"upgrade\",\n *self.LOCAL,\n self.ITEM_TYPE,\n f\"{self.ITEM_PUBLIC_ID.author}/{self.ITEM_PUBLIC_ID.name}:latest\",\n ],\n standalone_mode=False,\n )\n assert result.exit_code == 0", "def update(self):\n if self._device.age() > 5:\n # Only poll device if last update was more than 5 seconds ago\n self.request_temp()\n return", "async def check_new_version(now):\n result = await get_newest_version(hass, huuid, include_components)\n\n if result is None:\n return\n\n 
newest, releasenotes, android, apt = result\n\n # Load data from supervisor on hass.io\n if hass.components.hassio.is_hassio():\n newest = hass.components.hassio.get_homeassistant_version()\n\n # Validate version\n if StrictVersion(newest) > StrictVersion(current_version):\n _LOGGER.info(\"The latest available version is %s\", newest)\n info = 'Dostępna jest nowa wersja ' + newest + '. ' + releasenotes\n hass.states.async_set(\n ENTITY_ID, info, {\n ATTR_FRIENDLY_NAME: 'Aktualizacja',\n \"icon\": \"mdi:update\",\n \"reinstall_dom_app\": True,\n \"reinstall_android_app\": android,\n \"apt\": apt\n }\n )\n # add all entities to keep the order\n # hass.async_add_job(\n # hass.services.async_call(\n # 'group',\n # 'set', {\n # \"object_id\": \"dom_system_version\",\n # \"entities\": [\n # \"sensor.version_info\",\n # \"script.ais_update_system\",\n # \"camera.remote_access\",\n # \"input_boolean.ais_remote_access\",\n # \"sensor.ais_secure_android_id_dom\",\n # \"script.ais_scan_network_devices\",\n # \"script.ais_restart_system\",\n # \"script.ais_stop_system\"]}))\n\n hass.states.async_set(\n 'script.ais_update_system', 'off', {\n ATTR_FRIENDLY_NAME: ' Zainstaluj aktualizację',\n \"icon\": \"mdi:download\"\n }\n )\n\n else:\n info = 'Twój system jest aktualny, wersja ' + newest + '. '\n info += releasenotes\n hass.states.async_set(\n ENTITY_ID, info, {\n ATTR_FRIENDLY_NAME: 'Wersja',\n \"icon\": \"mdi:update\",\n \"reinstall_dom_app\": False,\n \"reinstall_android_app\": False,\n \"apt\": apt\n }\n )\n hass.states.async_set(\n 'script.ais_update_system', 'off', {\n ATTR_FRIENDLY_NAME: ' Sprawdź dostępność aktualizacji',\n \"icon\": \"mdi:refresh\"\n }\n )\n _LOGGER.info(\n \"You are on the latest version (%s) of Assystent domowy\", newest)", "def fetch_update(self, session):\n update_json = self.__send_update_request(session)\n if not update_json:\n return False\n return self.__parse_update(update_json)", "def wait_for_update(self):\n while \"updating_db\" in self.status():\n time.sleep(1)", "def request_changes(self):\n self._check_if_open()\n data = {\"request-changes\": True}\n return self.post(\"request-changes\", data)", "def find_updates(self, versions, last_versions):\n updates = []\n\n for package, current_version in versions.items():\n last_version = last_versions[package]\n if last_version != current_version:\n logger.debug(\n '=> %s current version (%s) and last '\n 'version (%s) are different.',\n package, current_version, last_version\n )\n updates.append(\n (package, last_version)\n )\n\n logger.info('- %d package updates found.', len(updates))\n\n return updates", "def test_release_update_available_CURRENT(self):\n NEXT = '%d.%d-%d' % (MAJOR, MINOR + 1, 0)\n self._ucr({\n 'repository/online/component/a': 'yes',\n 'repository/online/component/a/version': 'current',\n })\n self._uri({\n '%d.%d/maintained/%s/all/Packages.gz' % (MAJOR, MINOR + 1, NEXT): DATA,\n })\n self.assertRaises(U.RequiredComponentError, self.u.release_update_available, errorsto='exception')", "def updates_check():\n data = wait_for_callback(client, cb_updates_name)\n self.assertTrue(isinstance(data, dict))" ]
[ "0.7504742", "0.69656986", "0.67726046", "0.6757253", "0.647151", "0.642298", "0.6343375", "0.63147825", "0.62722856", "0.6236601", "0.62349147", "0.62333035", "0.61005026", "0.60946906", "0.6091233", "0.60737616", "0.60723543", "0.60146767", "0.59970456", "0.5980403", "0.596655", "0.5932935", "0.59191793", "0.59189296", "0.58510864", "0.5817127", "0.5814691", "0.57967645", "0.5769193", "0.57593167" ]
0.7609964
0
Stores the new versions after updating
def updateVersions(self):\n    f = open('../versions.pckl', 'wb')\n    pickle.dump(self.versions, f)\n    f.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def upgrade(self):", "def upgrade(self):", "def save(self):\n self.updated_at = datetime.today()\n models.storage.save()", "def save(self):\n self.updated_at = datetime.today()\n models.storage.save()", "def save(self):\r\n self.updated_at = datetime.now()\r\n models.storage.save()", "def save(self):\n if self.hasChanged:\n self.dictFile.data['installers'] = self.data\n self.dictFile.data['sizeCrcDate'] = self.data_sizeCrcDate\n self.dictFile.save()\n self.hasChanged = False", "def save(self):\n self.updated_at = datetime.now()\n models.storage.save()", "def save(self):\n self.updated_at = datetime.now()\n models.storage.save()", "def save(self):\n self.updated_at = datetime.now()\n models.storage.save()", "def switch_to_version(self, version):\n self.current_version = version\n self.save()", "def save(self):\n\n self.updated_at = datetime.now()\n models.storage.save()", "def persist_version():\r\n #it's not necessary to do this every time we persist, but\r\n #this way we don't have to worry about race conditions with resume.py\r\n #reading this\r\n f = open(os.path.join(get_persist_root_dir(), \"sparkVersion\"), 'w')\r\n from spark.internal.version import VERSION \r\n f.write(VERSION)\r\n f.close()", "def update(self):\n # TO DO for updating urls if changed\n pass", "def updateDictFile(self):\n if self.dictFile.vdata.get('version',0): return\n #--Update to version 1\n for name in self.data.keys():\n installer = self.data[name]\n if isinstance(installer,Installer):\n self.data[name] = installer.__copy__(InstallerArchive)\n self.dictFile.vdata['version'] = 1", "def _update_version(self) -> None:\n # Implement in child class.\n raise NotImplementedError", "def save(self):\n from models import storage\n self.updated_at = datetime.datetime.now()\n storage.save()", "def save(self):\n from models import storage\n self.updated_at = datetime.now()\n storage.save()", "def save(self):\n self.updated_at = datetime.now()\n storage.save()", "def save(self):\n self.updated_at = datetime.now()\n storage.save()", "def save(self):\n # TODO (Pierre): code", "def save_increment(self):\n self.version = self.next_available_version()\n return self.save()", "def upgrade(self, old_version, new_version):\n pass", "def update(self):\n self.save_config_file()", "def switch_to_latest_version(self):\n self.current_version = Version.objects.filter(is_published=True).latest()\n self.save()", "def store(self):\n\n pass", "def save(self):\n self.wallet.storage.put(\n \"slp_data_version\", None\n ) # clear key of other older formats.\n data = {\n \"validity\": self.validity,\n \"token_quantities\": {\n k: [[v0, v1] for v0, v1 in v.items()]\n for k, v in self.token_quantities.items()\n },\n \"txo_byaddr\": {\n k.to_storage_string(): list(v) for k, v in self.txo_byaddr.items()\n },\n \"version\": self.DATA_VERSION,\n }\n self.wallet.storage.put(\"slp\", data)", "def persist(self):\n pass", "def save(self, **kwargs):\n super(ProjectCurrentSerializer, self).save(**kwargs)\n \n if hasattr(self, 'uploaded_pot_file'):\n previous_version = self.object.get_current_version()\n current_version = create_new_version(self.object, previous_version.version+1, self.uploaded_pot_file)\n update_catalogs(self.object, previous_version, current_version)\n self.object.save()\n \n return self.object", "def version(self, newVersion=None):\n pass", "def update(self):\n pass" ]
[ "0.7079501", "0.7079501", "0.6628957", "0.6628957", "0.6503125", "0.6460739", "0.645108", "0.645108", "0.645108", "0.6427714", "0.6370648", "0.63382095", "0.6315561", "0.6303461", "0.6257175", "0.6256316", "0.6255571", "0.6195385", "0.6195385", "0.6155903", "0.6142688", "0.6140395", "0.61383015", "0.61131275", "0.604842", "0.6026848", "0.60247785", "0.600338", "0.5997926", "0.59837973" ]
0.72113633
0
Updates the grafana.db file to display updated dashboards
def updateGrafana(self, data):\n    try:\n        if "version" not in self.versions["grafana"] or self.mission_name + "_" + data["grafana"]["version"] != self.versions["grafana"]["version"]:\n            downloadAndReplaceFile(self.config.get_conf("Client", "grafana-database"), data["grafana"]["link"])\n            self.versions["grafana"]["version"] = self.mission_name + "_" + data["grafana"]["version"]\n            self._logger.info("Grafana updated to version: " + data["grafana"]["version"])\n    except Exception as e:\n        self._logger.error("Failed to update Grafana configuration due to an exception: " + str(e))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_dashboards_v2_update(self):\n pass", "def test_update_dashboard(self):\n os.unlink(self.dboard._path)\n self.dboard.update_dashboard()\n self.assertTrue(os.path.isfile(self.dboard._path))", "def fix_db_stats(invalid_books, invalid_translations, invalid_subreddit):\n\t\n\tupdate_book_stats(invalid_books, is_edit_or_delete = True)\n\tupdate_translation_stats(invalid_translations, is_edit_or_delete = True)\n\tupdate_subreddit_stats(invalid_subreddit, is_edit_or_delete = True)", "def update_database(df,label):\n try:\n client = MongoClient(MONGO_URI,event_listeners=[CommandLogger()])\n db = client.get_database('UNSD')\n\n if label == 'unfcc':\n coll = db.get_collection('unfcc')\n coll.delete_many({})\n logger.info('Getting UNFCC data from mongoDB')\n else:\n coll = db.get_collection('ebal')\n coll.delete_many({})\n logger.info('Getting Energy Balance data from mongoDB')\n\n logger.info('Starting Update...')\n\n data_json = json.loads(df.to_json(orient='records'))\n\n coll.insert_many(data_json)\n #for record in json.loads(df.to_json(orient='records')):\n # try:\n # result = coll.replace_one(filter=record, # locate the document if exists\n # replacement=record, # latest document\n # upsert=True) # update if exists, insert if not\n # if result.raw_result['updatedExisting']:\n # logger.info('Update existing record')\n # else:\n # logger.info('Added new record: {}'.format(result.upserted_id))\n # except pymongo.errors.ConnectionFailure as e:\n # logger.error('PyMongo error ConnectionFailure seen: ' + str(e))\n # traceback.print_exc(file = sys.stdout)\n #data_json = json.loads(df.to_json(orient='records'))\n #result = coll.insert_many(data_json)\n #logger.info('Inserted a total of {} records in UNFCC'.format(len(result.inserted_ids)))\n\n except pymongo.errors.ConnectionFailure as e:\n logger.error(\"Exception seen: \" + str(e))\n traceback.print_exc(file = sys.stdout)\n \n finally:\n client.close()", "def sql(self, cursor):\n for entry in self.data:\n sql_data(self, cursor, entry)\n if not update_iemaccess(cursor, entry):\n self.warnings.append(\n f\"IEMAccess Update failed {entry['access_network']} \"\n f\"{entry['access_station']} {entry['cli_valid']}\"\n )", "def update(self, dashboard):\n res = self.es.update(index=self.index, id=dashboard.id, doc_type=self.doc_type,\n body={'doc': dashboard.to_kibana()},\n refresh=True)\n return res", "def updateFailed(self, *failed):\n logging.debug(\"TrackerPlugin.updateFailed\")\n for compId in failed:\n logging.debug(\" ==> %s\" % compId)\n return", "def dashboard():", "def update_failure(self, talk_id, failure):\r\n QtSql.QtSqlQuery('''UPDATE failures SET Comments=\"%s\", Indicator=\"%s\", Release=\"%d\" WHERE Id=\"%s\"''' %\r\n (failure.comment,\r\n failure.indicator,\r\n failure.release,\r\n failure.talkId))\r\n log.info(\"Failure updated: %s %s\" % (failure.talkId, failure.comment))", "def test_updated_invalid(self):\n thread1 = ThreadFactory()\n PostFactory(thread=thread1)\n\n self.refresh()\n\n qs = {'a': 1, 'w': 4, 'format': 'json',\n 'updated': 1, 'updated_date': 'invalid'}\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(1, json.loads(response.content)['total'])", "def restartFailed(self):\n # failed should not be in cache anymore, so working on db is sufficient\n self.db.restartFailed()", "def feed_update_failure(message_data, exception_data):\n feed_id = message_data['args'][0]\n feed = Feed.objects.get(pk=feed_id)\n\n # mark feed as failed to update and stop updateing it automatically\n feed.flagged = True\n 
feed.save()\n\n notification = Notification(feed=feed, owner=feed.owner, title=exception_data['type'], message=exception_data['message']+f'[Feed: {feed.id}, {feed.link}]', is_error=True)\n notification.save()\n print(\"dramatiq callback: feed update error\")", "def _get_kibana_url(self, entry):\n return self.format_kibana_url(\n query='program: \"mysql-killer\" AND query_class:\"{}\"'.format(entry.get('query_class')),\n columns=self.FIELDS\n )", "def test_invalid_table(self):\n self.execute_query_expect_failure(self.client, \"select * from functional.bad_serde\")\n # The table expires after 1 second. Sleeping for another logbufsecs=5 seconds to wait\n # for the log to be flushed. Wait 4 more seconds to reduce flakiness.\n time.sleep(10)\n assert \"Unexpected exception thrown while attempting to automatically invalidate \"\\\n \"tables\" not in open(os.path.join(self.impala_log_dir, \"catalogd.INFO\")).read()", "def update_exam_warning():\n try:\n data = request.get_json()\n user_id = authenticate_token(request)\n examiner = is_examiner(user_id)\n\n if examiner:\n if not data.get('exam_warning_id'):\n return jsonify({'message':'No exam_warning_id included in payload'}), 400\n\n exam_warning_id = data['exam_warning_id']\n exam_warning = ExamWarning.query.get(exam_warning_id)\n if exam_warning is None:\n return jsonify({'message':'Exam warning with id {} not found'.format(exam_warning_id)}), 404\n \n if data.get('description'): exam_warning.description = data['description']\n if data.get('warning_time'): exam_warning.warning_time = parser.parse(data['warning_time']).replace(tzinfo=None)\n db.session.commit()\n\n return jsonify(exam_warning.to_dict()), 200\n else:\n return jsonify({'user_id': user_id, 'message': ['access denied, not examiner']}), 403\n except exc.SQLAlchemyError as e:\n db.session.rollback()\n return jsonify({ 'message': e.args }), 500\n except Exception as e:\n print(traceback.format_exc())\n return jsonify({ 'message': e.args }), 500", "def update(update_db=True):\n try:\n # time update was triggered\n updated_at = datetime.now().strftime(\"%d %b %Y, %H:%M\")\n\n vis = Visualise(with_tracked_time=False, update_db=update_db)\n\n # Generate preference table\n print(\"Generate preference table...\")\n preferences_table = pref.get_all_preferences_table(\n wim=vis.wim, first_date=vis.START_DATE, last_date=vis.END_DATE\n )\n\n # Save preference table to file\n check_dir(app.config.get(\"DATA_DIR\") + \"/figs/preferences\")\n\n with open(\n app.config.get(\"DATA_DIR\") + \"/figs/preferences/preferences.html\", \"w\"\n ) as f:\n f.write(preferences_table)\n\n # Generate whiteboards\n print(\"Generate whiteboards...\")\n whiteboards = vis.all_whiteboards(update_timestamp=updated_at)\n\n # Save whiteboards to file\n check_dir(app.config.get(\"DATA_DIR\") + \"/figs/projects\")\n\n with open(\n app.config.get(\"DATA_DIR\") + \"/figs/projects/projects.html\", \"w\"\n ) as f:\n f.write(whiteboards[\"project_print\"])\n\n with open(\n app.config.get(\"DATA_DIR\") + \"/figs/projects/project_screen.html\", \"w\"\n ) as f:\n f.write(whiteboards[\"project_screen\"])\n\n check_dir(app.config.get(\"DATA_DIR\") + \"/figs/people\")\n\n with open(app.config.get(\"DATA_DIR\") + \"/figs/people/people.html\", \"w\") as f:\n f.write(whiteboards[\"person_print\"])\n\n with open(\n app.config.get(\"DATA_DIR\") + \"/figs/people/person_screen.html\", \"w\"\n ) as f:\n f.write(whiteboards[\"person_screen\"])\n\n print(\"Convert whiteboards to pdf...\")\n # convert print version html to pdf\n cmd = \"bash 
{home_dir}/scripts/whiteboard_to_pdf.sh\".format(\n home_dir=app.config.get(\"HOME_DIR\")\n )\n result = subprocess.run(cmd, shell=True, check=True, capture_output=True)\n\n if result.returncode != 0:\n raise ValueError(\n \"whiteboard_to_pdf.sh returned with code \" + str(result.returncode)\n )\n\n # Generate & save demand vs capacity plot\n print(\"Demand vs capacity...\")\n capacity_fig = vis.plot_demand_vs_capacity(\n start_date=datetime.now() - timedelta(365),\n end_date=datetime.now() + timedelta(548),\n freq=\"W-MON\",\n )\n capacity_fig.tight_layout()\n capacity_fig.savefig(\n app.config.get(\"DATA_DIR\") + \"/figs/demand_vs_capacity.png\", dpi=300\n )\n plt.close(\"all\")\n\n with open(\n app.config.get(\"DATA_DIR\") + \"/figs/demand_vs_capacity.html\", \"w\"\n ) as f:\n f.write(\n \"\"\"<!DOCTYPE html>\n <html>\n <head>\n <title>Index</title>\n </head>\n <body>\n <img src=\"demand_vs_capacity.png\" alt=\"demand_vs_capacity\">\n </body>\n </html>\"\"\"\n )\n\n print(\"Make zip file...\")\n # create zip of print version whiteboard files\n with zipfile.ZipFile(\n app.config.get(\"DATA_DIR\") + \"/whiteboard.zip\", \"w\"\n ) as zipf:\n zipf.write(\n app.config.get(\"DATA_DIR\") + \"/figs/projects/project_screen.html\",\n \"projects.html\",\n )\n zipf.write(\n app.config.get(\"DATA_DIR\") + \"/figs/people/person_screen.html\",\n \"people.html\",\n )\n zipf.write(\n app.config.get(\"DATA_DIR\") + \"/figs/projects/projects.pdf\",\n \"projects.pdf\",\n )\n zipf.write(\n app.config.get(\"DATA_DIR\") + \"/figs/people/people.pdf\", \"people.pdf\"\n )\n zipf.write(\n app.config.get(\"DATA_DIR\") + \"/figs/demand_vs_capacity.png\",\n \"demand_vs_capacity.png\",\n )\n\n # save update time to file if everything was successful\n with open(app.config.get(\"DATA_DIR\") + \"/.last_update\", \"w\") as f:\n f.write(updated_at)\n\n return render_template(\"update.html\", updated_at=updated_at)\n\n except:\n return traceback.format_exc()", "def error(update, context):\n\tlogger.warning('Update \"%s\" caused error \"%s\"', update, context.error)", "def error(update, context):\n\tlogger.warning('Update \"%s\" caused error \"%s\"', update, context.error)", "def error(_bot, update, error_):\n logger.warning('Update \"%s\" caused error \"%s\"', update, error_)", "def error(update, context):\n logging.warning('Update \"%s\" ', update)\n logging.exception(context.error)", "def dbUpdate():\n dbAddress = config.get('database', 'dbAddress')\n dbUser = config.get('database', 'dbUser')\n dbPassword = config.get('database', 'dbPassword')\n dbName = config.get('database', 'dbName')\n dbPort = config.getint('database', 'dbPort')\n con = MySQLdb.connect(host=dbAddress, port=dbPort, user=dbUser, passwd=dbPassword,\n db=dbName)\n c = con.cursor()\n\n date = datetime.datetime.now()\n c.execute(\"INSERT INTO sensor_data (date, dht_temp, dht_humidity, cpu_temp, \"\n \"solar_voltage, solar_current, battery_voltage, battery_current, \"\n \"load_voltage, load_current) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,\"\n \"%s)\",\n (date, dht_temp, dht_humidity, cpu_temp, sol_volt_v, sol_curr_ma,\n bat_volt_v, bat_curr_ma, load_volt_v, load_curr_ma))\n\n con.commit()\n con.close()", "def update_database(self) -> None:\n \n # Simulate that we update a database\n time.sleep(10)", "def error(update, context):\n logger.warning('Update \"%s\" caused error \"%s\"', update, context.error, extra={'Update_err': True})", "def admin_dash():\n if session['user_admin'] == False:\n abort(403)\n\n yesterday = datetime.utcnow() - timedelta(days=1)\n 
last_week = datetime.utcnow() - timedelta(days=7)\n # Retrieve all Users\n sqa_sess = sqa_session()\n total_users = sqa_sess.query(User).count()\n new_users_yesterday = sqa_sess.query(User).filter(User.Create_Date > yesterday).count()\n new_users_lastweek = sqa_sess.query(User).filter(User.Create_Date > last_week).count()\n\n active_users_yesterday = sqa_sess.query(User).filter(User.Last_Login_Date > yesterday).count()\n active_users_lastweek = sqa_sess.query(User).filter(User.Last_Login_Date > last_week).count()\n\n total_flights = sqa_sess.query(FlightPlan).count()\n new_flights_yesterday = sqa_sess.query(FlightPlan).filter(FlightPlan.Import_Date >= yesterday).count()\n new_flights_lastweek = sqa_sess.query(FlightPlan).filter(FlightPlan.Import_Date >= last_week).count()\n \n\n return render_template('admin/dashboard.html', total_users=total_users, new_users_yesterday=new_users_yesterday, new_users_lastweek=new_users_lastweek,\n active_users_lastweek=active_users_lastweek, active_users_yesterday=active_users_yesterday,\n total_flights=total_flights, new_flights_lastweek=new_flights_lastweek, new_flights_yesterday=new_flights_yesterday)", "def test_cli_update_failed_device(happy_day_data, action, requests_mock, timeless, caplog):\n _ = timeless\n mock_update_apis(requests_mock, deployment_state='failed')\n assert _common(\n happy_day_data,\n action,\n happy_day_data['fw_file']\n ) == 1\n assert caplog.messages[-11:] == [\n '----------------------------',\n ' Campaign Summary ',\n '----------------------------',\n ' Successfully updated: 0',\n ' Failed to update: 2',\n ' Skipped: 0',\n ' Pending: 0',\n ' Total in this campaign: 2',\n 'Reasons for failed updates:',\n ' Update error, failed',\n 'Failed to update 2 devices: xxxx-device-id-xxxx, yyyy-device-id-yyyy'\n ]", "def test_repair_hive_table_failed_refresh(self, mock_logging):\n self.client.athena_client = MockAthenaClient(result_state='FAILED')\n\n # This bucket is not in our `repair_hive_table` config map\n self.client.repair_hive_table({'unit-testing.streamalerts'})\n assert_true(mock_logging.error.called)", "def update_records(self, something):\n print(\"Some logic (not shown) to update database of units\")", "def error(self, context, update, error):\n\t\tself.logger.warning('Update \"%s\" caused error \"%s\"', update, error)", "def load_failed_tas():\n logger.info('Loading TAS Failing Edits')\n load_all_tas_failing_edits()", "def error(update, context):\r\n logger.warning('Update \"%s\" caused error \"%s\"', update, context.error)" ]
[ "0.64582586", "0.5638242", "0.56262255", "0.5506757", "0.5482997", "0.5392161", "0.53738594", "0.531366", "0.52734226", "0.52485365", "0.52429175", "0.52428406", "0.5215931", "0.5211604", "0.52089715", "0.519161", "0.5191182", "0.5191182", "0.51782215", "0.51731366", "0.5167426", "0.5160578", "0.51597995", "0.5146431", "0.5115192", "0.5110133", "0.5085711", "0.50708395", "0.5062184", "0.5061072" ]
0.6387006
1
Updates the subsystems files to respond to changes in packet configuration
def updateSubSystems(self, data):\n    current_subsystems_files = {}\n    for subsystems_filename in self.versions["subsystems"]:\n        current_subsystems_files[subsystems_filename] = self.versions["subsystems"][subsystems_filename]\n    for subsystems_filename in data["subsystems"]:\n        subsystems_file = data["subsystems"][subsystems_filename]\n        if subsystems_filename == "main":\n            continue\n        try:\n            if subsystems_filename not in current_subsystems_files.keys() or self.mission_name + "_" + subsystems_file["version"] != current_subsystems_files[subsystems_filename]["version"]:\n                downloadAndReplaceFile(subsystems_filename + ".py", subsystems_file["python"])\n                self.versions["subsystems"][subsystems_filename] = {}\n                self.versions["subsystems"][subsystems_filename]["version"] = self.mission_name + "_" + subsystems_file["version"]\n                self._logger.info("subsystems file " + subsystems_filename + " updated to version: " + subsystems_file["version"])\n        except Exception as e:\n            self._logger.error("Failed to update subsystems file " + subsystems_filename + " due to an exception: " + str(e))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def updateconfig(self):\n\n # Initialize the yaml data\n ydata = {\"metadata\": self._metadata, \"nodes\": self._nodes}\n\n # Write the system config file\n filename = self._rootdir + self._metadata[\"system_config_file\"]\n with open(filename, \"w\") as yamlfile:\n yaml.dump(ydata, yamlfile)", "def _update_subfiles(self) -> None:\n\t\t# Clear list of subfiles\n\t\tself.subfiles.clear()\n\t\t# Iterate over Nodes\n\t\tfor node in self.nodes:\n\t\t\tfor file in node.get_subfiles():\n\t\t\t\tself.subfiles.add(\"{}/{}\".format(self.xml_dir, file))\n\t\t# Iterate over SubNodes\n\t\tfor subnode in self.subnodes:\n\t\t\tfor file in subnode.filenames:\n\t\t\t\tself.subfiles.add(\"{}/{}\".format(self.xml_dir, file))", "def update_ifaces_configs(self):\n # Nothing to be done if no reordering has occurred.\n reordered = self.udev.reordered_devices\n if not reordered:\n return\n\n # Skip if we have already completed this stage\n if self.ifaces_confs:\n return\n\n # Generate candidate list of iface conf files, with\n # associated rule, that need to be processed.\n reordered_files = tuple((r, os.path.join(self.syspaths.ifaces_dir,\n r['from']))\n for r in reordered)\n\n ifaces_confs = self._process_candidate_conf_files(reordered_files)\n\n # Process the main interfaces file, and if it was modified, then\n # include it in the list of interface conf objects to be tracked\n conf = ConfFile(self.syspaths.ifaces_file, self.syspaths)\n conf.replace(self.remap_renamer)\n if conf.dirty:\n ifaces_confs.append(conf)\n\n # At this stage changes have been prepared but are not yet\n # committed to disk\n self._ifaces_confs = ifaces_confs", "def update(self):\n for component in self.components.values():\n try:\n component.update()\n except Exception as e:\n if self.ds.isFMSAttached():\n log.error(\"In subsystem %s: %s\" % (component, e))\n else:\n raise e", "def update(self):\n self.save_config_file()", "def conf_update(self):\n pass", "def modify_filesystem(self, update_dict, obj_fs):\n try:\n adv_smb_params = [\n 'is_cifs_sync_writes_enabled',\n 'is_cifs_op_locks_enabled',\n 'is_cifs_notify_on_write_enabled',\n 'is_cifs_notify_on_access_enabled',\n 'cifs_notify_on_change_dir_depth']\n\n cifs_fs_payload = {}\n fs_update_payload = {}\n\n for smb_param in adv_smb_params:\n if smb_param in update_dict.keys():\n cifs_fs_payload.update({smb_param: update_dict[smb_param]})\n\n LOG.debug(\"CIFS Modify Payload: %s\", cifs_fs_payload)\n\n cifs_fs_parameters = obj_fs.prepare_cifs_fs_parameters(\n **cifs_fs_payload)\n\n fs_update_params = [\n 'size',\n 'is_thin',\n 'tiering_policy',\n 'is_compression',\n 'access_policy',\n 'locking_policy',\n 'description',\n 'cifs_fs_parameters']\n\n for fs_param in fs_update_params:\n if fs_param in update_dict.keys():\n fs_update_payload.update({fs_param: update_dict[fs_param]})\n\n if cifs_fs_parameters:\n fs_update_payload.update(\n {'cifs_fs_parameters': cifs_fs_parameters})\n\n if \"snap_sch_id\" in update_dict.keys():\n fs_update_payload.update(\n {'snap_schedule_parameters': {'snapSchedule':\n {'id': update_dict.get('snap_sch_id')}\n }}\n )\n elif \"is_snap_schedule_paused\" in update_dict.keys():\n fs_update_payload.update(\n {'snap_schedule_parameters': {'isSnapSchedulePaused': False}\n })\n\n obj_fs = obj_fs.update()\n resp = obj_fs.modify(**fs_update_payload)\n LOG.info(\"Successfully modified the FS with response %s\", resp)\n changed = True if resp else False\n\n except Exception as e:\n errormsg = \"Failed to modify FileSystem instance id: {0}\" \\\n \" with error 
{1}\".format(obj_fs.id, str(e))\n LOG.error(errormsg)\n self.module.fail_json(msg=errormsg)", "def modifyIserver(mswName):\n\n serverpath = \"/usr/local/nextone/bin/server.cfg\"\n try:\n bkupFile = '/tmp/server.cfg.%s.bkup' %mswName\n \n # Copy the server.cfg file from MSW to the local host\n if (os.path.isfile(bkupFile) == False):\n os.system(\"scp -q root@\" + mswName + \":\" + serverpath + \" \" + bkupFile)\n \n os.system('scp -q root@%s:%s /tmp/server.cfg' %(mswName,serverpath))\n\n fin=file('/tmp/server.cfg','r')\n inpList = fin.readlines()\n fin.close()\n\n position = -1\n pstr = '\\tpolicy enumdomain \"e164.com\"\\n '\n\n # Insert the enum domain configuration \n if (inpList.__contains__(pstr) == False):\n # Find the index of maxhunt\n for i in inpList:\n if i.__contains__('maxhunts'):\n position = inpList.index(i)\n break\n\n if position != -1:\n inpList.insert(position,pstr)\n fout=file('/tmp/server.cfg','w')\n fout.writelines(inpList)\n fout.close()\n else:\n log.info('maxhunts entry not present in server.cfg file')\n else:\n log.info('File server.cfg already contains enum '+ \\\n 'policy information')\n\n # Copying the server.cfg file to MSW\n os.system(\"scp -q /tmp/server.cfg root@\" + mswName + \":\" + serverpath )\n\n except Exception, e:\n msg = \"file error: %s\" % str(e)\n #32363 Modified to resolve string formatting error\n log.error('File server.cfg does not exist %s' %str(msg))", "def _run_system_update(args):\n mem_types = set([\"memory\", \"jvm_opts\"])\n args = defaults.update_check_args(args, \"Could not do upgrade of bcbio_system.yaml\")\n system_file = os.path.join(args.datadir, \"galaxy\", \"bcbio_system.yaml\")\n with open(system_file) as in_handle:\n config = yaml.safe_load(in_handle)\n out = copy.deepcopy(config)\n mems = []\n for attrs in config.get(\"resources\", {}).itervalues():\n for key, value in attrs.iteritems():\n if key in mem_types:\n mems.append((key, value))\n common_mem = _calculate_common_memory(mems)\n for prog, attrs in config.get(\"resources\", {}).iteritems():\n for key, value in attrs.iteritems():\n if key == \"cores\":\n out['resources'][prog][key] = int(args.cores)\n elif key in mem_types:\n out[\"resources\"][prog][key] = _update_memory(key, value, args.memory,\n common_mem)\n bak_file = system_file + \".bak%s\" % datetime.datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S\")\n shutil.move(system_file, bak_file)\n with open(system_file, \"w\") as out_handle:\n yaml.safe_dump(out, out_handle, default_flow_style=False, allow_unicode=False)", "def update_services():\n\n upload_supervisor_conf()\n upload_nginx_conf()\n upload_gunicorn_conf()", "def _setupFiles(self):\r\n with open(self._conf, 'w') as f:\r\n # Write base config\r\n f.write('lxc.utsname = {0}\\n'.format(self._hostname))\r\n f.write('\\n')\r\n f.write('lxc.rootfs = {0}\\n'.format(self._rootfs))\r\n f.write('lxc.mount = {0}\\n'.format(self._fstab))\r\n\r\n # Write interface config\r\n for name, link, ip, up, down in self._ifs:\r\n f.write('\\n')\r\n f.write('lxc.network.type = veth\\n')\r\n f.write('lxc.network.flags = up\\n')\r\n f.write('lxc.network.name = {0}\\n'.format(name))\r\n\r\n if link:\r\n f.write('lxc.network.link = {0}\\n'.format(link))\r\n\r\n if ip:\r\n f.write('lxc.network.ipv4 = {0}/24\\n'.format(ip))\r\n\r\n if up:\r\n f.write('lxc.network.script.up = {0}\\n'.format(up))\r\n\r\n if down:\r\n f.write('lxc.network.script.down = {0}\\n'.format(down))\r\n\r\n\r\n # Write cgroup config\r\n f.write(_CONFIG_CGROUP)\r\n\r\n # Write capabilities config\r\n # TODO: Add at some 
point?\r\n # f.write(_CONFIG_CAP)\r\n\r\n with open(self._fstab, 'w') as f:\r\n f.write(_FSTAB_BASE.format(proc=pjoin(self._rootfs, 'proc'),\r\n devpts=pjoin(self._rootfs, 'dev/pts'),\r\n sysfs=pjoin(self._rootfs, 'sys')))\r\n\r\n for src, dst, ro in self._fstabExt:\r\n f.write(_FSTAB_BIND.format(srcDir=src, dstDir=dst,\r\n ro=',ro' if ro else ''))", "def update_system(self, system):\n try:\n rc, storage_system = self.request(\"storage-systems/%s\" % system[\"ssid\"], method=\"POST\", data=system[\"changes\"])\n except Exception as error:\n self.module.warn(\"Failed to update storage system. Array [%s]. Error [%s]\" % (system[\"ssid\"], to_native(error)))", "def test_update_software_configuration_for_system_module(self):\n pass", "def assign_subsystems(self):\n\n self.driver = MecDriver()\n\n systems = {}\n systems[\"ctrl\"] = self\n systems[\"driver\"] = self.driver\n\n self.logger.debug(\"Systems: {}\".format(systems))\n return systems", "def set_rsyslog_new_configuration():\n with open(rsyslog_conf_path, \"rt\") as fin:\n with open(\"tmp.txt\", \"wt\") as fout:\n for line in fin:\n if \"imudp\" in line or \"imtcp\" in line:\n # Load configuration line requires 1 replacement\n if \"load\" in line:\n fout.write(line.replace(\"#\", \"\", 1))\n # Port configuration line requires 2 replacements\n elif \"port\" in line:\n fout.write(line.replace(\"#\", \"\", 2))\n else:\n fout.write(line)\n else:\n fout.write(line)\n command_tokens = [\"sudo\", \"mv\", \"tmp.txt\", rsyslog_conf_path]\n write_new_content = subprocess.Popen(command_tokens, stdout=subprocess.PIPE)\n time.sleep(3)\n o, e = write_new_content.communicate()\n if e is not None:\n handle_error(e,\n error_response_str=\"Error: could not change Rsyslog.conf configuration in -\" + rsyslog_conf_path)\n return False\n print_ok(\"Rsyslog.conf configuration was changed to fit required protocol - \" + rsyslog_conf_path)\n return True", "def apply(self):\n if self.is_embedded():\n self.module.fail_json(msg=\"Cannot add/remove storage systems to SANtricity Web Services Embedded instance.\")\n\n if self.add_discovered_systems or self.systems:\n if self.subnet_mask:\n self.discover_array()\n self.update_storage_systems_info()\n\n # Determine whether the storage system requires updating\n thread_pool = []\n for system in self.systems:\n if not system[\"failed\"]:\n thread = threading.Thread(target=self.update_system_changes, args=(system,))\n thread_pool.append(thread)\n thread.start()\n for thread in thread_pool:\n thread.join()\n else:\n self.update_storage_systems_info()\n\n changes_required = False\n if self.systems_to_add or self.systems_to_update or self.systems_to_remove:\n changes_required = True\n\n if changes_required and not self.module.check_mode:\n add_msg = \"\"\n update_msg = \"\"\n remove_msg = \"\"\n\n # Remove storage systems\n if self.systems_to_remove:\n ssids = []\n thread_pool = []\n for ssid in self.systems_to_remove:\n thread = threading.Thread(target=self.remove_system, args=(ssid,))\n thread_pool.append(thread)\n thread.start()\n ssids.append(ssid)\n for thread in thread_pool:\n thread.join()\n if ssids:\n remove_msg = \"system%s removed: %s\" % (\"s\" if len(ssids) > 1 else \"\", \", \".join(ssids))\n\n thread_pool = []\n\n # Add storage systems\n if self.systems_to_add:\n ssids = []\n for system in self.systems_to_add:\n if not system[\"failed\"]:\n thread = threading.Thread(target=self.add_system, args=(system,))\n thread_pool.append(thread)\n thread.start()\n ssids.append(system[\"ssid\"])\n if ssids:\n add_msg = 
\"system%s added: %s\" % (\"s\" if len(ssids) > 1 else \"\", \", \".join(ssids))\n\n # Update storage systems\n if self.systems_to_update:\n ssids = []\n for system in self.systems_to_update:\n if not system[\"failed\"]:\n thread = threading.Thread(target=self.update_system, args=(system,))\n thread_pool.append(thread)\n thread.start()\n ssids.append(system[\"ssid\"])\n if ssids:\n update_msg = \"system%s updated: %s\" % (\"s\" if len(ssids) > 1 else \"\", \", \".join(ssids))\n\n # Wait for storage systems to be added or updated\n for thread in thread_pool:\n thread.join()\n\n # Report module actions\n if self.undiscovered_systems:\n undiscovered_msg = \"system%s undiscovered: %s\" % (\"s \" if len(self.undiscovered_systems) > 1 else \"\", \", \".join(self.undiscovered_systems))\n self.module.fail_json(msg=(\", \".join([msg for msg in [add_msg, update_msg, remove_msg, undiscovered_msg] if msg])), changed=changes_required)\n\n self.module.exit_json(msg=\", \".join([msg for msg in [add_msg, update_msg, remove_msg] if msg]), changed=changes_required)\n\n # Report no changes\n if self.undiscovered_systems:\n self.module.fail_json(msg=\"No changes were made; however the following system(s) failed to be discovered: %s.\"\n % self.undiscovered_systems, changed=changes_required)\n self.module.exit_json(msg=\"No changes were made.\", changed=changes_required)", "def updateSubhalos(host,file, host2sub):\n if not (host.ID in host2sub):\n return\n g = open(file,'r')\n for posn in host2sub[host.ID]:\n g.seek(posn)\n line = g.readline()\n sub = MTH.MTHalo(line)\n if sub.pid != host.ID:\n print 'WARNING: ERROR: halo not sub of host! Proceeding anyway'\n tree = MT.MergerTree(file,sub.ID)\n tree.haloList.append(sub)\n if sub.num_prog==0:\n tree.progenitors.append(sub)\n # Now deal with all other halos in the tree\n index = 1\n line = g.readline()\n while line !='' and line[0:5] != '#tree':\n halo = MTH.MTHalo(line)\n tree.haloList.append(halo)\n if halo.num_prog ==0:\n tree.progenitors.append(halo)\n updateLinks(tree.haloList, index)\n line = g.readline()\n index += 1\n host.subhalos.append(sub)\n g.close()", "def update(self):\n self.platform_list.update()\n self.exit_sprite.update()\n self.bagGroup.update()\n self.enemy_list.update()", "def _update_auto_config(self):\n\n # Initialize the yaml data\n nodes = {}\n with open(self._autoconfig_filename, \"r\") as stream:\n try:\n ydata = yaml.load(stream)\n if \"nodes\" in ydata:\n nodes = ydata[\"nodes\"]\n except yaml.YAMLError as exc:\n print(exc)\n return\n\n for i in nodes.items():\n key = i[0]\n node = i[1]\n\n # Interfaces\n node[\"interfaces\"] = {}\n for item in self._nodes[key][\"interfaces\"].items():\n port = item[0]\n interface = item[1]\n\n node[\"interfaces\"][port] = {}\n addr = \"{}\".format(interface[\"pci_address\"])\n node[\"interfaces\"][port][\"pci_address\"] = addr\n if \"mac_address\" in interface:\n node[\"interfaces\"][port][\"mac_address\"] = interface[\"mac_address\"]\n\n if \"total_other_cpus\" in self._nodes[key][\"cpu\"]:\n node[\"cpu\"][\"total_other_cpus\"] = self._nodes[key][\"cpu\"][\n \"total_other_cpus\"\n ]\n if \"total_vpp_cpus\" in self._nodes[key][\"cpu\"]:\n node[\"cpu\"][\"total_vpp_cpus\"] = self._nodes[key][\"cpu\"][\n \"total_vpp_cpus\"\n ]\n if \"reserve_vpp_main_core\" in self._nodes[key][\"cpu\"]:\n node[\"cpu\"][\"reserve_vpp_main_core\"] = self._nodes[key][\"cpu\"][\n \"reserve_vpp_main_core\"\n ]\n\n # TCP\n if \"active_open_sessions\" in self._nodes[key][\"tcp\"]:\n node[\"tcp\"][\"active_open_sessions\"] = 
self._nodes[key][\"tcp\"][\n \"active_open_sessions\"\n ]\n if \"passive_open_sessions\" in self._nodes[key][\"tcp\"]:\n node[\"tcp\"][\"passive_open_sessions\"] = self._nodes[key][\"tcp\"][\n \"passive_open_sessions\"\n ]\n\n # Huge pages\n node[\"hugepages\"][\"total\"] = self._nodes[key][\"hugepages\"][\"total\"]\n\n # Write the auto config config file\n with open(self._autoconfig_filename, \"w\") as yamlfile:\n yaml.dump(ydata, yamlfile)", "def ModifyScenarioFiles(base_path):\n enumrealm = socket.gethostbyname('enum_realm')\n prirealm = socket.gethostbyname('prv_rsa')\n pubrealm = socket.gethostbyname('pub_rsa')\n\n strList = ['ENUM_REALM_IP','PRI_REALM_IP','PUB_REALM_IP']\n repList = [enumrealm,prirealm,pubrealm]\n fileName = ['Basic_Receiver_enum.xml','Basic_Receiver_pri.xml','Basic_Receiver_pub.xml']\n \n try:\n for i in range(len(strList)):\n zfile=open(base_path + fileName[i],\"r\")\n zList = zfile.readlines()\n zfile.close()\n\n for j in zList:\n if j.__contains__(strList[i]):\n str1 = j.replace(strList[i],repList[i])\n ind = zList.index(j)\n zList[ind] = str1\n break\n \n zfile=open(base_path + fileName[i],\"w\")\n zList = zfile.writelines(zList)\n zfile.close()\n except Exception, e:\n log.error('error: %s' %str(e))", "def process_system(self):\n if self.already_processed or self.dont_run or not self.system_valid:\n return\n\n self.reorder_udev_rules()\n self.update_fcoe_configs()\n self.update_ifaces_configs()\n\n self.commit()", "def initialize_substructure_production(self):\n\n self.wet_storage = WetStorage(self.env, float(\"inf\"))\n takt_time = self.config[\"offshore_substation_substructure\"].get(\"takt_time\", 0)\n attach_time = self.config[\"offshore_substation_topside\"].get(\"attach_time\", 24)\n to_assemble = [1] * self.num_substations\n\n self.assembly_line = SubstationAssemblyLine(to_assemble, takt_time, attach_time, self.wet_storage, 1)\n\n self.env.register(self.assembly_line)\n self.assembly_line.start()", "async def update_specific_config(self, filename: str):\n self.general_logger.debug(f\"File {filename} has changed!\")\n try:\n with open(filename, \"r\") as f:\n j = json.load(f)\n except JSONDecodeError:\n self.general_logger.warning(\n f\"File {filename} has changed but contains invalid json data\"\n )\n return\n\n splits = filename.split(os.path.sep)\n commands = [] # List[Cmd]\n sock_paths = [] # type: List[str]\n\n # if it's from the monitors folder:\n if \"monitors\" in filename.split(os.path.sep):\n sockets = self.monitor_sockets\n elif \"scrapers\" in filename.split(os.path.sep):\n sockets = self.scraper_sockets\n else:\n self.general_logger.debug(\"File not useful.\")\n return\n\n # we are interested in configs, whitelists, blacklists, webhooks\n if splits[-1] == \"whitelists.json\":\n cmd = COMMANDS.SET_SPECIFIC_WHITELIST\n elif splits[-1] == \"configs.json\":\n cmd = COMMANDS.SET_SPECIFIC_CONFIG\n elif splits[-1] == \"blacklists.json\":\n cmd = COMMANDS.SET_SPECIFIC_BLACKLIST\n elif splits[-1] == \"webhooks.json\":\n cmd = COMMANDS.SET_SPECIFIC_WEBHOOKS\n else:\n return\n\n # for every monitor socket\n for name in sockets:\n if name in j:\n sock_path = sockets[name]\n c = Cmd()\n c.cmd = cmd\n # send only the corresponding part to the monitor\n c.payload = j[name]\n commands.append(c)\n sock_paths.append(sock_path)\n\n # prepare to make all the async requests\n tasks = []\n for sock_path, command in zip(sock_paths, commands):\n tasks.append(self.make_request(sock_path, command))\n\n # send the requests\n responses = await asyncio.gather(*tasks) # 
List[Response]\n\n for response in responses:\n if response.error.value:\n self.general_logger.warning(\n f\"Failed to update config: {response.error}\"\n )", "def update_storage_systems_info(self):\n try:\n rc, existing_systems = self.request(\"storage-systems\")\n\n # Mark systems for adding or removing\n for system in self.systems:\n for existing_system in existing_systems:\n if system[\"ssid\"] == existing_system[\"id\"]:\n system[\"current_info\"] = existing_system\n\n if system[\"current_info\"][\"passwordStatus\"] in [\"unknown\", \"securityLockout\"]:\n system[\"failed\"] = True\n self.module.warn(\"Skipping storage system [%s] because of current password status [%s]\"\n % (system[\"ssid\"], system[\"current_info\"][\"passwordStatus\"]))\n if system[\"current_info\"][\"metaTags\"]:\n system[\"current_info\"][\"metaTags\"] = sorted(system[\"current_info\"][\"metaTags\"], key=lambda x: x[\"key\"])\n break\n else:\n self.systems_to_add.append(system)\n\n # Mark systems for removing\n for existing_system in existing_systems:\n for system in self.systems:\n if existing_system[\"id\"] == system[\"ssid\"]:\n\n # Leave existing but undiscovered storage systems alone and throw a warning.\n if existing_system[\"id\"] in self.undiscovered_systems:\n self.undiscovered_systems.remove(existing_system[\"id\"])\n self.module.warn(\"Expected storage system exists on the proxy but was failed to be discovered. Array [%s].\" % existing_system[\"id\"])\n break\n else:\n self.systems_to_remove.append(existing_system[\"id\"])\n except Exception as error:\n self.module.fail_json(msg=\"Failed to retrieve storage systems. Error [%s].\" % to_native(error))", "def updateConfig(self, conf=None):\r\n if conf is not None:\r\n self.config.update(conf)\r\n if self.visprotocol is not None:\r\n self.visprotocol.updateSettings(self.getConfigData())\r\n # else:\r\n # _LOGGER.warning(\"Visonic link is not set\")\r\n # make the changes to the platform parameters (used in alarm_control_panel)\r\n # the original idea was to keep these separate for multiple partitions but now i'm not so sure its necessary\r\n\r\n self.hass.data[DOMAIN][\"arm_without_code\"] = self.toBool(self.config.get(CONF_ARM_CODE_AUTO, False))\r\n self.hass.data[DOMAIN][\"force_keypad\"] = self.toBool(self.config.get(CONF_FORCE_KEYPAD, False))\r\n self.hass.data[DOMAIN][\"arm_away_instant\"] = self.toBool(self.config.get(CONF_INSTANT_ARM_AWAY, False))\r\n self.hass.data[DOMAIN][\"arm_home_instant\"] = self.toBool(self.config.get(CONF_INSTANT_ARM_HOME, False))\r\n\r\n _LOGGER.debug(\"[Settings] Log Max Entries %s\", self.config.get(CONF_LOG_MAX_ENTRIES))\r\n _LOGGER.debug(\"[Settings] Log Reverse %s\", self.config.get(CONF_LOG_REVERSE))\r\n _LOGGER.debug(\"[Settings] Log Create Event %s\", self.config.get(CONF_LOG_EVENT))\r\n _LOGGER.debug(\"[Settings] Log Final Event %s\", self.config.get(CONF_LOG_DONE))\r\n _LOGGER.debug(\"[Settings] Log XML Filename %s\", self.config.get(CONF_LOG_XML_FN))\r\n _LOGGER.debug(\"[Settings] Log CSV Filename %s\", self.config.get(CONF_LOG_CSV_FN))\r\n _LOGGER.debug(\"[Settings] Log CSV title Row %s\", self.config.get(CONF_LOG_CSV_TITLE))", "def update_shed_config(self, shed_conf):\n for index, my_shed_tool_conf in enumerate(self._dynamic_tool_confs):\n if shed_conf['config_filename'] == my_shed_tool_conf['config_filename']:\n self._dynamic_tool_confs[index] = shed_conf\n self._save_integrated_tool_panel()", "def main():\n\n # endpdoint = \"restconf/data/ietf-interfaces:interfaces\"\n # endpoint = 
f\"restconf/data/ietf-interfaces:interfaces/interface={name}\"\n\n if len(argv) > 1:\n try:\n inventory = load_inventory(argv[1])\n except FileExistsError as err:\n print(\"FileExistsError: \", err)\n else:\n print(\"You must provide a path to your inventory file.\")\n sys.exit()\n\n r1 = inventory['dev-r1']\n loop = [interface for interface in r1[\"interface\"] if interface[\"name\"] == \"Loopback0\"][0]\n\n payload = render_payload(\n loop,\n \"interface.j2\"\n )\n\n session = create_session(r1[\"username\"], r1[\"password\"])\n endpoint = f\"restconf/data/ietf-interfaces:interfaces/interface=Loopback0\"\n results = put_request(r1[\"host\"],session, endpoint, payload)\n print(results)\n\n save_endpoint = \"restconf/operations/cisco-ia:save-config/\"\n saved = save_config(r1[\"host\"], session, save_endpoint)\n\n # target_routers = [\"dev-r1\"]\n\n # for host_key, attribs in inventory.items():\n\n # if host_key in target_routers:\n # print(f\"configuring interfaces on {host_key}\")\n\n # # create a session imported from restconf_api\n # session = create_session(attribs)\n\n # # get all interfaces\n # results = get_interface(attribs, session, \"Loopback0\")\n\n # interface = results[\"ietf-interfaces:interface\"]\n\n # print(json.dumps(interface))\n # # convert to yaml\n # # yaml_output = yaml.safe_dump(results)\n # # with open(\"vars/interfaces.yml\", \"w\") as file:\n # # file.write(yaml_output)\n\n # # results = update_interfaces(attribs, session)\n # # print(results.text, results.status_code)\n\n # # print(get_interfaces(attribs, session))", "def test_subsystems(self):\n pass", "def update_config_data():\n channels = database_functions.select_all_channels()\n for channel_id in channels:\n channel = channels[channel_id]\n channel_command_data = channel.command_data.sub_commands\n channel_default_command_data = custom_json.load(\n definitions.DEFAULT_COMMAND_DATA).sub_commands\n\n synchronise_command_data(\n channel_default_command_data, channel_command_data)\n\n if (channel.language_id is not None and\n channel.language_id not in definitions.LANGUAGES):\n channel.language_id = None\n\n database_functions.synchronise_channel_update(channel_id, channel)\n\n categories = database_functions.select_all_categories()\n for category_id in categories:\n category = categories[category_id]\n category_command_data = category.command_data.sub_commands\n category_default_command_data = custom_json.load(\n definitions.DEFAULT_COMMAND_DATA,).sub_commands\n\n synchronise_command_data(\n category_default_command_data, category_command_data)\n\n if (category.language_id is not None and\n category.language_id not in definitions.LANGUAGES):\n category.language_id = None\n\n database_functions.synchronise_category_update(category_id, category)\n\n guilds = database_functions.select_all_guilds()\n for guild_id in guilds:\n guild = guilds[guild_id]\n guild_command_data = guild.command_data.sub_commands\n guild_default_command_data = custom_json.load(\n definitions.GUILD_COMMAND_DATA).sub_commands\n\n synchronise_command_data(guild_default_command_data, guild_command_data)\n if (guild.language_id is not None and\n guild.language_id not in definitions.LANGUAGES):\n guild.language_id = None\n\n database_functions.synchronise_guild_update(guild_id, guild)\n\n users = database_functions.select_all_users()\n for user_id in users:\n user = users[user_id]\n user_command_data = user.command_data.sub_commands\n user_default_command_data = custom_json.load(\n definitions.USER_COMMAND_DATA).sub_commands\n\n 
synchronise_command_data(user_default_command_data, user_command_data)\n if (user.language_id is not None and\n user.language_id not in definitions.LANGUAGES):\n user.language_id = None\n\n database_functions.synchronise_user_update(user_id, user)", "def update(self):\n self.platform_list.update()\n self.enemy_list.update()" ]
[ "0.6238645", "0.5925609", "0.58945334", "0.58627445", "0.58171177", "0.56774724", "0.5671018", "0.54769933", "0.546279", "0.5458001", "0.5442244", "0.5416476", "0.53997326", "0.5390456", "0.53853697", "0.5376636", "0.5373165", "0.53604275", "0.5336013", "0.5318945", "0.5307824", "0.53044826", "0.5296298", "0.5261642", "0.52379215", "0.5232066", "0.5223049", "0.5216258", "0.5212138", "0.5204437" ]
0.7344193
0
Create a function that dispatches to dask for dask array inputs.
def _dask_or_eager_func(name, eager_module=np, list_of_args=False, n_array_args=1): if has_dask: def f(*args, **kwargs): dispatch_args = args[0] if list_of_args else args if any(isinstance(a, dsa.Array) for a in dispatch_args[:n_array_args]): module = dsa else: module = eager_module return getattr(module, name)(*args, **kwargs) else: def f(data, *args, **kwargs): return getattr(eager_module, name)(data, *args, **kwargs) return f
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def array_input(f):\n @wraps(f)\n def wrapped(self, t):\n t = np.atleast_1d(t)\n r = f(self, t)\n return r\n return wrapped", "def gnumpy_func_wrap(f):\n def inner(*args):\n args = garray_to_cudandarray_nested(args)\n res = f(*args)\n if isinstance(res, list):\n res = cudandarray_to_garray_nested(res)\n else:\n # TODO: check for CudaNdArray instance instead\n if not isinstance(res, (float, np.ndarray)):\n res = gput.cudandarray_to_garray(res)\n return res\n return inner", "def _run_dask(\n *,\n name: str,\n data: Array,\n compute: bool,\n method: Union[None, str, Callable],\n func: Callable,\n expand_arg: bool = False,\n) -> Any:\n if expand_arg:\n graph = dict(\n (f\"{name}-{data.name}-{index}\", (func, *item))\n for index, item in enumerate(data.__dask_keys__())\n )\n else:\n graph = dict(\n (f\"{name}-{data.name}-{index}\", (func, item))\n for index, item in enumerate(data.__dask_keys__())\n )\n items = list(graph.keys())\n result_name = f\"{name}-{data.name}-result\"\n graph.update(data.dask)\n graph[result_name] = (sum, items)\n if compute:\n if not method:\n return dask.get(graph, result_name)\n if method in (\"thread\", \"threaded\", \"threading\", \"threads\"):\n return dask.threaded.get(graph, result_name)\n if isinstance(method, str):\n raise ValueError(f\"Invalid method name '{method}'.\")\n return method(graph, result_name)\n return graph, result_name", "def with_numpy(func):\r\n return func", "def serial_job(func, inputs):\n\n output = []\n for i, finput in enumerate(inputs):\n foutput = func(finput)\n output.append(foutput)\n output = np.array(output, dtype=object)\n\n return np.transpose(output)", "def __call__(self, spec : NDArray[Shape['*,*'], Floating], *args, **kwargs):\n return self.call(spec, *args, **kwargs)", "def vectorize_inputs(f):\n return lambda args: f(*np.hsplit(args, args.shape[1]))", "def dask_arr(vals):\n try:\n import dask.array as da\n return da.from_array(vals, chunks=2)\n except ImportError:\n return vals", "def numpyfunc(*args, **kwargs):\n def _decorator(func):\n return numpy.frompyfunc(func, *args, **kwargs)\n return _decorator", "def __call__(self, queue, allocator, new_shape, ary, src_indices=None,\n dst_indices=None, map_values=None, zero_fill=False,\n wait_for=None, range=None, debug=False):\n\n have_src_indices = src_indices is not None\n have_dst_indices = dst_indices is not None\n have_map_values = map_values is not None\n\n if not (have_src_indices or have_dst_indices):\n raise ValueError(\"must specify at least one of src or dest indices\")\n\n if range is None:\n if have_src_indices and have_dst_indices:\n raise ValueError(\n \"must supply range when passing both src and dest indices\")\n elif have_src_indices:\n range = slice(src_indices.shape[0])\n if debug:\n assert int(cl.array.max(src_indices).get()) < len(ary)\n elif have_dst_indices:\n range = slice(dst_indices.shape[0])\n if debug:\n assert int(cl.array.max(dst_indices).get()) < new_shape\n\n if zero_fill:\n array_maker = cl.array.zeros\n else:\n array_maker = cl.array.empty\n\n result = array_maker(queue, new_shape, ary.dtype, allocator=allocator)\n\n kernel = self._get_kernel(ary.dtype,\n src_indices.dtype if have_src_indices else None,\n dst_indices.dtype if have_dst_indices else None,\n have_src_indices,\n have_dst_indices,\n have_map_values)\n\n args = (ary, result)\n args += (src_indices,) if have_src_indices else ()\n args += (dst_indices,) if have_dst_indices else ()\n args += (map_values,) if have_map_values else ()\n\n evt = kernel(*args, queue=queue, range=range, 
wait_for=wait_for)\n\n return result, evt", "def _call_numpy(func, args, kwargs):\n\n numpy_args, numpy_kwargs = data_transfer._get_numpy_args(args, kwargs)\n\n numpy_res = func(*numpy_args, **numpy_kwargs)\n\n cupy_res = data_transfer._get_cupy_result(numpy_res)\n\n return cupy_res", "def _np_apply_along_axis(func1d, axis: int, arr: np.ndarray) -> np.ndarray:\n\n assert arr.ndim == 2\n assert axis in [0, 1]\n\n if axis == 0:\n result = np.empty(arr.shape[1])\n for i in range(len(result)):\n result[i] = func1d(arr[:, i])\n return result\n\n result = np.empty(arr.shape[0])\n for i in range(len(result)):\n result[i] = func1d(arr[i, :])\n\n return result", "def callArray(func,params,shape):\n call='%s'%func\n call+='np.array([%s.tolist()]*%i)'%(params[0],shape[1])\n for v in params[1:]:\n call+=', np.array([%s.tolist()]*%i).T'%(v,shape[0])\n return call+')'", "def dask(function, argument_list):\n from dask import delayed, compute\n\n if _cluster_setup.dask._connection is None:\n error_message = (\n 'No connection was established to a Dask scheduler that distributes jobs to workers. '\n \"Please use unified_map.cluster_setup.dask and/or Dask's command line \"\n 'interface for\\n'\n ' 1. Starting a scheduler\\n'\n ' 2. Starting several workers\\n'\n ' 3. Connecting to the scheduler')\n raise ConnectionError(error_message)\n\n jobs = [delayed(function)(*args) for args in argument_list]\n result_tuple = compute(*jobs, get=_cluster_setup.dask._connection.get)\n result_list = list(result_tuple)\n return result_list", "def call(self, inputs):\n raise NotImplementedError", "def feed_ndarray(dali_tensor, arr):\n assert dali_tensor.shape() == list(arr.size()), \\\n (\"Shapes do not match: DALI tensor has size {0}\"\n \", but PyTorch Tensor has size {1}\".format(dali_tensor.shape(), list(arr.size())))\n #turn raw int to a c void pointer\n c_type_pointer = ctypes.c_void_p(arr.data_ptr())\n dali_tensor.copy_to_external(c_type_pointer)\n return arr", "def test_compute(local_registry, dask_array, numpy_array):\n q = local_registry.Quantity(dask_array, units_)\n\n comps = add_five(local_registry, q)\n res = comps.compute()\n\n assert np.all(res.m == numpy_array)\n assert not dask.is_dask_collection(res)\n assert res.units == units_\n assert q.magnitude is dask_array", "def operate_on_narray(x, y, function):\n try:\n return [operate_on_narray(a, b, function) for a, b in zip(x, y)]\n # except TypeError as e:\n except TypeError:\n # Not iterable\n return function(x, y)", "def multidim(select=[]):\n\n def decorator(func, *args, **kwargs):\n\n @wraps(func)\n def wrapper(sig, *args, **kwargs):\n\n if sig.ndim == 1:\n out = func(sig, *args, **kwargs)\n\n elif sig.ndim == 2:\n\n # Apply func across rows of the input data\n outs = [func(dat, *args, **kwargs) for dat in sig]\n\n if isinstance(outs[0], tuple):\n\n # Collect together associated outputs from each, in case there are multiple outputs\n out = [np.stack([dat[n_out] for dat in outs]) for n_out in range(len(outs[0]))]\n\n # Sub-select single instance of collection for requested outputs\n out = [dat[0] if ind in select else dat for ind, dat in enumerate(out)]\n\n else:\n out = np.stack(outs)\n\n return out\n\n return wrapper\n\n return decorator", "def to_dask_array(self, chunk_size=(1, 6000, 6000)):\r\n try:\r\n import dask.array as da\r\n except ImportError:\r\n raise ImportError(\"to_dask_array requires optional dependency dask[array].\")\r\n\r\n self.da_arr = da.from_array(self.__arr, chunks=chunk_size)\r\n return self.da_arr", "def is_xarray(func, 
*dec_args):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n try:\n ds_da_locs = dec_args[0]\n if not isinstance(ds_da_locs, list):\n ds_da_locs = [ds_da_locs]\n\n for loc in ds_da_locs:\n if isinstance(loc, int):\n ds_da = args[loc]\n elif isinstance(loc, str):\n ds_da = kwargs[loc]\n\n is_ds_da = isinstance(ds_da, (xr.Dataset, xr.DataArray))\n if not is_ds_da:\n typecheck = type(ds_da)\n raise IOError(\n f\"\"\"The input data is not an xarray DataArray or\n Dataset. climpred is built to wrap xarray to make\n use of its awesome features. Please input an xarray\n object and retry the function.\n\n Your input was of type: {typecheck}\"\"\"\n )\n except IndexError:\n pass\n # this is outside of the try/except so that the traceback is relevant\n # to the actual function call rather than showing a simple Exception\n # (probably IndexError from trying to subselect an empty dec_args list)\n return func(*args, **kwargs)\n\n return wrapper", "def wrapper_func(input):\n assert len(input.shape) == 1\n\n params[i].data = torch.Tensor(input.reshape((shape)))\n\n if(len(keywords.keys()) > 1):\n outputVar = func(*params, **keywords)\n else:\n outputVar = func(*params)\n\n output = outputVar.data.numpy()\n\n return output", "def debug_callback_batching_rule(args, dims, **params):\n axis_size = next(x.shape[i] for x, i in zip(args, dims)\n if i is not None)\n # TODO(sharadmv): implement in terms of rolled loop unstead of\n # unrolled.\n def get_arg_at_dim(i, dim, arg):\n if dim is batching.not_mapped:\n # Broadcast unmapped argument\n return arg\n return lax.index_in_dim(arg, i, axis=dim, keepdims=False)\n outs = []\n for i in range(axis_size):\n args_idx = map(functools.partial(get_arg_at_dim, i), dims, args)\n outs.append(debug_callback_p.bind(*args_idx, **params))\n outs = [jnp.stack(xs) for xs in zip(*outs)]\n return outs, (0,) * len(outs)", "def __array_function__(self, func, types, args, kwargs):\n try:\n if not func.__module__.startswith(\"numpy\"):\n return NotImplemented\n except AttributeError:\n return NotImplemented\n _args = list(map(MetaTensor._convert, args))\n _kwargs = {k: MetaTensor._convert(v) for k, v in kwargs.items()}\n return func(*_args, **_kwargs)", "def test_distributed_compute(local_registry, loop, dask_array, numpy_array):\n q = local_registry.Quantity(dask_array, units_)\n\n with cluster() as (s, [a, b]):\n with Client(s[\"address\"], loop=loop):\n comps = add_five(local_registry, q)\n res = comps.compute()\n\n assert np.all(res.m == numpy_array)\n assert not dask.is_dask_collection(res)\n assert res.units == units_\n\n assert q.magnitude is dask_array", "def _handle__array_ufunc__(self, ufunc, method, *inputs, **kwargs):\n\n if method == '__call__' and ufunc.__name__ in UFUNCS:\n # if `out` is set, it must be a single Bohrium array\n if 'out' not in kwargs or len(kwargs['out']) == 1 and bhary.check(kwargs['out'][0]):\n return UFUNCS[ufunc.__name__](*inputs, **kwargs)\n else:\n warnings.warn(\"Bohrium does not support regular numpy arrays as output, it will be handled by \"\n \"the original NumPy.\", UserWarning, 1)\n else:\n warnings.warn(\"Bohrium does not support ufunc `%s` it will be handled by \"\n \"the original NumPy.\" % ufunc.__name__, UserWarning, 1)\n np_inputs = []\n for i in inputs:\n if bhary.check(i):\n np_inputs.append(i.copy2numpy())\n else:\n np_inputs.append(i)\n return getattr(np, ufunc.__name__)(*np_inputs, **kwargs)", "def enqueue_ops_fn():\n control_deps = []\n per_host_sharded_inputs = []\n num_replicas_per_host = ctx.num_of_replicas_per_host\n with 
ops.device(device):\n if not inputs.is_dataset:\n raise TypeError('`input_fn` must return a `Dataset` for this mode.')\n for _ in range(num_replicas_per_host):\n # Use control dependencies to ensure a deterministic ordering.\n with ops.control_dependencies(control_deps):\n features, labels = inputs.features_and_labels() # Calls get_next()\n\n inputs_structure_recorder.validate_and_record_structure(\n features, labels)\n flattened_inputs = (\n inputs_structure_recorder.flatten_features_and_labels(\n features, labels))\n\n control_deps.extend(flattened_inputs)\n per_host_sharded_inputs.append(flattened_inputs)\n\n infeed_queue = tpu_feed.InfeedQueue(\n number_of_tuple_elements=len(per_host_sharded_inputs[0]))\n captured_infeed_queue.capture(infeed_queue)\n infeed_queue.set_configuration_from_sharded_input_tensors(\n per_host_sharded_inputs)\n\n per_host_enqueue_ops = infeed_queue.generate_enqueue_ops(\n per_host_sharded_inputs, tpu_ordinal_function=ctx.tpu_ordinal_function)\n return per_host_enqueue_ops", "def _compute_var_image_xarray_dask(\n src_var: xr.DataArray,\n dst_src_ij_images: np.ndarray,\n fill_value: Union[int, float, complex] = np.nan\n) -> da.Array:\n return da.map_blocks(_compute_var_image_xarray_dask_block,\n src_var.values,\n dst_src_ij_images,\n fill_value,\n dtype=src_var.dtype,\n drop_axis=0)", "def a(*args, **kwargs):\n return np.array(*args, **kwargs)", "def input_array(self, register: str, value: ArrayData):\n assert self._call is None, f\"You need to specify all inputs before calling `{self._call}`\"\n assert register in a_regs, f\"Register {register} must be one of the a registers!\"\n assert isinstance(value, ArrayData), f\"{value} is a {type(value)}, expected an array (created with the array([..]) method!\"\n name = self._lookup_array(value)\n self._args += [\"\", f\"# load address to array {name} into {register}\", f\"la {register} {name}\"]" ]
[ "0.6345012", "0.5943624", "0.59142417", "0.5799531", "0.5785533", "0.5699175", "0.561758", "0.5583389", "0.55789495", "0.552335", "0.5488789", "0.5378587", "0.5335627", "0.53254235", "0.5319744", "0.52875775", "0.52390635", "0.5225205", "0.52230674", "0.5208402", "0.5199496", "0.5189637", "0.5188131", "0.51851296", "0.51781756", "0.51727444", "0.51675075", "0.51662624", "0.5153877", "0.5144835" ]
0.632499
1
Supply boundary conditions for an xarray.DataArray da along the specified dimension. Returns a raw dask or numpy array, depending on the underlying data.
def _apply_boundary_condition(da, dim, left, boundary=None, fill_value=0.0): if boundary not in ["fill", "extend", "extrapolate"]: raise ValueError( "`boundary` must be 'fill', 'extend' or " "'extrapolate', not %r." % boundary ) axis_num = da.get_axis_num(dim) # the shape for the edge array shape = list(da.shape) shape[axis_num] = 1 base_array = da.data index = slice(0, 1) if left else slice(-1, None) edge_array = da.isel(**{dim: index}).data use_dask = has_dask and isinstance(base_array, dsa.Array) if boundary == "extend": boundary_array = edge_array elif boundary == "fill": args = shape, fill_value kwargs = {"dtype": base_array.dtype} if use_dask: full_func = dsa.full kwargs["chunks"] = edge_array.chunks else: full_func = np.full boundary_array = full_func(*args, **kwargs) elif boundary == "extrapolate": gradient_slice = slice(0, 2) if left else slice(-2, None) gradient_sign = -1 if left else 1 linear_gradient = da.isel(**{dim: gradient_slice}).diff(dim=dim).data boundary_array = edge_array + gradient_sign * linear_gradient return boundary_array
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def data_category_inside_boundary(self: Union[pd.DataFrame, pd.Series],\n boundary: dict, *,\n inclusive: bool = True) -> ndarray:\n pass\n boolean_array = np.full(self.__getattribute__('shape')[0], fill_value=True)\n for this_dim, this_dim_limit in boundary.items():\n boolean_array = np.bitwise_and(boolean_array,\n self[this_dim].between(*this_dim_limit, inclusive=inclusive).values)\n return boolean_array", "def _last_item_cond_true(cond, dim):\n # force DataArray because isel (when transforming to dim space) requires DataArray\n if isinstance(cond, xr.Dataset):\n was_dataset = True\n cond = cond.to_array()\n else:\n was_dataset = False\n # index last True\n reached = cond.argmin(dim)\n # fix below one\n reached = reached.where(reached >= 1, np.nan)\n # reset where always true to len(lead)\n reached = reached.where(~cond.all(\"lead\"), other=cond[dim].size)\n # fix locations where always nan to nan\n mask = cond.notnull().all(\"lead\")\n reached = reached.where(mask, other=np.nan)\n # shift back into coordinate space\n # problem: cannot convert nan to idx in isel\n # therefore set to dim:0 and mask again afterwards\n reached_notnull = reached.notnull() # remember where not masked\n reached = reached.where(\n reached.notnull(), other=cond.isel({dim: 0})\n ) # set nan to dim:0\n # take one index before calculated by argmin\n reached_dim_space = cond[dim].isel(\n {dim: reached.astype(int) - 1}\n ) # to not break conversion to dim space\n reached_dim_space = reached_dim_space.where(\n reached_notnull, other=np.nan\n ) # cleanup replace dim:0 with nan again\n if was_dataset:\n reached_dim_space = reached_dim_space.to_dataset(dim=\"variable\").squeeze(\n drop=True\n )\n if \"lead\" in reached_dim_space.coords:\n reached_dim_space = reached_dim_space.drop_vars(\"lead\")\n return reached_dim_space", "def boundify(array, trim=False):\n if not any(d is None for d in array.datashape.dim_high):\n return array\n\n if trim:\n return _boundify_trim(array)\n\n # use special scan syntax to get current bounds. 
eval() required\n dims = array.eval().dimensions().project('low', 'high').toarray()\n ds = array.datashape.copy()\n ds.dim_low = tuple(dims['low'])\n ds.dim_high = tuple(dims['high'])\n\n array = array.redimension(ds.schema)\n\n return array", "def bin_obs_data(ds, s_lat=-30, n_lat=30, bin_var_nm='omega500',\n grp_time_var='year', bins=np.arange(0,1.1,0.1), land_sea='global', land_mask_dir='./data/'):\n ds_m = ds.where(np.logical_and(ds.lat>=s_lat, ds.lat<=n_lat), drop=True)\n\n ds_mask = xr.open_dataset(os.path.join(land_mask_dir, 'era_land_t42.nc'), decode_times=False)\n ds_mask = ds_mask.where(np.logical_and(ds_mask.lat>=s_lat,ds_mask.lat<=n_lat), drop=True)\n #ds_m.coords['mask'] = (('lat', 'lon'), ds_mask.land_mask.values)\n\n bin_data_dict = {'omega500': ds_m.omega500} \n\n vars_dict = {}\n\n ## 3d variables\n bin_data_dict2 = copy.deepcopy(bin_data_dict)\n pdf_m, ds_bin_mean_m, dims, coords2 = select_3d_obs_data(ds_m, bin_data_dict2, ds_mask,\n bins, bin_var_nm=bin_var_nm, land_sea=land_sea, grp_time_var=grp_time_var)\n for key, val in ds_bin_mean_m.items():\n vars_dict[key] = (dims, val)\n \n vars_dict['pdf'] = (dims, pdf_m)\n ds_bin_mean_m_array = xr.Dataset(vars_dict, coords=coords2)\n\n return ds_bin_mean_m_array", "def make_test_dataArray():\n x = np.zeros(shape=(3,31))\n x[0,:] = np.nan\n x[1,[1,2,3,4,5,6,15,23,24,25]] = [np.nan,np.nan,0.1,0.5,2.,2.,2.,2.,0.9,2.]\n x[2,[3,4,5,6,15,23,24,25]] = [0.1,0.5,2.,2.,2.,2.,0.9,2.]\n da = xr.DataArray(x, dims=['x','time'])\n da.coords['time'] = pd.date_range('19790101', freq='D', periods=31)\n return da", "def any(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n return self.reduce(\n duck_array_ops.array_any,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )", "def extract(condition, x):\n\n if dpnp.is_supported_array_type(condition) and dpnp.is_supported_array_type(\n x\n ):\n if condition.shape != x.shape:\n pass\n else:\n dpt_condition = (\n condition.get_array()\n if isinstance(condition, dpnp_array)\n else condition\n )\n dpt_array = x.get_array() if isinstance(x, dpnp_array) else x\n return dpnp_array._create_from_usm_ndarray(\n dpt.extract(dpt_condition, dpt_array)\n )\n\n return call_origin(numpy.extract, condition, x)", "def any(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"any\",\n dim=dim,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_any,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )", "def any(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> DataArray:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"any\",\n dim=dim,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_any,\n dim=dim,\n keep_attrs=keep_attrs,\n **kwargs,\n )", "def getvarboundslice(self,first_,last_,bk_,bl_,bu_):\n _bk_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and bk_ is not None and len(bk_) != ((last_) - (first_)):\n raise ValueError(\"Array argument bk is not long enough: Is %d, expected %d\" % (len(bk_),((last_) - (first_))))\n if isinstance(bk_,numpy.ndarray) and not bk_.flags.writeable:\n raise 
ValueError(\"Argument bk must be writable\")\n if bk_ is not None:\n _bk_tmp = (ctypes.c_int32 * len(bk_))()\n else:\n _bk_tmp = None\n _bl_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and bl_ is not None and len(bl_) != ((last_) - (first_)):\n raise ValueError(\"Array argument bl is not long enough: Is %d, expected %d\" % (len(bl_),((last_) - (first_))))\n if isinstance(bl_,numpy.ndarray) and not bl_.flags.writeable:\n raise ValueError(\"Argument bl must be writable\")\n if isinstance(bl_, numpy.ndarray) and bl_.dtype is numpy.dtype(numpy.float64) and bl_.flags.contiguous:\n _bl_copyarray = False\n _bl_tmp = ctypes.cast(bl_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif bl_ is not None:\n _bl_copyarray = True\n _bl_np_tmp = numpy.zeros(len(bl_),numpy.dtype(numpy.float64))\n _bl_np_tmp[:] = bl_\n assert _bl_np_tmp.flags.contiguous\n _bl_tmp = ctypes.cast(_bl_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _bl_copyarray = False\n _bl_tmp = None\n \n _bu_minlength = ((last_) - (first_))\n if ((last_) - (first_)) > 0 and bu_ is not None and len(bu_) != ((last_) - (first_)):\n raise ValueError(\"Array argument bu is not long enough: Is %d, expected %d\" % (len(bu_),((last_) - (first_))))\n if isinstance(bu_,numpy.ndarray) and not bu_.flags.writeable:\n raise ValueError(\"Argument bu must be writable\")\n if isinstance(bu_, numpy.ndarray) and bu_.dtype is numpy.dtype(numpy.float64) and bu_.flags.contiguous:\n _bu_copyarray = False\n _bu_tmp = ctypes.cast(bu_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n elif bu_ is not None:\n _bu_copyarray = True\n _bu_np_tmp = numpy.zeros(len(bu_),numpy.dtype(numpy.float64))\n _bu_np_tmp[:] = bu_\n assert _bu_np_tmp.flags.contiguous\n _bu_tmp = ctypes.cast(_bu_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_double))\n else:\n _bu_copyarray = False\n _bu_tmp = None\n \n res = __library__.MSK_XX_getvarboundslice(self.__nativep,first_,last_,_bk_tmp,_bl_tmp,_bu_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if bk_ is not None: bk_[:] = [ boundkey(v) for v in _bk_tmp[0:len(bk_)] ]\n if _bl_copyarray:\n bl_[:] = _bl_np_tmp\n if _bu_copyarray:\n bu_[:] = _bu_np_tmp", "def dask_data_to_xarray(self, df, var=None):\n\n lazy_values = [dask.delayed(df[dim].unique()) for dim in self.DIMS]\n dims_values = [future for future in dask.compute(*lazy_values)]\n shape = tuple([len(x) for x in dims_values])\n\n var_array = df[var].values\n var_array.compute_chunk_sizes()\n var_array_reshape = var_array.reshape(shape)\n tuple_data = (self.DIMS, var_array_reshape)\n\n coords_dict = dict(zip(self.DIMS, dims_values))\n #values_dicts = dict(zip(extract_vars, values_arrays))\n\n xarr = xr.DataArray(var_array_reshape, \n coords=dims_values,\n dims=self.DIMS)\n\n return xarr.sortby(['lat', 'lon'])", "def is_xarray(func, *dec_args):\n\n @wraps(func)\n def wrapper(*args, **kwargs):\n try:\n ds_da_locs = dec_args[0]\n if not isinstance(ds_da_locs, list):\n ds_da_locs = [ds_da_locs]\n\n for loc in ds_da_locs:\n if isinstance(loc, int):\n ds_da = args[loc]\n elif isinstance(loc, str):\n ds_da = kwargs[loc]\n\n is_ds_da = isinstance(ds_da, (xr.Dataset, xr.DataArray))\n if not is_ds_da:\n typecheck = type(ds_da)\n raise IOError(\n f\"\"\"The input data is not an xarray DataArray or\n Dataset. climpred is built to wrap xarray to make\n use of its awesome features. 
Please input an xarray\n object and retry the function.\n\n Your input was of type: {typecheck}\"\"\"\n )\n except IndexError:\n pass\n # this is outside of the try/except so that the traceback is relevant\n # to the actual function call rather than showing a simple Exception\n # (probably IndexError from trying to subselect an empty dec_args list)\n return func(*args, **kwargs)\n\n return wrapper", "def any(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n return self.reduce(\n duck_array_ops.array_any,\n dim=dim,\n numeric_only=False,\n keep_attrs=keep_attrs,\n **kwargs,\n )", "def _check_dimensions(self) -> None:\n dims = (self.y_dim, self.x_dim)\n da = self._obj[self.vars[0]] if isinstance(self._obj, xr.Dataset) else self._obj\n extra_dims = [dim for dim in da.dims if dim not in dims]\n if len(extra_dims) == 1:\n dims = tuple(extra_dims) + dims\n self.set_attrs(dim0=extra_dims[0])\n elif len(extra_dims) == 0:\n self._obj.coords[GEO_MAP_COORD].attrs.pop(\"dim0\", None)\n elif len(extra_dims) > 1:\n raise ValueError(\"Only 2D and 3D data arrays supported.\")\n if isinstance(self._obj, xr.Dataset):\n check = np.all([self._obj[name].dims == dims for name in self.vars])\n else:\n check = self._obj.dims == dims\n if check == False:\n raise ValueError(\n f\"Invalid dimension order ({da.dims}). \"\n f\"You can use `obj.transpose({dims}) to reorder your dimensions.\"\n )", "def create_valid_subset(model_data, name, config):\n\n # Start with a mask that is True where the tech exists at a node (across all timesteps and for a each carrier and cost, where appropriate)\n imask = _imask_foreach(model_data, config.foreach)\n if imask is False: # i.e. not all of 'foreach' are in model_data\n return None\n # Add \"where\" info as imasks\n where_array = config.get_key(\"where\", default=[])\n if where_array:\n imask = _imask_where(model_data, name, where_array, imask, \"and_\")\n\n # Add imask based on subsets\n imask = _subset_imask(name, config, imask)\n\n # Only build and return imask if there are some non-zero elements\n if isinstance(imask, xr.DataArray) and imask.sum() != 0:\n # Squeeze out any unwanted dimensions\n if len(imask.dims) > len(config.foreach):\n imask = imask.sum([i for i in imask.dims if i not in config.foreach]) > 0\n # We have a problem if we have too few dimensions at this point...\n if len(imask.dims) < len(config.foreach):\n raise ValueError(f\"Missing dimension(s) in imask for set {name}\")\n\n valid_subset = _get_valid_subset(\n reorganise_xarray_dimensions(imask).astype(bool)\n )\n\n return valid_subset\n\n else:\n return None", "def _process_data(data, band):\n\n meta = {key:value for key,value in data[0].items() if key != \"subset\" }\n meta['band'] = band\n data_dict = {'dates': [], 'arrays': [], 'metadata': meta}\n for i in data:\n for j in i['subset']:\n if j['band'] == band:\n data_dict['dates'].append(j['calendar_date'])\n data = []\n for x in j['data']:\n try:\n data.append(float(x))\n except ValueError:\n data.append(np.nan) \n data_dict['arrays'].append(np.array(data).reshape(meta['nrows'], \n meta['ncols'])) \n dtdates = [dt.datetime.strptime(d,\"%Y-%m-%d\") for d in data_dict['dates']]\n xcoordinates = ([float(meta['xllcorner'])] + \n [i * meta['cellsize'] + float(meta['xllcorner']) \n for i in range(1, meta['ncols'])])\n ycoordinates = ([float(meta['yllcorner'])] + \n [i * meta['cellsize'] + float(meta['yllcorner'])\n for i in range(1, meta['nrows'])])\n return xr.DataArray(name = band,\n data = 
np.flipud(np.dstack(data_dict['arrays'])),\n coords = [np.array(ycoordinates), \n np.array(xcoordinates), dtdates],\n dims = [ \"y\", \"x\", \"time\" ],\n attrs = meta)", "def any(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"any\",\n dim=dim,\n numeric_only=False,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_any,\n dim=dim,\n numeric_only=False,\n keep_attrs=keep_attrs,\n **kwargs,\n )", "def any(\n self,\n dim: Dims = None,\n *,\n keep_attrs: bool | None = None,\n **kwargs: Any,\n ) -> Dataset:\n if (\n flox_available\n and OPTIONS[\"use_flox\"]\n and contains_only_chunked_or_numpy(self._obj)\n ):\n return self._flox_reduce(\n func=\"any\",\n dim=dim,\n numeric_only=False,\n # fill_value=fill_value,\n keep_attrs=keep_attrs,\n **kwargs,\n )\n else:\n return self.reduce(\n duck_array_ops.array_any,\n dim=dim,\n numeric_only=False,\n keep_attrs=keep_attrs,\n **kwargs,\n )", "def __nc_dataset(self, name, data_sel, fill_as_nan):\n if name not in self.fid['/target_product'].variables.keys():\n raise ValueError('dataset {} for found'.format(name))\n\n dset = self.fid['/target_product/{}'.format(name)]\n res = dset[:].reshape(self.scanline, self.ground_pixel)\n if data_sel is not None:\n res = res[data_sel]\n\n if fill_as_nan:\n return res.filled(np.nan)\n\n return res.data", "def get_var(dataset, varname):\n \n import xarray as xr\n \n var = dataset[varname]\n time = dataset['TIME']\n lat = dataset['YDim']\n lon = dataset['XDim']\n\n da = xr.DataArray(var[:,:,:], coords=[time[:],lat[:],lon[:]], dims=['time','lat','lon'],\n attrs=var.attributes, name=varname)\n \n da['time'].attrs = time.attributes\n da['lat'].attrs = lat.attributes\n da['lon'].attrs = lon.attributes\n\n # Set _FillValue for coordinate arrays\n da.lat.encoding['_FillValue'] = 9.969209968386869e+36\n da.lon.encoding['_FillValue'] = 9.969209968386869e+36\n\n # To avoid conflicts between _FillValue and missing_value attributes when file is read\n da.attrs.pop('fmissing_value')\n da.attrs.pop('missing_value')\n \n return da", "def dask_gd2_nanfill(xx, yy, z_array, algorithm='cubic', **kwargs):\n n_jobs = kwargs.pop(\"n_jobs\", 4)\n chunk_size = kwargs.get(\"chunk_size\", int(xx.size / (n_jobs - 1)))\n # make dask arrays\n dask_xyz = da.from_array((xx, yy, z_array), chunks=(3, chunk_size, \"auto\"), name=\"dask_all\")\n dask_xx = dask_xyz[0,:,:]\n dask_yy = dask_xyz[1,:,:]\n dask_zz = dask_xyz[2,:,:]\n\n # select only valid values\n dask_valid_x1 = dask_xx[~da.isnan(dask_zz)]\n dask_valid_y1 = dask_yy[~da.isnan(dask_zz)]\n dask_valid_z1 = dask_zz[~da.isnan(dask_zz)]\n\n # interpolate for missing values\n return dask_interpolate(dask_valid_x1, dask_valid_y1, dask_valid_z1, dask_xx, dask_yy, algorithm=algorithm, **kwargs)", "def ensure_dims(array: xr.DataArray, *dimensions: Hashable) -> xr.DataArray:\n missing_dims = set(dimensions) - set(array.dims)\n\n new_dims = defaultdict(list)\n for coord in missing_dims:\n cdim_tuple = array.coords[coord].dims\n\n if len(cdim_tuple) > 1:\n raise ValueError('Multi dimensional coordinates are not supported')\n\n cdim = cdim_tuple[0]\n\n new_dims[cdim].append(coord)\n\n for dim, coords in new_dims.items():\n array = array.set_index({cdim: tuple(coords)}) # type: ignore[assignment]\n\n if len(coords) > 1:\n array = 
array.unstack(dim)\n\n return array.drop_vars(array.coords.keys() - set(array.dims))", "def data_array_dask_df(self):\n\n # Metadata for group by operation in dask.dd.groupby\n meta = pd.DataFrame({\n 'time': pd.Series([], dtype='<M8[ns]'),\n 'lat': pd.Series([], dtype='float'),\n 'lon': pd.Series([], dtype='float'),\n 't2m': pd.Series([], dtype='float'),\n 'area_grid': pd.Series([], dtype='float'),\n 'temp_bucket': pd.Series([], dtype='float'),\n })\n\n self.data_array['area_grid'] = self._calculate_area_from_latitude(\n self.data_array.lat\n )\n\n # Calculate window operation if selected\n if self.moving_window_size is not None:\n if isinstance(self.moving_window_size, int):\n window_array = (\n self.data_array[self.temp_var].\n rolling(time=self.moving_window_size,\n center=False,\n min_periods=self.moving_window_size\n )\n )\n else:\n raise NotImplementedError\n\n window_arrays = []\n for label, array_window in window_array:\n bucket_array = self._create_bucket_window(\n w_arr=array_window,\n label_time=label)\n window_arrays.append(bucket_array)\n\n lazy_results = dask.compute(*window_arrays[self.moving_window_size:])\n lazy_results_no_none = [r for r in lazy_results if r is not None]\n\n self.data_array['temp_bucket'] = xr.concat(lazy_results_no_none,\n dim='time')\n\n #return unified chunks since window changed chunks\n return (\n self.data_array\n .unify_chunks()\n .to_dask_dataframe(dim_order=self.DIMS)\n )\n\n else:\n # Yield dask.dataframe and process groupby operation\n array_ddf = self.data_array.to_dask_dataframe(dim_order=self.DIMS)\n array_ddf_transform = (\n array_ddf\n .groupby(['time'])\n .apply(self._bucket_builder_ddf,\n meta=meta)\n )\n\n return array_ddf_transform", "def _pad_array(da, dim, left=False, boundary=None, fill_value=0.0):\n\n if boundary not in [\"fill\", \"extend\"]:\n raise ValueError(\"`boundary` must be `'fill'` or `'extend'`\")\n\n axis_num = da.get_axis_num(dim)\n shape = list(da.shape)\n shape[axis_num] = 1\n\n base_array = da.data\n index = slice(0, 1) if left else slice(-1, None)\n edge_array = da.isel(**{dim: index}).data\n\n use_dask = has_dask and isinstance(base_array, dsa.Array)\n\n if boundary == \"extend\":\n boundary_array = edge_array\n elif boundary == \"fill\":\n args = shape, fill_value\n kwargs = {\"dtype\": base_array.dtype}\n if use_dask:\n full_func = dsa.full\n kwargs[\"chunks\"] = edge_array.chunks\n else:\n full_func = np.full\n boundary_array = full_func(*args, **kwargs)\n\n arrays_to_concat = [base_array, boundary_array]\n if left:\n arrays_to_concat.reverse()\n\n return concatenate(arrays_to_concat, axis=axis_num)", "def index_xarray_data():\n pressure = xr.DataArray([850., 700., 500.], dims=('isobaric',), attrs={'units': 'hPa'})\n temp = xr.DataArray([[[[296., 295., 294.], [293., 292., 291.]],\n [[286., 285., 284.], [283., 282., 281.]],\n [[276., 275., 274.], [273., 272., 271.]]]] * units.K,\n dims=('time', 'isobaric', 'y', 'x'))\n\n profile = xr.DataArray([[[[289., 288., 287.], [286., 285., 284.]],\n [[279., 278., 277.], [276., 275., 274.]],\n [[269., 268., 267.], [266., 265., 264.]]]] * units.K,\n dims=('time', 'isobaric', 'y', 'x'))\n\n dewp = xr.DataArray([[[[294., 293., 292.], [291., 290., 289.]],\n [[284., 283., 282.], [281., 280., 279.]],\n [[274., 273., 272.], [271., 270., 269.]]]] * units.K,\n dims=('time', 'isobaric', 'y', 'x'))\n\n dirw = xr.DataArray([[[[180., 180., 180.], [180., 180., 180.]],\n [[225., 225., 225.], [225., 225., 225.]],\n [[270., 270., 270.], [270., 270., 270.]]]] * units.degree,\n dims=('time', 
'isobaric', 'y', 'x'))\n\n speed = xr.DataArray([[[[20., 20., 20.], [20., 20., 20.]],\n [[25., 25., 25.], [25., 25., 25.]],\n [[50., 50., 50.], [50., 50., 50.]]]] * units.knots,\n dims=('time', 'isobaric', 'y', 'x'))\n\n return xr.Dataset({'temperature': temp, 'profile': profile, 'dewpoint': dewp,\n 'wind_direction': dirw, 'wind_speed': speed},\n coords={'isobaric': pressure, 'time': ['2020-01-01T00:00Z']})", "def data_array(self) -> xr.Dataset:\n\n xr_data = xr.open_mfdataset(self.path_to_files,\n chunks=self.chunks,\n parallel=True)\n\n if not all(x in list(xr_data.coords) for x in self.DIMS):\n xr_data = xr_data.rename({\n 'latitude': 'lat',\n 'longitude': 'lon',\n })\n\n if self.subset_dict is not None:\n print(f'Cutting data using {self.subset_dict}')\n xr_data = self.cut(xr_data)\n\n if self.season is not None:\n xr_data = xr_data.where(xr_data.time.dt.season == self.season,\n drop=True)\n\n if self.rescale_longitude is True:\n xr_data = xr_data.assign_coords(lon=(((xr_data.lon + 180) % 360) -\n 180)).sortby('lon')\n\n return xr_data", "def test_slice_other_dimension(self):\n for i, shape in enumerate([(3, 0), (1, 2, 0), (2, 0, 1)]):\n dset = self.f.create_dataset('x%d'%i, shape, dtype=int, maxshape=(None,)*len(shape))\n self.assertEqual(dset.shape, shape)\n out = dset[:1]\n self.assertIsInstance(out, np.ndarray)\n self.assertEqual(out.shape, (1,)+shape[1:])", "def initializeDomainCondition(self):\n print('Initialize the condition.')\n\n self.fluidPDF = np.zeros([self.typesFluids, self.ny, self.nx, 9])\n self.fluidsDensity = np.zeros([self.typesFluids, self.ny, self.nx])\n self.physicalVX = np.zeros([self.ny, self.nx])\n self.physicalVY = np.zeros([self.ny, self.nx])\n self.forceX = np.zeros([self.typesFluids, self.ny, self.nx])\n self.forceY = np.zeros([self.typesFluids, self.ny, self.nx])\n if (self.PictureExistance == \"'no'\"):\n for i in sp.arange(self.ny):\n for j in sp.arange(self.nx):\n# for k in sp.arange(self.typesFluids):\n tmpCenterX = int(self.nx / 2); tmpCenterY = int(self.ny / 2)\n if (self.isDomain[i, j] == True):\n# if (sp.sqrt((i - tmpCenterY) * (i - tmpCenterY) + (j - \\\n# tmpCenterX) * (j - tmpCenterX)) <= 15.):\n# if (i < 15 and np.abs(j - tmpCenterX) < 15):\n# if ((i >0 and i < 28) and (j >=102 and j < 154)):\n if (i < self.ny - 10):\n# if (i < 128 and i > 70):\n self.fluidsDensity[0, i, j] = self.initialDensities[0]\n self.fluidPDF[0, i, j, :] = self.weightsCoeff * self.initialDensities[0]\n self.fluidsDensity[1, i, j] = self.backgroundDensities[1]\n self.fluidPDF[1, i, j, :] = self.weightsCoeff * self.backgroundDensities[1]\n else:\n self.fluidsDensity[1, i, j] = self.initialDensities[1]\n self.fluidPDF[1, i, j, :] = self.weightsCoeff * self.initialDensities[1]\n self.fluidsDensity[0, i, j] = self.backgroundDensities[0]\n self.fluidPDF[0, i, j, :] = self.weightsCoeff * self.backgroundDensities[0] \n \n if (self.isCycles == \"'no'\" and self.PictureExistance == \"'yes'\"):\n for i in sp.arange(self.ny):\n for j in sp.arange(self.nx):\n if (i < self.ny - 20):\n # if ( np.abs(i - 60) < 20):\n for k in sp.arange(self.typesFluids):\n if (k == 0 and self.isDomain[i, j] == 1):\n self.fluidPDF[k, i, j, :] = self.initialDensities[k] * self.weightsCoeff\n self.fluidsDensity[k, i, j] = self.initialDensities[k]\n if (k == 1 and self.isDomain[i, j] == 1):\n self.fluidPDF[k, i, j, :] = self.backgroundDensities[k] * self.weightsCoeff\n self.fluidsDensity[k, i, j] = self.backgroundDensities[k]\n else:\n for k in sp.arange(self.typesFluids):\n if (k == 0 and self.isDomain[i, 
j] == 1):\n self.fluidPDF[k, i, j, :] = self.backgroundDensities[k] * self.weightsCoeff\n self.fluidsDensity[k, i, j] = self.backgroundDensities[k]\n if (k == 1 and self.isDomain[i, j] == 1):\n self.fluidPDF[k, i, j, :] = self.initialDensities[k] * self.weightsCoeff\n self.fluidsDensity[k, i, j] = self.initialDensities[k]\n elif (self.isCycles == \"'yes'\" and self.PictureExistance == \"'yes'\"):\n username = getpass.getuser()\n pathIniFile = '/home/' + username + '/LBMInitial/'\n if (os.path.exists(pathIniFile) == True): \n #for the old fluid distribution\n #the domain of the network\n iniFile = tb.open_file(pathIniFile + 'SimulationResults.h5', 'r')\n for i in sp.arange(self.typesFluids-1):\n self.fluidsDensity[i, :-30, :] = eval('iniFile.root.FluidMacro.FluidDensityType%gin%d[:-30, :]' % (i, self.lastStep))\n self.fluidsDensity[i, -30:, :] = self.backgroundDensities[i]\n for j in sp.arange(self.ny):\n for k in sp.arange(self.nx):\n self.fluidPDF[i, j, k, :] = self.weightsCoeff * \\\n self.fluidsDensity[i, j, k]\n iniFile.close()\n# for the new fluid in the domain\n for i in sp.arange(self.ny):\n for j in sp.arange(self.nx):\n if (i < self.ny - 30 and self.isDomain[i, j] == 1):\n self.fluidsDensity[-1, i, j] = self.backgroundDensities[-1]\n self.fluidPDF[-1, i, j, :] = self.backgroundDensities[-1] * \\\n self.weightsCoeff\n# continue\n elif (i >= self.ny - 30 and self.isDomain[i, j] == 1):\n self.fluidsDensity[-1, i, j] = self.initialDensities[-1]\n self.fluidPDF[-1, i, j, :] = self.initialDensities[-1] * \\\n self.weightsCoeff\n else:\n print(\"There is no file for initializing the domain.\")\n sys.exit()", "def assign_dx_array(self):\n\n #If dx is not defined by user, compute a uniform dx\n if 'delta x' not in self.input_data['numerical']:\n length = self.res_length\n dx = np.float(length) / self.ngrids\n dx_arr = np.ones(self.ngrids) * dx\n else:\n #Convert to numpy array and ensure that the length of \n #dx matches ngrids\n dx_arr = np.array(self.input_data['numerical']['delta x'], \n dtype=np.double)\n\n length_dx_arr = dx_arr.shape[0]\n \n #For user input 'delta x' array, we need to ensure that its size\n #agress with ngrids as determined from permeability/porosity values\n assert length_dx_arr == self.ngrids, (\"User defined 'delta x' array \\\n doesn't match 'number of grids'\")\n\n return dx_arr", "def apply_mk(da):\n mk = xr.apply_ufunc(mk_nan_wrap, da, input_core_dims=[['time']], output_core_dims=[[], [], []], dask='allowed', vectorize=True)\n ds = mk[0].to_dataset(name='S').merge(mk[1].to_dataset(name='ts_slope')).merge(mk[2].to_dataset(name='ts_intercept'))\n \n return ds" ]
[ "0.55187356", "0.5427349", "0.51378554", "0.5105014", "0.510499", "0.50856996", "0.5008775", "0.49854606", "0.49854606", "0.49708956", "0.49670136", "0.4929073", "0.49227515", "0.4882665", "0.48811394", "0.48774448", "0.4864481", "0.4864481", "0.48592335", "0.48482525", "0.48313722", "0.4828929", "0.48259598", "0.4817091", "0.48122898", "0.47962365", "0.4794919", "0.47921494", "0.4780768", "0.47737688" ]
0.62297755
0
Pad an xarray.DataArray da according to the boundary conditions along dim. Return a raw dask or numpy array, depending on the underlying data.
def _pad_array(da, dim, left=False, boundary=None, fill_value=0.0): if boundary not in ["fill", "extend"]: raise ValueError("`boundary` must be `'fill'` or `'extend'`") axis_num = da.get_axis_num(dim) shape = list(da.shape) shape[axis_num] = 1 base_array = da.data index = slice(0, 1) if left else slice(-1, None) edge_array = da.isel(**{dim: index}).data use_dask = has_dask and isinstance(base_array, dsa.Array) if boundary == "extend": boundary_array = edge_array elif boundary == "fill": args = shape, fill_value kwargs = {"dtype": base_array.dtype} if use_dask: full_func = dsa.full kwargs["chunks"] = edge_array.chunks else: full_func = np.full boundary_array = full_func(*args, **kwargs) arrays_to_concat = [base_array, boundary_array] if left: arrays_to_concat.reverse() return concatenate(arrays_to_concat, axis=axis_num)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def padding(a, dim):\n\n return np.pad(a, (0, dim-len(a)), 'constant', constant_values=(0))", "def pad(data, maxPadLength=False):\n data_padded = data.copy()\n bad_indexes = np.isnan(data)\n good_indexes = np.logical_not(bad_indexes)\n good_data = data[good_indexes]\n interpolated = np.interp(\n bad_indexes.nonzero()[0], good_indexes.nonzero()[0], good_data\n )\n data_padded[bad_indexes] = interpolated\n if maxPadLength:\n blocks, n_blocks = ndimage.label(np.isnan(data))\n for bl in range(1, n_blocks + 1):\n # if greater than max pad length then keep as nan\n # i.e. don't interpolate over too large a range\n if (blocks == bl).sum() > maxPadLength:\n data_padded[blocks == bl] = np.nan\n\n return data_padded", "def make_test_dataArray():\n x = np.zeros(shape=(3,31))\n x[0,:] = np.nan\n x[1,[1,2,3,4,5,6,15,23,24,25]] = [np.nan,np.nan,0.1,0.5,2.,2.,2.,2.,0.9,2.]\n x[2,[3,4,5,6,15,23,24,25]] = [0.1,0.5,2.,2.,2.,2.,0.9,2.]\n da = xr.DataArray(x, dims=['x','time'])\n da.coords['time'] = pd.date_range('19790101', freq='D', periods=31)\n return da", "def ndpad(a,npad=None,padval=0):\n\n if npad == None:\n npad = np.ones(a.ndim)\n elif np.isscalar(npad):\n npad = (npad,)*a.ndim\n elif len(npad) != a.ndim:\n raise Exception('Length of npad (%i) does not match the '\\\n 'dimensionality of the input array (%i)'\n %(len(npad),a.ndim))\n\n # initialise padded output\n padsize = [a.shape[dd]+2*npad[dd] for dd in xrange(a.ndim)]\n b = np.ones(padsize,a.dtype)*padval\n\n # construct an N-dimensional list of slice objects\n ind = [slice(np.floor(npad[dd]),a.shape[dd]+np.floor(npad[dd])) for dd in xrange(a.ndim)]\n\n # fill in the non-pad part of the array\n b[ind] = a\n return b", "def unsqueeze_data_array(da, dim, pos, coord=None, attrs=None):\n new_dims = list(da.dims)\n new_dims.insert(pos, dim)\n new_shape = da.data.shape[:pos] + (1,) + da.data.shape[pos:]\n new_data = da.data.reshape(new_shape)\n new_coords = {k: v for k, v in da.coords.items()}\n if coord:\n new_coords[dim] = xarray.DataArray([coord], dims=[dim])\n if attrs:\n new_coords[dim].attrs.update(attrs)\n return xarray.DataArray(new_data, dims=new_dims, coords=new_coords, attrs=da.attrs)", "def left_dimension_pad(array, n):\n if array.ndim >= n:\n return array\n nadd = n - array.ndim\n atts = [_new_attribute_label('_dim%i' % i, array) for i in range(nadd)]\n apply_args = [x for item in enumerate(atts) for x in item[::-1]]\n\n ds = array.datashape.copy()\n ds.dim_low = ([0] * nadd) + list(ds.dim_low)\n ds.dim_high = ([0] * nadd) + list(ds.dim_high)\n ds.dim_names = atts + list(ds.dim_names)\n ds.chunk_overlap = ([0] * nadd) + list(ds.chunk_overlap)\n ds.chunk_size = ([1000] * nadd) + list(ds.chunk_size)\n\n return array.apply(*apply_args).redimension(ds.schema)", "def _pad_data(data, pad_length, padding_type='same'):\n\n # get the sampling period (or distance between sampling points, for PLUX devices this is always 1)\n # it is assumed that the signals are equidistantly sampled therefore only the distance between to sampling points\n # is needed to calculate the sampling period\n T = data[:, 0][1] - data[:, 0][0]\n\n if padding_type == 'same':\n\n # create the 'same' padding array\n padding = np.tile(data[-1, 1:], (pad_length, 1))\n\n elif padding_type == 'zero':\n\n # get the number of columns for the zero padding\n num_cols = data.shape[1] - 1 # ignoring the time/sample column\n\n # create the zero padding array\n padding = np.zeros((pad_length, num_cols))\n\n else:\n\n IOError('The padding type you chose is not defined. 
Use either \\'same\\ or \\'zero\\'.')\n\n # create the time / sample axis that needs to be padded\n start = data[:, 0][-1] + T\n stop = start + (T * pad_length)\n time_pad = np.arange(start, stop, T)\n time_pad = time_pad[:pad_length] # crop the array if there are to many values\n\n # expand dimension for hstack operation\n time_pad = np.expand_dims(time_pad, axis=1)\n\n # hstack the time_pad and the zero_pad to get the final padding array\n pad_array = np.hstack((time_pad, padding))\n\n # vstack the pad_array and the new_array\n padded_data = np.vstack([data, pad_array])\n\n return padded_data", "def zero_pad(X, padding_width, dims):\n dims = (dims) if isinstance(dims, int) else dims\n pad = [(0, 0) if idx not in dims else (padding_width, padding_width)\n for idx in range(len(X.shape))]\n X_padded = np.pad(X, pad, 'constant')\n return X_padded", "def _unpad(self, a, axis, out):\n\n if a.shape[axis] == self.Nin:\n return a\n elif a.shape[axis] != self.N:\n raise ValueError(\"array much be of size N or len(x)\")\n\n Npad = self.N - self.Nin\n if out:\n _Npad, Npad_ = Npad - Npad//2, Npad//2\n else:\n _Npad, Npad_ = Npad//2, Npad - Npad//2\n\n return np.take(a, range(_Npad, self.N - Npad_), axis=axis)", "def procrustes(a,target,side='both',padval=0):\n\n try:\n if len(target) != a.ndim:\n raise TypeError('Target shape must have the same number of dimensions as the input')\n except TypeError:\n raise TypeError('Target must be array-like')\n\n try:\n b = np.ones(target,a.dtype)*padval\n except TypeError:\n raise TypeError('Pad value must be numeric')\n except ValueError:\n raise ValueError('Pad value must be scalar')\n\n aind = [slice(None,None)]*a.ndim\n bind = [slice(None,None)]*a.ndim\n\n # pad/trim comes after the array in each dimension\n if side == 'after':\n for dd in xrange(a.ndim):\n if a.shape[dd] > target[dd]:\n aind[dd] = slice(None,target[dd])\n elif a.shape[dd] < target[dd]:\n bind[dd] = slice(None,a.shape[dd])\n\n # pad/trim comes before the array in each dimension\n elif side == 'before':\n for dd in xrange(a.ndim):\n if a.shape[dd] > target[dd]:\n aind[dd] = slice(a.shape[dd]-target[dd],None)\n elif a.shape[dd] < target[dd]:\n bind[dd] = slice(target[dd]-a.shape[dd],None)\n\n # pad/trim both sides of the array in each dimension\n elif side == 'both':\n for dd in xrange(a.ndim):\n if a.shape[dd] > target[dd]:\n diff = (a.shape[dd]-target[dd])/2.\n aind[dd] = slice(np.floor(diff),a.shape[dd]-np.ceil(diff))\n elif a.shape[dd] < target[dd]:\n diff = (target[dd]-a.shape[dd])/2.\n bind[dd] = slice(np.floor(diff),target[dd]-np.ceil(diff))\n\n else:\n raise Exception('Invalid choice of pad type: %s' %side)\n\n b[bind] = a[aind]\n\n return b", "def test_pad1():\n x = np.array([[[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]]])\n pad = [0, 0, 0, 0, 0, 0, 1, 1, 0, 0]\n mode = \"constant\"\n value = 0\n res = np.array([[[[[0.0, 0.0, 0.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [0.0, 0.0, 0.0]]]]])\n # data_format = \"NCDHW\"\n obj.run(res=res, x=x, pad=pad, mode=mode, value=value)", "def pad(self, pad_width, mode=\"constant\", constant_values=0):\r\n destination = np.zeros(\r\n (\r\n self.dataset.count,\r\n self.__arr.shape[1] + 2 * pad_width,\r\n self.__arr.shape[2] + 2 * pad_width,\r\n ),\r\n self.__arr.dtype,\r\n )\r\n\r\n for i in range(0, self.dataset.count):\r\n destination[i], transform = rasterio.pad(\r\n self.__arr[i],\r\n self.dataset.transform,\r\n pad_width,\r\n mode,\r\n constant_values=constant_values,\r\n )\r\n\r\n self.__arr = destination\r\n self.__update_dataset(self.dataset.crs, 
transform, nodata=self.dataset.nodata)", "def _pad_simple(array, pad_width, fill_value=None):\n # Allocate grown array\n new_shape = tuple(\n left + size + right\n for size, (left, right) in zip(array.shape, pad_width)\n )\n order = 'F' if array.flags.fnc else 'C' # Fortran and not also C-order\n padded = np.empty(new_shape, dtype=array.dtype, order=order)\n\n if fill_value is not None:\n padded.fill(fill_value)\n\n # Copy old array into correct space\n original_area_slice = tuple(\n slice(left, left + size)\n for size, (left, right) in zip(array.shape, pad_width)\n )\n padded[original_area_slice] = array\n\n return padded, original_area_slice", "def _apply_boundary_condition(da, dim, left, boundary=None, fill_value=0.0):\n\n if boundary not in [\"fill\", \"extend\", \"extrapolate\"]:\n raise ValueError(\n \"`boundary` must be 'fill', 'extend' or \"\n \"'extrapolate', not %r.\" % boundary\n )\n\n axis_num = da.get_axis_num(dim)\n\n # the shape for the edge array\n shape = list(da.shape)\n shape[axis_num] = 1\n\n base_array = da.data\n index = slice(0, 1) if left else slice(-1, None)\n edge_array = da.isel(**{dim: index}).data\n\n use_dask = has_dask and isinstance(base_array, dsa.Array)\n\n if boundary == \"extend\":\n boundary_array = edge_array\n elif boundary == \"fill\":\n args = shape, fill_value\n kwargs = {\"dtype\": base_array.dtype}\n if use_dask:\n full_func = dsa.full\n kwargs[\"chunks\"] = edge_array.chunks\n else:\n full_func = np.full\n boundary_array = full_func(*args, **kwargs)\n elif boundary == \"extrapolate\":\n gradient_slice = slice(0, 2) if left else slice(-2, None)\n gradient_sign = -1 if left else 1\n linear_gradient = da.isel(**{dim: gradient_slice}).diff(dim=dim).data\n boundary_array = edge_array + gradient_sign * linear_gradient\n\n return boundary_array", "def right_dimension_pad(array, n):\n if array.ndim >= n:\n return array\n\n nadd = n - array.ndim\n atts = [_new_attribute_label('_dim%i' % i, array) for i in range(nadd)]\n apply_args = [x for item in enumerate(atts) for x in item[::-1]]\n\n ds = array.datashape.copy()\n ds.dim_low = list(ds.dim_low) + ([0] * nadd)\n ds.dim_high = list(ds.dim_high) + ([0] * nadd)\n ds.dim_names = list(ds.dim_names) + atts\n ds.chunk_overlap = list(ds.chunk_overlap) + ([0] * nadd)\n ds.chunk_size = list(ds.chunk_size) + ([1000] * nadd)\n\n return array.apply(*apply_args).redimension(ds.schema)", "def pad1D(X, pad, kernel_width=None, stride=None, dilation=0):\n\tX_pad = None\n\tp = pad\n\tif isinstance(p, int):\n\t\tp = (p, p)\n\tif isinstance(p, tuple):\n\t\tX_pad = np.pad(\n\t\t\tX, \n\t\t\tpad_width=((0, 0), (p[0], p[1]), (0, 0)),\n\t\t\tmode='constant',\n\t\t\t# constant_value = 0,\n\t\t\t)\n\n\t# compute the correct padding dims for a 'same' or 'causal' convolution\n\tif p in ['same', 'causal'] and kernel_width and stride:\n\t\tcausal = p == 'causal'\n\t\tp = calc_pad_dims_1D(\n\t\t\tX.shape, X.shape[1], kernel_width, stride, causal=causal, dilation=dilation\n\t\t\t)\n\t\tX_pad, p = pad1D(X, p)\n\n\treturn X_pad, p", "def _pad(self, a, axis, extrap, out):\n\n if a.shape[axis] == self.N:\n return a\n elif a.shape[axis] != self.Nin:\n raise ValueError(\"array much be of size len(x) or N\")\n\n axis %= a.ndim # to fix the indexing below with axis+1\n\n to_axis = [1] * a.ndim\n to_axis[axis] = -1\n\n Npad = self.N - self.Nin\n if out:\n _Npad, Npad_ = Npad - Npad//2, Npad//2\n else:\n _Npad, Npad_ = Npad//2, Npad - Npad//2\n\n try:\n _extrap, extrap_ = extrap\n except (TypeError, ValueError):\n _extrap = extrap_ = extrap\n\n 
if isinstance(_extrap, bool):\n if _extrap:\n end = np.take(a, [0], axis=axis)\n ratio = np.take(a, [1], axis=axis) / end\n exp = np.arange(-_Npad, 0).reshape(to_axis)\n _a = end * ratio ** exp\n else:\n _a = np.zeros(a.shape[:axis] + (_Npad,) + a.shape[axis+1:])\n elif _extrap == 'const':\n end = np.take(a, [0], axis=axis)\n _a = np.repeat(end, _Npad, axis=axis)\n else:\n raise ValueError(\"left extrap not supported\")\n if isinstance(extrap_, bool):\n if extrap_:\n end = np.take(a, [-1], axis=axis)\n ratio = end / np.take(a, [-2], axis=axis)\n exp = np.arange(1, Npad_ + 1).reshape(to_axis)\n a_ = end * ratio ** exp\n else:\n a_ = np.zeros(a.shape[:axis] + (Npad_,) + a.shape[axis+1:])\n elif extrap_ == 'const':\n end = np.take(a, [-1], axis=axis)\n a_ = np.repeat(end, Npad_, axis=axis)\n else:\n raise ValueError(\"right extrap not supported\")\n\n return np.concatenate((_a, a, a_), axis=axis)", "def _pad(x, depth=4):\n divisor = np.power(2, depth)\n remainder = x.shape[0] % divisor\n\n # no padding because already of even shape\n if remainder == 0:\n return x\n # add zero rows after 1D feature\n elif len(x.shape) == 2:\n return np.pad(x, [(0, divisor - remainder), (0, 0)], \"constant\")\n # add zero columns and rows after 2D feature\n elif len(x.shape) == 3:\n return np.pad(x, [(0, divisor - remainder), (0, divisor - remainder),\n (0, 0)], \"constant\")", "def test_pad():\n x = randtool(\"float\", -10, 10, [3, 2, 1, 2])\n pad = [1, 1, 2, 3]\n mode = \"constant\"\n value = 2.0\n data_format = \"NCHW\"\n res = np.array(\n [\n [\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, -8.88523461, 1.99072967, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ],\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 4.45995261, 9.40579439, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ],\n ],\n [\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 6.43138915, 0.55102135, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ],\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, -3.37046541, -2.92035609, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ],\n ],\n [\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, -8.41939397, 1.11828761, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ],\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, -6.68411074, -4.09524338, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ],\n ],\n ]\n )\n obj.run(res=res, x=x, pad=pad, mode=mode, value=value, data_format=data_format)", "def pad(input_data):\n # source : https://stackoverflow.com/questions/6518811/interpolate-nan-values-in-a-numpy-array \n data = input_data.copy()\n bad_indexes = np.isnan(data)\n good_indexes = np.logical_not(bad_indexes)\n good_data = data[good_indexes]\n interpolated = np.interp(bad_indexes.nonzero()[0], good_indexes.nonzero()[0], good_data)\n data[bad_indexes] = interpolated\n return data", "def test_pad2():\n x = np.array([[[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]]])\n pad = (0, 1, 1, 1, 2, 0)\n mode = \"constant\"\n value = 0\n data_format = \"NCDHW\"\n res = np.array(\n [\n [\n [\n [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]],\n [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]],\n [[0.0, 0.0, 0.0, 0.0], [1.0, 2.0, 3.0, 0.0], [4.0, 5.0, 6.0, 0.0], [0.0, 0.0, 0.0, 0.0]],\n ]\n ]\n 
]\n )\n obj.run(res=res, x=x, pad=pad, mode=mode, value=value, data_format=data_format)", "def full_like(\n other: xr.DataArray, nodata: float = None, lazy: bool = False\n) -> xr.DataArray:\n if not isinstance(other, xr.DataArray):\n raise ValueError(\"other should be xarray.DataArray.\")\n if nodata is None:\n nodata = other.raster.nodata if other.raster.nodata is not None else np.nan\n da = full(\n coords={d: c for d, c in other.coords.items() if d in other.dims},\n nodata=nodata,\n dtype=other.dtype,\n name=other.name,\n attrs=other.attrs,\n crs=other.raster.crs,\n lazy=lazy,\n shape=other.shape,\n dims=other.dims,\n )\n da.raster.set_attrs(**other.raster.attrs)\n return da", "def test_pad7():\n x = np.array([[[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]]])\n pad = (2, 2, 1, 1, 0, 0)\n mode = \"reflect\"\n data_format = \"NCDHW\"\n res = np.array(\n [\n [\n [\n [\n [6.0, 5.0, 4.0, 5.0, 6.0, 5.0, 4.0],\n [3.0, 2.0, 1.0, 2.0, 3.0, 2.0, 1.0],\n [6.0, 5.0, 4.0, 5.0, 6.0, 5.0, 4.0],\n [3.0, 2.0, 1.0, 2.0, 3.0, 2.0, 1.0],\n ]\n ]\n ]\n ]\n )\n obj.run(res=res, x=x, pad=pad, mode=mode, data_format=data_format)", "def promote_empty_dims(ds):\n ds = ds.copy()\n for di in ds.dims:\n if di not in ds.coords:\n ds.coords[di] = ds[di]\n return ds", "def _padding(self, x, shape, value=0):\n row_padding = shape[0] - x.shape[0]\n col_padding = shape[1] - x.shape[1]\n return np.pad(x, [[0, row_padding], [0, col_padding]], mode=\"constant\", constant_values=value)", "def test_pad6():\n x = np.array([[[[[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]]]])\n pad = (2, 1, 3, 0, 2, 0)\n mode = \"replicate\"\n data_format = \"NDHWC\"\n res = np.array(\n [\n [\n [\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n ],\n [\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n ],\n [\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n [[1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [4.0, 5.0, 6.0]],\n ],\n ]\n ]\n )\n obj.run(res=res, x=x, pad=pad, mode=mode, data_format=data_format)", "def np_pad(x, list_thresh):\n x = np.array(x)\n x = np.pad(x, pad_width = ((0,0),(0,list_thresh-x.shape[1])), mode=\"constant\", constant_values=0)\n return x", "def _pad1d(self, x: torch.Tensor, padding_left: int, padding_right: int, mode: str = \"zero\", value: float = 0.0):\n length = x.shape[-1]\n if mode == \"reflect\":\n max_pad = max(padding_left, padding_right)\n if length <= max_pad:\n x = F.pad(x, (0, max_pad - length + 1))\n return F.pad(x, (padding_left, padding_right), mode, value)", "def full(\n coords,\n nodata=np.nan,\n dtype=np.float32,\n name=None,\n attrs={},\n crs=None,\n lazy=False,\n shape=None,\n dims=None,\n) -> xr.DataArray:\n f = dask.array.empty if lazy else np.full\n if dims is None:\n dims = 
tuple([d for d in coords])\n if shape is None:\n cs = next(iter(coords.values())) # get first coordinate\n if cs.ndim == 1:\n shape = tuple([coords[dim].size for dim in dims])\n else: # rotated\n shape = cs.shape\n if hasattr(cs, \"dims\"):\n dims = cs.dims\n data = f(shape, nodata, dtype=dtype)\n da = xr.DataArray(data, coords, dims, name, attrs)\n da.raster.set_nodata(nodata)\n da.raster.set_crs(crs)\n return da", "def temporal_padding(buffer, clip_len):\n if buffer.shape[0] > clip_len:\n pass\n else:\n pad_len = clip_len - buffer.shape[0] + 1\n npad = ((pad_len, 0), (0, 0), (0, 0), (0,0))\n # buffer = np.pad(buffer, pad_width=npad, mode='constant', constant_values=0)\n buffer = np.pad(buffer, pad_width=npad, mode='mean')\n assert buffer.shape[0] - clip_len > 0, \"Incorrect Padding\"\n return buffer" ]
[ "0.5916931", "0.555809", "0.55494297", "0.5496643", "0.54872304", "0.5461839", "0.5435356", "0.5432517", "0.53927624", "0.53607583", "0.5343201", "0.5320598", "0.5312922", "0.52983654", "0.52979755", "0.5248074", "0.52351177", "0.5210277", "0.5204645", "0.5204212", "0.5164592", "0.509981", "0.50868195", "0.50840265", "0.5071875", "0.5061395", "0.5053385", "0.5033746", "0.5024222", "0.50217074" ]
0.7270703
0
Function monitors IP traffic in real time using Scapy's sniff function. If an exception occurs it will run again in 5 seconds.
def run_ip_traffic_monitor(db_connection, net_interfaces, device_id, agent_ip):

    while 1:
        try:
            arp_traffic_out = sniff(prn = ip_monitoring_callback(db_connection, net_interfaces, device_id, agent_ip), filter = "ip", store = 0);
        except:
            utc_time_now = str(datetime.utcnow())
            print('[' + utc_time_now + '] ' + 'An exception in ip_monitoring_callback function')

        # Just random sleep between 1 and 5 seconds before starting next round.
        # This will happen if an exception occurs in ip_monitoring_callback.
        random_seconds = random.randint(1, 6)
        time.sleep(random_seconds)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run_forever(self):\n scapy.sniff(prn=self.arp_cb, filter=\"arp\", store=0, count=0)", "def sniffing():\n sniff(store=False, prn=lambda p: threading.Thread(target=next, args=(p,)).start(), iface=IFACE)", "def sniff_online(args):\n print('viewer: listening on ' + args.interface)\n\n try:\n sniffer = pcapy.open_live(args.interface, 65536, 1, 1)\n sniffer.setfilter('icmp')\n except Exception as e:\n print(e)\n sys.exit(-1)\n\n if not args.count:\n count = True\n else:\n count = args.count\n\n while count:\n (header, packet) = sniffer.next()\n if header:\n tts = header.getts()\n ret = parse_ICMP_Echo(tts[0] + tts[1] / 1000000, packet)\n\n if ret and args.count:\n count -= 1", "def sniffer():\n try:\n sniff(iface=INTERFACE, prn=print_frame, filter='udp and (port bootps or bootps)', store=0)\n except Exception as _e:\n print(\"ERROR - sniffer(): {} {}\".format(_e.args, _e.message))", "def sniff_traffic(hs, count, timeout, traffic_type, pkt_type, exp_dst, step):\n iface = hs.ports['eth1']\n step('Scapy capture started')\n if (traffic_type == \"encap\"):\n packet = hs.libs.scapy.sniff2(\"iface='{}', count={}, timeout={}, \"\n \" filter='port 4789 and (!icmp or !ip6)', \"\n \" prn=lambda x:x.show()\".format(\n iface, count, timeout), True)\n parse_packet(packet, traffic_type, pkt_type, exp_dst, step=step)\n elif (traffic_type == \"decap\"):\n packet = hs.libs.scapy.sniff2(\"iface='{}', count={}, \"\n \" timeout={}, filter='!icmp or !ip6', \"\n \" prn=lambda x:x.show()\".format(\n iface, count, timeout), True)\n parse_packet(packet, traffic_type, pkt_type, exp_dst, step=step)", "def sniffing():\n sniff(store=False, prn=lambda p: threading.Thread(target=sendHash, args=(p,)).start(), iface=IFACE)", "def ip_monitoring_callback(db_connection, net_interfaces, device_id, host_ip):\n\n # For example, if 10.2.x.x talks to 10.1.x.x that is fine.\n subnet_ip_list = []\n for data in net_interfaces:\n local_ip = data['ip']\n local_ip = local_ip.split('.')\n subnet_ip_list.append(str(local_ip[0]))\n\n host_parts = host_ip.split('.')\n host_parts = str(host_parts[0]) + '.' 
+ str(host_parts[1])\n subnet_ip_list.append(host_parts)\n\n \"\"\"\n Note that if I want to pass parameters into the sniff's custom_action function for\n additional control or the ability to modularize out the custom_action function,\n I have to use a nested function.\n \"\"\"\n def upload_packet(packet):\n utc_time_now = utils.get_unix_epoch_milliseconds()\n\n if (packet and packet[0][1].src and packet[0][1].dst and\n str(packet[0][1].dst) != '255.255.255.255' and str(packet[0][1].src) != '255.255.255.255'):\n\n source_ip = packet[0][1].src\n destination_ip = packet[0][1].dst\n\n # Skip localhost and multicast DNS\n if (source_ip != '127.0.0.1' and destination_ip != '127.0.0.1' and\n source_ip != '239.255.255.250' and destination_ip != '239.255.255.250' and\n source_ip != '224.0.0.251' and destination_ip != '224.0.0.251'):\n\n src_parts = source_ip.split('.')\n src_parts = str(src_parts[0])\n dst_parts = destination_ip.split('.')\n dst_parts = str(dst_parts[0])\n\n if (src_parts in subnet_ip_list) and (dst_parts in subnet_ip_list):\n event = 'IP traffic is inside of local network'\n # Not sure if it makes sense to store this data on the database.\n #ip_traffic_data = {'ts': utc_time_now, 'src': source_ip, 'dst': destination_ip, 'not_local_ip': ''}\n else:\n not_local_ip = source_ip\n if src_parts in subnet_ip_list:\n not_local_ip = destination_ip\n\n ip_traffic_data = {'ts': utc_time_now, 'src': source_ip, 'dst': destination_ip, 'not_local_ip': not_local_ip}\n db.insert_ip_traffic_data(db_connection, ip_traffic_data)\n\n\n return upload_packet", "def capture(self, subnet, n=100):\n\n\t\tcapture_filter = \"ip and src net \" + subnet\n\t\tpackets = sniff(iface=self.iface, prn=self.callback, count=n, filter=capture_filter, store=0)\n\t\twrpcap(\"sniff.pcap\", packets)", "def run(self):\n print \"Starting Packet Sniffer on [ %s ]:[ %s ]...\" % (self.ifname, self.packet_filter_string)\n self.socket = conf.L2listen(\n type=ETH_P_ALL,\n iface=self.ifname,\n filter=self.packet_filter_string\n )\n\n sniff(\n opened_socket=self.socket,\n #filter=self.packet_filter_string,\n lfilter=self.is_not_outgoing,\n # prn=self.print_packet,\n prn=self.sniffer_callback,\n stop_filter=self.should_stop_sniffer\n )", "def sniff_ip(time_to_sniff):\r\n ip_dict = dict()\r\n port_dict = dict()\r\n packets = sniff(timeout=time_to_sniff, filter=\"ip\")\r\n\r\n for i in packets:\r\n sport = 0\r\n src = i['IP'].src\r\n\r\n if \"TCP\" in i:\r\n sport = i['TCP'].sport\r\n\r\n elif \"UDP\" in i:\r\n sport = i['UDP'].sport\r\n\r\n if not src in ip_dict.keys():\r\n ip_dict[src] = 1\r\n\r\n else:\r\n ip_dict[src] += 1\r\n\r\n if sport:\r\n if not sport in port_dict.keys():\r\n port_dict[sport] = 1\r\n\r\n else:\r\n port_dict[sport] += 1\r\n\r\n return ip_dict, port_dict", "def track(self):\n scapy.all.sniff(prn = self.add)", "def sniff_packets(iface=None):\n if iface: # (http)\n sniff(filter=\"port 80\", prn=process_packet, iface=iface, store=False)\n # 'process_packet' is the callback\n else:\n sniff(filter=\"port 80\", prn=process_packet, store=False)\n # default interface", "def sniff_traffic(hs, count, timeout, recipient_type, pkt_type,\n exp_src, exp_dst, testlog):\n iface = hs.ports['eth1']\n\n # If host is NVP, sniff using a filter that checks for UDP packets\n if (\"NVP\" in recipient_type):\n packet = hs.libs.scapy.sniff2(\"iface='{}', count={}, timeout={}, \"\n \" filter='port 4789 and (!icmp or !ip6)', \"\n \" prn=lambda x:x.show()\".format(\n iface, count, timeout), True)\n parse_packet(packet, recipient_type, 
pkt_type,\n exp_src, exp_dst, testlog)\n # If host is AVP, sniff using a filter that checks for Ethernet packets\n elif (\"AVP\" in recipient_type):\n packet = hs.libs.scapy.sniff2(\"iface='{}', count={}, \"\n \" timeout={}, filter='!icmp or !ip6', \"\n \" prn=lambda x:x.show()\".format(\n iface, count, timeout), True)\n parse_packet(packet, recipient_type, pkt_type,\n exp_src, exp_dst, testlog)", "def background_catch_up():\n while True:\n time.sleep(interval)\n s = 'http://{0}:{1}'.format(args.host, port)\n req = urllib2.Request(s)\n try:\n response = urllib2.urlopen(req)\n response.read()\n except Exception as e:\n pass", "def query_sniff(pkt):\n if IP in pkt:\n ip_src = pkt[IP].src\n ip_dst = pkt[IP].dst\n\n if pkt.haslayer(DNS) and pkt.getlayer(DNS).qr == 0:\n domain = pkt.getlayer(DNS).qd.qname.decode(\"utf-8\")\n now = datetime.now()\n stored_dns_requests.update({datetime.timestamp(now): domain})\n print(\"SRC: {} - DST: {} : {}\".format(ip_src, ip_dst, domain))", "def start(self, timeout=None) -> None:\n self.sniff(lambda p: self._handle_packet_in(p), timeout=None)", "def begin_sending_packets():\n monitoru = main_monitoring.MainMonitoring()\n monitoru.start_monitor_loop()", "def sleeper5():\n print \"Threaded sleeper of 5 seconds\"\n sleep(5)", "def _wait_for_ip(name, session):\n start_time = datetime.now()\n status = None\n while status is None:\n status = get_vm_ip(name, session)\n if status is not None:\n # ignore APIPA address\n if status.startswith(\"169\"):\n status = None\n check_time = datetime.now()\n delta = check_time - start_time\n log.debug(\n \"Waited %s seconds for %s to report ip address...\", delta.seconds, name\n )\n if delta.seconds > 180:\n log.warning(\"Timeout getting IP address\")\n break\n time.sleep(5)", "def sniff_continuously(self, packet_count=None):\n \n self.lcapture_tshark = (self.lcapture_tshark or \n self.eventloop.run_until_complete(self._get_tshark_process()))\n\n self._running_processes.add(self.lcapture_tshark)\n\n # Retained for backwards compatibility and to add documentation.\n return self._packets_from_tshark_sync(packet_count=packet_count, \n tshark_process=self.lcapture_tshark)", "def watch_for_rtt_messages(self):\n while True:\n message = self.socket_manager.get_rtt_message()\n self.ensure_sender_is_known(message)\n if message.stage == \"0\":\n self.respond_to_rtt_message(message)\n elif message.stage == \"1\":\n self.handle_rtt_response(message)\n elif message.stage == \"2\":\n self.handle_rtt_broadcast(message)", "def run(self):\n rate = WallRate(self.ping_frequency)\n while True:\n # In case of failure, this call will take approx 10s\n try:\n # Send 5 pings at an interval of 0.2s\n output = subprocess.check_output(\"ping -c 1 %s\" % self.ip,\n shell=True, stderr=subprocess.STDOUT)\n self.time_last_seen = time.time()\n try:\n parsed_output = \\\n output.splitlines()[-1].split(' ')[3].split('/')\n latency_stats = [float(x) for x in parsed_output]\n # Since this was a single ping, min = max = avg\n self.buffer[self.current_ring_counter] = latency_stats[1]\n self.values_available = self.values_available + 1 \\\n if self.values_available < self.buffer_size \\\n else self.buffer_size\n self.current_ring_counter = \\\n (self.current_ring_counter + 1) % self.buffer_size\n\n except (KeyError, ValueError) as e:\n # Had one occasion when something was wrong with ping output\n rospy.logwarn(\"Unable to update latency statistics from \" +\n self.ip + \". 
Error parsing ping output: \" +\n str(e))\n except subprocess.CalledProcessError:\n # Ping failed. Do not update time last seen\n pass\n rate.sleep()", "def start_interface():\n\n last_ip = None\n\n while True:\n time.sleep(5)\n current_ips = get_local_ip().split()\n\n # check if a network address was found\n if len(current_ips) == 0:\n communication = interaction.Communication.instance()\n communication.lost_connection()\n continue\n elif len(current_ips) == 1:\n if not current_ips[0][:3] == \"192\":\n communication = interaction.Communication.instance()\n communication.lost_connection()\n continue\n else:\n current_ip = current_ips[0]\n else:\n if current_ips[0][:3] == \"192\":\n current_ip = current_ips[0]\n else:\n current_ip = current_ips[1]\n\n # restar webservers if the IP is new\n if not current_ip == last_ip:\n last_ip = current_ip\n print(f\"Found new ip: {current_ip}\")\n\n agent = Agent.instance()\n communication = interaction.Communication.instance()\n communication.set_local_ip(current_ip)\n driver = Driver.instance()\n sensor_manager = SensorManager.instance()\n action_manager = interaction.ActionManager.instance()\n\n interface = WebInterface(agent, driver, sensor_manager, action_manager)\n interface.start(current_ip)", "def start(self, sniffer):\n pass", "def run( self ):\n while True:\n try:\n time.sleep( 5 )\n self._monitorProcess()\n except Exception, e:\n self.logger.exception( \"Error starting monitor process\" )", "def process_request(t):\n time.sleep(t)", "def process_request(t):\n time.sleep(t)", "def process_request(t):\n time.sleep(t)", "def packetSniff():\n\n packets = psutil.net_io_counters(pernic=True)\n interfaces = {}\n x = 0\n for p in packets.items():\n values = {}\n values['name'] = p[0]\n values['bytes_sent'] = p[1][0]\n values['bytes_recv'] = p[1][1]\n values['pckt_sent'] = p[1][2]\n values['pckt_recv'] = p[1][3]\n values['errin'] = p[1][4]\n values['errout'] = p[1][5]\n values['dropin'] = p[1][6]\n values['dropout'] = p[1][7]\n\n if ((values['bytes_sent'] or values['bytes_recv'] or\n values['pckt_sent'] or values['pckt_recv']) != 0):\n\n interfaces[x] = values\n x += 1\n else:\n pass\n\n return interfaces", "def start_sniffing(self, iface, udp_dport=UDP_TRPT_DST_PORT):\n logger.info(\"AE monitoring iface %s\", iface)\n sniff(iface=iface,\n prn=lambda packet: self.handle_packet(packet, udp_dport),\n stop_filter=lambda p: self.sniff_stop.is_set())" ]
[ "0.7126685", "0.68830526", "0.6274077", "0.605297", "0.5967374", "0.5935252", "0.5896744", "0.5872024", "0.58675337", "0.5812287", "0.57800436", "0.57372296", "0.56378335", "0.5585778", "0.5581399", "0.5579333", "0.55172724", "0.54108804", "0.5356755", "0.5340449", "0.5304415", "0.52877825", "0.52523416", "0.5249605", "0.5226788", "0.52215135", "0.52215135", "0.52215135", "0.5213075", "0.5208472" ]
0.7301768
0
Function monitors IP traffic in real time using Scapy's sniff function and stores the result in the sqlite3 database.
def ip_monitoring_callback(db_connection, net_interfaces, device_id, host_ip):

    # For example, if 10.2.x.x talks to 10.1.x.x that is fine.
    subnet_ip_list = []
    for data in net_interfaces:
        local_ip = data['ip']
        local_ip = local_ip.split('.')
        subnet_ip_list.append(str(local_ip[0]))

    host_parts = host_ip.split('.')
    host_parts = str(host_parts[0]) + '.' + str(host_parts[1])
    subnet_ip_list.append(host_parts)

    """
    Note that if I want to pass parameters into the sniff's custom_action function for
    additional control or the ability to modularize out the custom_action function,
    I have to use a nested function.
    """
    def upload_packet(packet):
        utc_time_now = utils.get_unix_epoch_milliseconds()

        if (packet and packet[0][1].src and packet[0][1].dst and
                str(packet[0][1].dst) != '255.255.255.255' and str(packet[0][1].src) != '255.255.255.255'):

            source_ip = packet[0][1].src
            destination_ip = packet[0][1].dst

            # Skip localhost and multicast DNS
            if (source_ip != '127.0.0.1' and destination_ip != '127.0.0.1' and
                    source_ip != '239.255.255.250' and destination_ip != '239.255.255.250' and
                    source_ip != '224.0.0.251' and destination_ip != '224.0.0.251'):

                src_parts = source_ip.split('.')
                src_parts = str(src_parts[0])
                dst_parts = destination_ip.split('.')
                dst_parts = str(dst_parts[0])

                if (src_parts in subnet_ip_list) and (dst_parts in subnet_ip_list):
                    event = 'IP traffic is inside of local network'
                    # Not sure if it makes sense to store this data on the database.
                    #ip_traffic_data = {'ts': utc_time_now, 'src': source_ip, 'dst': destination_ip, 'not_local_ip': ''}
                else:
                    not_local_ip = source_ip
                    if src_parts in subnet_ip_list:
                        not_local_ip = destination_ip

                    ip_traffic_data = {'ts': utc_time_now, 'src': source_ip, 'dst': destination_ip, 'not_local_ip': not_local_ip}
                    db.insert_ip_traffic_data(db_connection, ip_traffic_data)

    return upload_packet
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run_ip_traffic_monitor(db_connection, net_interfaces, device_id, agent_ip):\n\n while 1:\n try:\n arp_traffic_out = sniff(prn = ip_monitoring_callback(db_connection, net_interfaces, device_id, agent_ip), filter = \"ip\", store = 0);\n except:\n utc_time_now = str(datetime.utcnow())\n print('[' + utc_time_now + '] ' + 'An exception in ip_monitoring_callback function')\n\n # Just random sleep between 1 and 5 seconds before starting next round.\n # This will happen if an exception occurs in ip_monitoring_callback.\n random_seconds = random.randint(1, 6)\n time.sleep(random_seconds)", "def query_sniff(pkt):\n if IP in pkt:\n ip_src = pkt[IP].src\n ip_dst = pkt[IP].dst\n\n if pkt.haslayer(DNS) and pkt.getlayer(DNS).qr == 0:\n domain = pkt.getlayer(DNS).qd.qname.decode(\"utf-8\")\n now = datetime.now()\n stored_dns_requests.update({datetime.timestamp(now): domain})\n print(\"SRC: {} - DST: {} : {}\".format(ip_src, ip_dst, domain))", "def sniff_ip(time_to_sniff):\r\n ip_dict = dict()\r\n port_dict = dict()\r\n packets = sniff(timeout=time_to_sniff, filter=\"ip\")\r\n\r\n for i in packets:\r\n sport = 0\r\n src = i['IP'].src\r\n\r\n if \"TCP\" in i:\r\n sport = i['TCP'].sport\r\n\r\n elif \"UDP\" in i:\r\n sport = i['UDP'].sport\r\n\r\n if not src in ip_dict.keys():\r\n ip_dict[src] = 1\r\n\r\n else:\r\n ip_dict[src] += 1\r\n\r\n if sport:\r\n if not sport in port_dict.keys():\r\n port_dict[sport] = 1\r\n\r\n else:\r\n port_dict[sport] += 1\r\n\r\n return ip_dict, port_dict", "def sniff_online(args):\n print('viewer: listening on ' + args.interface)\n\n try:\n sniffer = pcapy.open_live(args.interface, 65536, 1, 1)\n sniffer.setfilter('icmp')\n except Exception as e:\n print(e)\n sys.exit(-1)\n\n if not args.count:\n count = True\n else:\n count = args.count\n\n while count:\n (header, packet) = sniffer.next()\n if header:\n tts = header.getts()\n ret = parse_ICMP_Echo(tts[0] + tts[1] / 1000000, packet)\n\n if ret and args.count:\n count -= 1", "def sniffing():\n sniff(store=False, prn=lambda p: threading.Thread(target=next, args=(p,)).start(), iface=IFACE)", "def sniff_traffic(hs, count, timeout, traffic_type, pkt_type, exp_dst, step):\n iface = hs.ports['eth1']\n step('Scapy capture started')\n if (traffic_type == \"encap\"):\n packet = hs.libs.scapy.sniff2(\"iface='{}', count={}, timeout={}, \"\n \" filter='port 4789 and (!icmp or !ip6)', \"\n \" prn=lambda x:x.show()\".format(\n iface, count, timeout), True)\n parse_packet(packet, traffic_type, pkt_type, exp_dst, step=step)\n elif (traffic_type == \"decap\"):\n packet = hs.libs.scapy.sniff2(\"iface='{}', count={}, \"\n \" timeout={}, filter='!icmp or !ip6', \"\n \" prn=lambda x:x.show()\".format(\n iface, count, timeout), True)\n parse_packet(packet, traffic_type, pkt_type, exp_dst, step=step)", "def sniff_packets(iface=None):\n if iface: # (http)\n sniff(filter=\"port 80\", prn=process_packet, iface=iface, store=False)\n # 'process_packet' is the callback\n else:\n sniff(filter=\"port 80\", prn=process_packet, store=False)\n # default interface", "def sniffer():\n try:\n sniff(iface=INTERFACE, prn=print_frame, filter='udp and (port bootps or bootps)', store=0)\n except Exception as _e:\n print(\"ERROR - sniffer(): {} {}\".format(_e.args, _e.message))", "def sniffing():\n sniff(store=False, prn=lambda p: threading.Thread(target=sendHash, args=(p,)).start(), iface=IFACE)", "def capture(self, subnet, n=100):\n\n\t\tcapture_filter = \"ip and src net \" + subnet\n\t\tpackets = sniff(iface=self.iface, prn=self.callback, count=n, 
filter=capture_filter, store=0)\n\t\twrpcap(\"sniff.pcap\", packets)", "def sniff_traffic(hs, count, timeout, recipient_type, pkt_type,\n exp_src, exp_dst, testlog):\n iface = hs.ports['eth1']\n\n # If host is NVP, sniff using a filter that checks for UDP packets\n if (\"NVP\" in recipient_type):\n packet = hs.libs.scapy.sniff2(\"iface='{}', count={}, timeout={}, \"\n \" filter='port 4789 and (!icmp or !ip6)', \"\n \" prn=lambda x:x.show()\".format(\n iface, count, timeout), True)\n parse_packet(packet, recipient_type, pkt_type,\n exp_src, exp_dst, testlog)\n # If host is AVP, sniff using a filter that checks for Ethernet packets\n elif (\"AVP\" in recipient_type):\n packet = hs.libs.scapy.sniff2(\"iface='{}', count={}, \"\n \" timeout={}, filter='!icmp or !ip6', \"\n \" prn=lambda x:x.show()\".format(\n iface, count, timeout), True)\n parse_packet(packet, recipient_type, pkt_type,\n exp_src, exp_dst, testlog)", "def run_forever(self):\n scapy.sniff(prn=self.arp_cb, filter=\"arp\", store=0, count=0)", "def run(self):\n print \"Starting Packet Sniffer on [ %s ]:[ %s ]...\" % (self.ifname, self.packet_filter_string)\n self.socket = conf.L2listen(\n type=ETH_P_ALL,\n iface=self.ifname,\n filter=self.packet_filter_string\n )\n\n sniff(\n opened_socket=self.socket,\n #filter=self.packet_filter_string,\n lfilter=self.is_not_outgoing,\n # prn=self.print_packet,\n prn=self.sniffer_callback,\n stop_filter=self.should_stop_sniffer\n )", "def tcpdump(timeout, q, interface):\t\n\tlogging.debug('tcpdump -s 1024 -lqnAt tcp port 80 -i eth0')\n\t# tcpdump -s 1024 -lqnAt tcp port 80\n\t\t\n\tcommand = Command(['/usr/sbin/tcpdump', '-s 1024', '-lnAq', '-i', interface], timeout)\n\tcommand.run()\n\n\t# when it's executing here, the results have been available\n\t# print command.out\n\n\tif command.out is not None:\n\t\t# pattern = \"time=([0-9]+\\.[0-9]+) ms\"\n\t\tip_pattern = \"IP ([0-9]+.[0-9]+.[0-9]+.[0-9]+).[0-9]+ > [0-9]+.[0-9]+.[0-9]+.[0-9]+.[0-9]\"\n\t\tgoogle_pattern = \"domain=.google.com\"\n\t\tlines = command.out.split('\\n')\n\t\tlast_ip = None\n\n\t\t# first time scan for google's return ip\n\t\tfor line in lines:\n\t\t\tip_src = re.search(ip_pattern, line)\n\t\t\tif ip_src is not None:\n\t\t\t\tlast_ip = ip_src.group(1)\n\t\t\tif re.search(google_pattern, line):\n\t\t\t\tprint last_ip\n\t\t\t\tbreak\n\n\t\tgEntries = []\n\t\tif last_ip is not None:\n\t\t\t\n\t\t\t# second time scan parse tcpdump for query entries\n\t\t\tfor line in lines:\n\t\t\t\tlast_ip_pos = re.search(last_ip, line)\n\t\t\t\tif last_ip_pos is None:\n\t\t\t\t\tcontinue\n\t\t\t\n\t\t\t\tif line.index('>') > last_ip_pos.start():\n\t\t\t\t\t# from remote to this place\n\t\t\t\t\ttraffic_type = 1\t\t\t\n\t\t\t\telse:\n\t\t\t\t\t# out to remote\n\t\t\t\t\ttraffic_type = 0\n\t\t\t\n\t\t\t\ttime_pattern = \"([0-9]+:[0-9]+:[0-9]+.[0-9]+) IP\"\n\t\t\t\ttimestamp = re.search(time_pattern, line)\n\t\t\t\tif timestamp is not None:\n\t\t\t\t\ttime_str = timestamp.group(1)\n\t\t\t\t\th, m, s, ms = map(int, re.split(r'[.:]+', time_str))\n\t\t\t\t\ttimestamp_delta = timedelta(hours=h, minutes=m, seconds=s, microseconds=ms)\n\t\t\t\t\tgEntries.append( (timestamp_delta, traffic_type) )\n\t\t\t\telse:\n\t\t\t\t\tgEntries.append( (None, -1))\n\n\t\tq.put((command.returncode, last_ip, gEntries))\n\t\treturn", "def track(self):\n scapy.all.sniff(prn = self.add)", "def packetSniff():\n\n packets = psutil.net_io_counters(pernic=True)\n interfaces = {}\n x = 0\n for p in packets.items():\n values = {}\n values['name'] = p[0]\n values['bytes_sent'] = 
p[1][0]\n values['bytes_recv'] = p[1][1]\n values['pckt_sent'] = p[1][2]\n values['pckt_recv'] = p[1][3]\n values['errin'] = p[1][4]\n values['errout'] = p[1][5]\n values['dropin'] = p[1][6]\n values['dropout'] = p[1][7]\n\n if ((values['bytes_sent'] or values['bytes_recv'] or\n values['pckt_sent'] or values['pckt_recv']) != 0):\n\n interfaces[x] = values\n x += 1\n else:\n pass\n\n return interfaces", "def accumulate_packets():\n l = []\n packets = sniff(count=NUMBER_OF_SNIFFING_ROUNDS, lfilter=fltr, prn=printing)\n print(\"Processing packets!\")\n for packet in packets:\n l.append({\"ip\": get_ip(packet),\n \"country\": get_country(packet),\n \"entering\": is_entering(packet),\n \"port\": get_partner_port(packet),\n \"size\": packet[IP].len, #the len of the ip layer is the len of the entire packet\n \"program\": get_program(packet)})\n return l", "def main():\n\n cmd = \"ping\"\n\n if os.environ.get(\"IPV6\", \"0\") == \"1\":\n cmd = \"ping6\"\n\n output = subprocess.check_output([cmd, os.environ[\"ADDRESS\"], \"-A\", \"-c\", os.environ.get(\"COUNT\", \"1\")])\n match = re.search(r\"(\\d+) packets transmitted, (\\d+) received\", output)\n\n if match:\n if match.group(1) != match.group(2):\n if match.group(2) != \"0\":\n alert(\"ping_packetloss\", {\n \"sent\": match.group(1),\n \"received\": match.group(2)\n })\n\n reading(\"loss\", (int(match.group(1)) - int(match.group(2))) * 100.0 / int(match.group(1)))\n else:\n reading(\"loss\", 100)\n alert(\"ping_failed\", {\n \"reason\": \"no_packet_received\"\n })\n else:\n reading(\"loss\", 0)\n\n rtt = re.search(r\"rtt min/avg/max/mdev = ([0-9.]+)/([0-9.]+)/([0-9.]+)/([0-9.]+) ms\", output)\n if rtt:\n reading(\"rtt\", float(rtt.group(2)))\n else:\n alert(\"ping_failed\", {\n \"reason\": \"command_failed\",\n \"output\": output\n })", "def get_ips():\n sql = sqlite3.connect('data.db')\n\n cursor = sql.cursor()\n\n get_ip = \"\"\"SELECT ip FROM Status\"\"\"\n\n ip = cursor.execute(get_ip).fetchall()\n\n cursor.close()\n\n return ip", "def run_ipdb(_step):\r\n import ipdb\r\n ipdb.set_trace()\r\n assert True", "def save(self, ip='', result='', dt=datetime.datetime.now()):\n self.ping_table.insert({\"host\": ip, \"result\": result, \"datetime\": str(dt)})\n return", "def preprocess_capture(data, ip_version=4, transp_layer=\"TCP\"):\n #SEE: https://www.winpcap.org/ntar/draft/PCAP-DumpFileFormat.html\n\n #TODO Implement ipv6, udp and ICMP\n if ip_version == 4:\n pass\n else:\n raise ValueError('IP version must be \"4\"')\n\n if transp_layer == \"TCP\":\n pass\n else:\n raise ValueError('transport layer must be TCP')\n\n try:\n capt = pyshark.FileCapture(data, keep_packets=False, display_filter='tcp')\n except:\n exit(\"Could not open pcap file\")\n\n ip_fields = ['src', 'dst', 'flags_df', 'flags_mf', 'hdr_len', 'len', 'ttl']\n tcp_fields = ['srcport', 'dstport', 'flags_ack', 'flags_fin', 'flags_push',\n 'flags_reset', 'flags_syn', 'flags_urg', 'hdr_len', 'len']\n\n #Temporary list to feed the final DataFrame (Performance)\n tmp = []\n counter = 0\n logging.info(\"Starting packet processing\")\n for pkt in capt:\n filtered = {}\n #First field is a empty string (ignoring)\n if hasattr(pkt, 'ip'):\n for field in ip_fields:\n #Changing field names for disambiguation in columns\n filtered[\"ip_\"+field] = pkt[\"ip\"].get_field(field)\n else:\n continue\n if hasattr(pkt, 'tcp'):\n for field in tcp_fields:\n #Changing field names for disambiguation in columns\n filtered[\"tcp_\"+field] = pkt[\"tcp\"].get_field(field)\n else:\n continue\n 
tmp.append(filtered)\n counter += 1\n if counter % 1000 == 0:\n logging.info(\"Processed %d packets\", counter)\n logging.info(\"Ended packet processing\")\n logging.info(\"Converting list to DataFrame\")\n X = pd.DataFrame(tmp)\n logging.info(\"Ended list conversion\")\n return X", "def ip_command():\n # 1. Get input host from Demisto\n ip = demisto.args().get('ip')\n if not is_ip_valid(ip):\n return_error('Invalid IP address, Please retry with a valid IP address')\n # 2. Get the host reputation from SlashNext API\n response = ip_lookup(ip=ip)\n if response.get('errorNo') != 0:\n return\n # 3. Parse and format the response\n dbot_score_cont, ip_cont = get_dbot_std_context(\n ip, 'IP', response.get('threatData').get('verdict'), response.get('threatData').get('threatType'))\n\n snx_ioc_cont = get_snx_host_ioc_context(ip, 'IP', response.get('threatData'))\n\n ec = {\n 'SlashNext.IP(val.Value === obj.Value)': snx_ioc_cont,\n 'DBotScore': dbot_score_cont,\n 'IP': ip_cont\n }\n\n title = 'SlashNext Phishing Incident Response - IP Lookup\\n' \\\n '##### ip = {}'.format(ip)\n\n md = tableToMarkdown(\n title,\n snx_ioc_cont,\n ['Value',\n 'Type',\n 'Verdict',\n 'ThreatStatus',\n 'ThreatName',\n 'ThreatType',\n 'FirstSeen',\n 'LastSeen']\n )\n\n return_outputs(md, ec, snx_ioc_cont)", "def _parse(self):\n \n # HUA determine the host ip address\n # read 20 packages and set the most frequent one\n ips_dict = {}\n count = 0\n for raw_packet in self.raw_packets:\n if count > 100: break\n ethernet = Ethernet(raw_packet[0:14])\n if(ethernet.type != 'IP'):\n continue\n ip = Ip(raw_packet[14:])\n if(ip.protocol != 'TCP') :\n continue\n if(ip.src not in ips_dict):\n ips_dict[ip.src] = 0\n ips_dict[ip.src] += 1\n if(ip.dst not in ips_dict):\n ips_dict[ip.dst] = 0\n ips_dict[ip.dst] += 1\n # get the most frequent one\n max_appear = 0\n ip = None\n for key, value in ips_dict.items():\n if value > max_appear:\n ip = key\n max_appear = value\n\n global _device_ip\n if not self.enableFilter or not _device_ip:\n _device_ip = ip\n\n global _tcp_buf\n _tcp_buf = {}\n number = 0\n self.begin_ts = self.packet_headers[-1]['ts']\n rcount = 0\n for raw_packet in self.raw_packets:\n pcap_packet = Pcap_packet()\n pcap_packet.pcap_num = rcount#number # add one to be consistent with wireshark\n pcap_packet.top_layer = 1\n pcap_packet.ethernet = Ethernet(raw_packet[0:14])\n \n #skip the packet that is not ip packet\n \n rcount += 1\n\n if (pcap_packet.ethernet.type != 'IP'):\n continue\n\n pcap_packet.top_layer = 2\n pcap_packet.ip = Ip(raw_packet[14:])\n\n\n\n\n # just collect the packets between \n \n if self.enableFilter and not (pcap_packet.ip.src == _device_ip and pcap_packet.ip.dst == SERVER_IP) \\\n and not (pcap_packet.ip.dst == _device_ip and pcap_packet.ip.src == SERVER_IP):\n #print \"Ignore ip not ok\"\n continue\n '''\n if rcount < 10 or rcount > 2600:\n print 'rcount %d, time %d ---: %f' % (rcount, number, self.packet_headers[rcount - 1]['ts'] - self._ts_base)\n '''\n \n self.pcap_packets.append(pcap_packet)\n \n\n #skip the packet that is not tcp message\n if (pcap_packet.ip.protocol != 'TCP'):\n continue\n \n\n\n pcap_packet.top_layer = 3\n pcap_packet.tcp = Tcp(pcap_packet.ip, number)\n\n if pcap_packet.ip.src == _device_ip:\n pcap_packet.tcp.direction = \"out\"\n else:\n pcap_packet.tcp.direction = \"in\"\n\n\n #dispatch the tcp into tcp streams\n self._add_pkt_into_tcp_stream(pcap_packet, number)\n \n #reassemble tcp packet\n self._tcp_reassemble(pcap_packet.pcap_num, pcap_packet.ip.src, pcap_packet.ip.dst, 
pcap_packet.tcp)\n number += 1\n #endof for\n #flush the tcp_buf, other wise it will lose some http response\n for sockets in _tcp_buf.keys():\n self._tcp_flush(sockets)\n del _tcp_buf[sockets]", "def monitor(self, \n iface: str,\n prn: callable=lambda p: print(p.summary()), # Function to apply against matching packet\n count: int=0, # \n session=None, \n filter: str=None,\n timeout=0,\n lfilter: callable=None):\n ready_event = Event()\n\n\n def notify_started():\n ''' Callback used by sniffer to event when it's actually started '''\n nonlocal ready_event\n ready_event.set()\n\n sniffer = AsyncSniffer(iface=iface, \n session=session,\n count=count,\n prn=prn,\n monitor=True,\n filter=filter,\n lfilter=lfilter,\n timeout=timeout,\n started_callback=notify_started)\n\n sniffer.start()\n\n\n def join():\n ''' Start task to join sniffer, this will return as soon as the\n sniffer has finished, or until it's timeout has been reached.\n returns result which will be available through the future\n returned by the thread executor. '''\n nonlocal sniffer\n sniffer.join()\n\n\n ''' Wait until sniffer has actually started '''\n if not ready_event.wait(timeout=5):\n raise Exception('Sniffer did not start!')\n\n with self._monitor_lock:\n self._monitors.append(sniffer)\n\n return self._executor.submit(join)", "def saw_ip(self, ip):\n from sqlalchemy.exc import IntegrityError\n c = self.ipSurvey.columns\n v = {\n c[\"ipAddress\"]: ip,\n c[\"lastSeen\"]: \"now()\",\n }\n # Update if already in table, otherwise insert new row\n if self.session.execute(self.ipSurvey.update(c[\"ipAddress\"] == ip, values=v)).rowcount == 0:\n self.session.execute(self.ipSurvey.insert(values=v))", "def sniff(self, func=None, timeout=None):\n msg = None\n while True:\n msg = self.shell.client.get_stream_packet(type_=\"packet\", timeout=timeout)\n if func is not None:\n func(msg)\n else:\n break\n return msg", "def IperfTCP(target_src, target_dst, dst, length, window=None):\n iperf = IperfSet(target_src, target_dst, dst)\n iperf.Start(length, None, window)\n return iperf.Results()", "def sniffer_callback(self, pkt):\n #if \"Ether\" in pkt and \"IP\" in pkt and \"TCP\" in pkt:\n if \"TCP\" in pkt:\n\n # Debug check for packet details\n # print(pkt.summary())\n\n if pkt[TCP].payload:\n # print(\"[PAYLOAD]:\\n%s\" % pkt[TCP].payload)\n self.callback_object.process_packet(pkt)\n\n # Ignore packets without payload\n # else:\n # print(\"[NO-LOAD]Packet does not have payload!\")", "def start_sniffing(self, iface, udp_dport=UDP_TRPT_DST_PORT):\n logger.info(\"AE monitoring iface %s\", iface)\n sniff(iface=iface,\n prn=lambda packet: self.handle_packet(packet, udp_dport),\n stop_filter=lambda p: self.sniff_stop.is_set())" ]
[ "0.68234485", "0.66879004", "0.6390416", "0.63076127", "0.61595774", "0.60337055", "0.5978882", "0.59621894", "0.59603024", "0.5897678", "0.587754", "0.58491415", "0.5800434", "0.5795848", "0.5759272", "0.57397294", "0.55257237", "0.54305667", "0.5375751", "0.529121", "0.5286392", "0.5235193", "0.5215558", "0.52037233", "0.51750326", "0.5158545", "0.5150281", "0.5136566", "0.51236683", "0.51152873" ]
0.6899458
0
prettyprinter for dumping this variable to _file
def prettyprint(self, _file):
    xstr = "var " + self.name + " " + self.type.desc()
    _file.write(xstr + "\n")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prettyprint(self, _file):\n xstr = \"reg \" + self.name + \" \" + self.type.desc()\n _file.write(xstr + \"\\n\")", "def prettyprint(self, _file):\n _file.write(\"Function %s returns %s\\n\" % (self.name, self.returnType))\n _file.write(\" local vars\\n\")\n for val in self.vars.values():\n _file.write(\" \")\n val.prettyprint(_file)\n _file.write(\" params\\n\")\n for val in self.params.values():\n _file.write(\" \")\n val.prettyprint(_file)\n _file.write(\" registers\\n\")\n for val in self.virtRegs.values():\n _file.write(\" \")\n val.prettyprint(_file)\n _file.write(\" code\\n\")\n for instr in self.instrs():\n if isinstance(instr, CLABEL):\n indent = \" \"\n else:\n indent = \" \"\n _file.write(indent + str(instr) + \"\\n\")", "def prettyprint(self, _file):\n for var in self.variables:\n var.prettyprint(_file)\n for fun in self.functions:\n fun.prettyprint(_file)", "def pretty_print(self):\n pt = PrettyTable()\n for i in self.files_summary:\n pt.field_names = [\"File Name\", \"Classes\", \"Functions\", \"Lines\", \"Characters\"]\n pt.add_row(list([i, self.files_summary[i][\"class\"], self.files_summary[i][\"function\"], self.files_summary[i][\"line\"], self.files_summary[i][\"char\"]]))\n print(pt) #Using a Print statement here because i tried to return self.pt and it didnt give me anything but the print works", "def _prettyfilename(self):\n return f'{self.title} ({self.subtype})'", "def ugly():\n\n global _pretty\n _pretty = False", "def _prettyfilename(self):\n return f'{self.title} ({self.year})'", "def dump(self, value, filename):\n\n super().dump(value=value, filename=filename)", "def _prettyfilename(self):\n return self.title", "def dump(self) :\n st = \"%s=%s, valid=%d, found=%d, type=%s stringValue=%s\" \\\n %(self.name_, str(self.value_), self.valid_, self.found_, \\\n self.type_, self.stringValue_)\n print st", "def pretty_str(self) -> str:\n ...", "def _prettyfilename(self):\n return f'{self.grandparentTitle} - {self.seasonEpisode} - {self.title}'", "def __str__(self):\n buf = io.StringIO()\n args.output.write(buf, self.root, self.headings)\n return buf.getvalue()", "def print_structure(file_path):\n pprint(read_or_exit(file_path), width=140)", "def dumps(self) -> str:\n ...", "def _PrintFunc(self, obj=None, verbose=False, summarize=True, recursive=False,\n use_pager=None, to_file=None):\n if obj is not None:\n self._printed_variables.append(obj)\n lines = describe.GenerateLines(\n obj, verbose=verbose, recursive=recursive, summarize=summarize,\n format_name='text')\n _WriteToStream(lines, use_pager=use_pager, to_file=to_file)", "def write(self, fp):\n if self._defaults:\n fp.write(\"[%s]\\n\" % DEFAULTSECT)\n for (key, value) in self._defaults.items():\n fp.write(\"%s = %s\\n\" % (key, str(value).replace('\\n', '\\n\\t')))\n fp.write(\"\\n\")\n for section in self._sections:\n fp.write(\"[%s]\\n\" % section)\n for (key, value) in self._sections[section].items():\n if key == \"__name__\":\n continue\n if (value is not None) or (self._optcre == self.OPTCRE):\n key = \" = \".join((key, str(value).replace('\\n', '\\n\\t')))\n fp.write(\"%s\\n\" % (key))\n fp.write(\"\\n\")", "def p(value):\n pp.pprint(value)", "def dump(self) -> None:\n ...", "def write_pretty(reviewer_data, file_obj):\n table = prettytable.PrettyTable(\n ('Reviewer',\n 'Reviews -2 -1 +1 +2 +A +/- %',\n 'Disagreements*'))\n for (name, r_data, d_data) in reviewer_data:\n r = '%7d %3d %3d %3d %3d %3d %s' % r_data\n d = '%3d (%s)' % d_data\n table.add_row((name, r, d))\n file_obj.write(\"%s\\n\" % table)", 
"def _format(self):\n output = f\"\\n{color('>>> DUMP')} from {self.filename}: {color(f'L{self.line}')} in {color(f'{self.method}()')}\"\n\n for name, obj in self.objects.items():\n output += f\"\\n\\n{color(f' - {name}:')}\\n\"\n output += f\" {pformat(obj, width=110, indent=4)}\"\n\n output += color(\"\\n\\n<<< END\")\n return output", "def __str__(self) -> str:\n\n return f\"{self.filename}:{self.line}:{self.flag}\"", "def printpretty(self):\n print(self.string_rep())", "def PrettyPrint(self):\r\n print(self.data)\r\n return", "def __repr__(self):\n result = '\"{0}\"'.format(self._filepath.unexpanded)\n if self.nonlocal is None: result += \", None\"\n else: result += ', \"%s\"' % (self._nonlocal.unexpanded)\n result += \", %f, %f, %f, %f, %f\" % (self.s, self.p, self.d, self.pnl, self.dnl)\n return result", "def _DebugPrintFileEntry(self, file_entry):\n if self.file_format in ('bin-big-endian', 'bin-little-endian'):\n value_string = f'0x{file_entry.signature:04x}'\n else:\n value_string = f'{file_entry.signature!s}'\n\n self._DebugPrintValue('Signature', value_string)\n\n if self.file_format not in ('crc', 'newc'):\n self._DebugPrintValue('Device number', f'{file_entry.device_number:d}')\n\n self._DebugPrintValue('Inode number', f'{file_entry.inode_number:d}')\n\n self._DebugPrintValue('Mode', f'{file_entry.mode:o}')\n\n self._DebugPrintValue(\n 'User identifier (UID)', f'{file_entry.user_identifier:d}')\n\n self._DebugPrintValue(\n 'Group identifier (GID)', f'{file_entry.group_identifier:d}')\n\n self._DebugPrintValue('Number of links', f'{file_entry.number_of_links:d}')\n\n if self.file_format not in ('crc', 'newc'):\n self._DebugPrintValue(\n 'Special device number', f'{file_entry.special_device_number:d}')\n\n self._DebugPrintValue(\n 'Modification time', f'{file_entry.modification_time:d}')\n\n if self.file_format not in ('crc', 'newc'):\n self._DebugPrintValue('Path size', f'{file_entry.path_size:d}')\n\n self._DebugPrintValue('File size', f'{file_entry.file_size:d}')\n\n if self.file_format in ('crc', 'newc'):\n self._DebugPrintValue(\n 'Device major number', f'{file_entry.device_major_number:d}')\n self._DebugPrintValue(\n 'Device minor number', f'{file_entry.device_minor_number:d}')\n\n self._DebugPrintValue(\n 'Special device major number',\n f'{file_entry.special_device_major_number:d}')\n self._DebugPrintValue(\n 'Special device minor number',\n f'{file_entry.special_device_minor_number:d}')\n\n self._DebugPrintValue('Path size', f'{file_entry.path_size:d}')\n\n self._DebugPrintValue('Checksum', f'0x{file_entry.checksum:08x}')", "def __repr__(self) -> str:\n\n return f\"{self.filename}:{self.line}:{self.flag}\"", "def pretty(self, **kwargs):\r\n raise NotImplementedError", "def printShader(self):\n print self.file", "def print_tags(self, filename):\n fh = open(filename, 'w')\n for t in self.source_tags.tags:\n fh.write(\"%d\\t%d\\t%s\" % (t.begin, t.end, t.name))\n for (attr, val) in t.attrs.items():\n fh.write(\"\\t%s=\\\"%s\\\"\" % (attr, val.replace('\"','&quot;')))\n fh.write(\"\\n\")" ]
[ "0.7163909", "0.66443485", "0.6586192", "0.65374786", "0.6152915", "0.6114037", "0.60867304", "0.59884775", "0.5986953", "0.5972376", "0.5882353", "0.57969296", "0.5763943", "0.5755351", "0.5739949", "0.5702993", "0.5678127", "0.56770426", "0.5674878", "0.56744444", "0.566506", "0.56471175", "0.5616777", "0.5616504", "0.5598522", "0.5594725", "0.558788", "0.55698144", "0.5558623", "0.55439794" ]
0.81848353
0
prettyprinter for dumping this register to _file
def prettyprint(self, _file):
    xstr = "reg " + self.name + " " + self.type.desc()
    _file.write(xstr + "\n")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prettyprint(self, _file):\n xstr = \"var \" + self.name + \" \" + self.type.desc()\n _file.write(xstr + \"\\n\")", "def prettyprint(self, _file):\n _file.write(\"Function %s returns %s\\n\" % (self.name, self.returnType))\n _file.write(\" local vars\\n\")\n for val in self.vars.values():\n _file.write(\" \")\n val.prettyprint(_file)\n _file.write(\" params\\n\")\n for val in self.params.values():\n _file.write(\" \")\n val.prettyprint(_file)\n _file.write(\" registers\\n\")\n for val in self.virtRegs.values():\n _file.write(\" \")\n val.prettyprint(_file)\n _file.write(\" code\\n\")\n for instr in self.instrs():\n if isinstance(instr, CLABEL):\n indent = \" \"\n else:\n indent = \" \"\n _file.write(indent + str(instr) + \"\\n\")", "def prettyprint(self, _file):\n for var in self.variables:\n var.prettyprint(_file)\n for fun in self.functions:\n fun.prettyprint(_file)", "def pretty_print(self):\n pt = PrettyTable()\n for i in self.files_summary:\n pt.field_names = [\"File Name\", \"Classes\", \"Functions\", \"Lines\", \"Characters\"]\n pt.add_row(list([i, self.files_summary[i][\"class\"], self.files_summary[i][\"function\"], self.files_summary[i][\"line\"], self.files_summary[i][\"char\"]]))\n print(pt) #Using a Print statement here because i tried to return self.pt and it didnt give me anything but the print works", "def dump(self) -> None:\n ...", "def dump(self) :\n st = \"%s=%s, valid=%d, found=%d, type=%s stringValue=%s\" \\\n %(self.name_, str(self.value_), self.valid_, self.found_, \\\n self.type_, self.stringValue_)\n print st", "def _print_custom(self):\n pass", "def dumps(self) -> str:\n ...", "def print_tags(self, filename):\n fh = open(filename, 'w')\n for t in self.source_tags.tags:\n fh.write(\"%d\\t%d\\t%s\" % (t.begin, t.end, t.name))\n for (attr, val) in t.attrs.items():\n fh.write(\"\\t%s=\\\"%s\\\"\" % (attr, val.replace('\"','&quot;')))\n fh.write(\"\\n\")", "def dump(self):\n return", "def pprint(self):\n # just here for defining the interface; work is done in subclasses\n pass", "def pprint(self):\n def pprintStr(node):\n s = \"(\" + str(node.value) \n for action in node.children:\n s = s + \", \" + pprintStr(node.children[action])\n s = s + \")\"\n return s\n\n print pprintStr(self)", "def printShader(self):\n print self.file", "def _format(self):\n output = f\"\\n{color('>>> DUMP')} from {self.filename}: {color(f'L{self.line}')} in {color(f'{self.method}()')}\"\n\n for name, obj in self.objects.items():\n output += f\"\\n\\n{color(f' - {name}:')}\\n\"\n output += f\" {pformat(obj, width=110, indent=4)}\"\n\n output += color(\"\\n\\n<<< END\")\n return output", "def dump(self, prefix = \" - \"):\n\t\tlines = [\n\t\t\t\"MAC: \" + \":\".join(\n\t\t\t\t\"%02x\" % (ord(o), ) for o in self.mac),\n\t\t\t\"HW %d, SW %d\" %\n\t\t\t\t(self.hw_rev, self.sw_rev),\n\t\t\t\"Capabilities: max %d points, %d kpps\" %\n\t\t\t\t(self.buffer_capacity, self.max_point_rate)\n\t\t]\n\t\tfor l in lines:\n\t\t\tprint prefix + l\n\t\tif debug == 1:\n\t\t\tself.status.dump(prefix)", "def dump(self, filename, regFilterList=None, userMsg=''):\n pass", "def pretty_print_drt(self):\n self.drt_manager.pretty_print_drt()", "def dumps(self):\n pass", "def dump_reg(self):\r\n for reg in self.INTERNAL_REGS:\r\n print(f\"{reg} : {self._mu.reg_read(self.reg_map[reg]):X}\")", "def dump(self):\n print PccUtString.trimString(self.dumpBuf(), \"\\n\")\n return self", "def _pretty_print_token(self, token):\n INLINE = 0\n BOL = 1\n extended_print = ('ID', 'INT', 'FLOAT', 'STRING')\n next_line_tokens = 
('NEWLINE', 'INDENT', 'DEDENT')\n\n if self.printer_state == BOL:\n self.printer_state = INLINE\n\n print(str(token.lineno) + self.level * \" \", end=' ')\n\n if token is None:\n pass\n elif token.type in next_line_tokens:\n if token.type == \"INDENT\":\n self.level += 1\n elif token.type == \"DEDENT\":\n self.level -= 1\n\n print(token.type + '\\n', end=' ')\n self.printer_state = BOL\n elif token.type in extended_print:\n print('(' + token.type + ', ' + str(token.value) + ')', end=' ')\n else:\n print(token.type, end=' ')", "def dump(self, mark):", "def print(self):\n print(self.pretty_str())", "def print_structure(file_path):\n pprint(read_or_exit(file_path), width=140)", "def dump(self, prefix = \" - \"):\n\t\tlines = [\n\t\t\t\"\"\n\t\t\t\"Host \",\n\t\t\t\"Light engine: state %d, flags 0x%x\" %\n\t\t\t\t(self.le_state, self.le_flags),\n\t\t\t\"Playback: state %d, flags 0x%x\" %\n\t\t\t\t(self.playback_state, self.playback_flags),\n\t\t\t\"Buffer: %d points\" %\n\t\t\t\t(self.fullness, ),\n\t\t\t\"Playback: %d kpps, %d points played\" %\n\t\t\t\t(self.point_rate, self.point_count),\n\t\t\t\"Source: %d, flags 0x%x\" %\n\t\t\t\t(self.source, self.source_flags)\n\t\t]\n\t\t'''\n\t\tif debug == 2:\n\t\t\tfor l in lines:\n\t\t\t\tprint prefix + l\n\t\t'''", "def printIns(self, stream):\n print(' ', str(self), file=stream)", "def prettyPrint(self):\n import pprint\n pp = pprint.PrettyPrinter(indent=4)\n x=pp.pformat(self.__dict__)\n print x\n return", "def dump(self):\n dump_grammar(self.rules)\n print(self.registry)", "def help_dump(self):\n print(DUMP)", "def dump(self):\n outputs = [\"Code object : %s\" % self.name]\n outputs.append(\" Type : %s\" % self.object_type)\n for source_line in self.source:\n # Each line is a (line_number, code) pair\n outputs.append('%d: %s' % source_line)\n return \"\".join(outputs)" ]
[ "0.747862", "0.69527394", "0.6617654", "0.6217994", "0.6206654", "0.61563045", "0.61102194", "0.5948794", "0.59108806", "0.5895563", "0.5882521", "0.58252144", "0.58229977", "0.5805466", "0.5795636", "0.5766275", "0.57474035", "0.5730405", "0.57264787", "0.572616", "0.5725961", "0.57143706", "0.57079715", "0.56872106", "0.5684654", "0.5652127", "0.5644621", "0.56367785", "0.56079876", "0.55953604" ]
0.8537647
0
generate new, unused label
def genLabel(self):
    self._nextlabelid += 1
    return CLABEL(self._nextlabelid)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _unused_label(self, label):\n original = label\n existing = self.column_labels\n i = 2\n while label in existing:\n label = '{}_{}'.format(original, i)\n i += 1\n return label", "def uniqueLabel(self):\n label = f\"Label-{self.next_label}\"\n self.next_label += 1\n return label", "def generate_label(self):\n\n last = self.label\n self.label += 1\n self.P.append(last)\n\n return last", "def unique_label(orig_label: str) -> str:\n return orig_label[0] + \"l\" + uuid4().hex\n # TODO: check for meteors.", "def make_unscoped_label(self, label):\n self.write(\"(\" + label + \")\\n\")\n # write (label_name)", "def create_label(self, org, name):\n pass", "def test__create_label_w_no_ent_id(ruler: SpaczzRuler) -> None:\n assert ruler._create_label(\"TEST\", None) == \"TEST\"", "def gen_label(self, stmt: statements.Label) -> None:\n block = self.get_label_block(stmt.name)\n self.builder.emit_jump(block) # fall through\n self.builder.set_block(block)\n self.gen_stmt(stmt.statement)", "def make_label(self, node):\n\t\tcurstring = str(node.__class__)[13:-2]\n\t\tif isinstance(node, ast.Name):\n\t\t\tcurstring = node.id\n\t\telif isinstance(node, ast.Num):\n\t\t\tcurstring = str(node.n)\n\t\telif isinstance(node, ast.Str):\n\t\t\tcurstring = node.s\n\n\t\tif isinstance(node, ast.Load) or isinstance(node, ast.Store) or \\\n\t\t\tisinstance(node, ast.Param) or isinstance(node, ast.Add) or \\\n\t\t\tisinstance(node, ast.Sub) or isinstance(node, ast.Mult):\n\t\t\treturn None\n\n\t\ttry:\n\t\t\tself.labels[str(node)] = curstring\n\t\t\treturn str(node)\n\t\texcept AttributeError:\n\t\t\treturn None", "def label(self) -> str: # Enforcing every node defines a label\n pass", "def make_null_labeler():\n\n def labeler(data):\n return {**data, 'outcome': tf.zeros([1]), 'y0': tf.zeros([1]), 'y1': tf.zeros([1]), 'treatment': tf.zeros([1])}\n\n return labeler", "def add_label_to_unique_species_labels(self, label: str) -> str:\n unique_label, i = label, 0\n while unique_label in self.unique_species_labels:\n unique_label = f'{label}_{i}'\n i += 1\n self.unique_species_labels.append(unique_label)\n return unique_label", "def generate_labels(n_samples):\n return np.ones([n_samples, 1]), np.zeros([n_samples, 1])", "def fix_label_names():\n\n assert trace.cpu.trace_done\n binary_addr = memorymanager.BinaryAddr(0)\n while binary_addr < len(classifications):\n c = classifications[binary_addr]\n if c is not None:\n dummy = [str(x) for x in c.as_string_list(binary_addr, None)]\n binary_addr += c.length()\n else:\n binary_addr += 1", "def UpdateLabel(self) -> _n_6_t_0:", "def makeLabel(self):\n\n self.setIndexNames()\n\n if self.isInCore():\n self.getFirstChar()\n else:\n # stick with what we have. 
(default:ExCore)\n return\n self.label = self.firstChar + \"{0:03d}\".format(self.i2)\n if self.axial is not None:\n # add axial letter\n self.label = self.label + AXIAL_CHARS[self.axial]", "def cg_label(self, cmd):\n label = self.makeLabel(cmd)\n self.asm(f\"({label})\")", "def get_final_label(addr, context, move_id):\n #if addr == 0x6a7:\n # print(\"FFF\", hex(context), move_id)\n assert trace.cpu.trace_done\n assert memorymanager.is_valid_binary_addr(addr)\n assert memorymanager.is_valid_binary_addr(context)\n assert move_id is None or movemanager.is_valid_move_id(move_id)\n name, move_id = label_maker(addr, context, move_id)\n if is_simple_name(name):\n labelmanager.labels[addr].add_explicit_name(name, move_id)\n\n return name", "def createLabels(edge):\n k = removeLabel(edge)\n return k + \"_L\", k + \"_R\"", "def create_label(image_name,number):\r\n\r\n target=[]\r\n for i in range(0,number):\r\n target.append(0)\r\n target[image_name]=1\r\n\r\n return target", "def label(self):\r\n raise NotImplementedError", "def label(cls) -> str:\n return \"!lobotomy.unknown\"", "def label(cmd):\r\n cmd = cmd.replace('make][.DP', 'make1][.NP')\r\n cmd = cmd.replace('make][.SC', 'make2][.SC')\r\n cmd = re.sub('(draw.*)one','\\\\1one1',cmd)\r\n cmd = re.sub('(make1.*)one','\\\\1one1',cmd)\r\n cmd = re.sub('(make2.*)one','\\\\1one2',cmd)\r\n cmd = re.sub('(move.*)one','\\\\1one2',cmd)\r\n cmd = re.sub('(hide.*)one','\\\\1one2',cmd)\r\n cmd = '[result ' + cmd + ']' #dummy function for plop\r\n return cmd", "def _MaybeAddLabel(label_name):\n if label_name.lower() in labels_already_seen:\n return\n labels_already_seen.add(label_name.lower())\n if '-' in label_name:\n col, _value = label_name.split('-', 1)\n _MaybeAddCol(col)", "def label(cmd):\n cmd = cmd.replace('make][.DP', 'make1][.NP')\n cmd = cmd.replace('make][.SC', 'make2][.SC')\n cmd = re.sub('(draw.*)one','\\\\1one1',cmd)\n cmd = re.sub('(make1.*)one','\\\\1one1',cmd)\n cmd = re.sub('(make2.*)one','\\\\1one2',cmd)\n cmd = re.sub('(move.*)one','\\\\1one2',cmd)\n cmd = re.sub('(hide.*)one','\\\\1one2',cmd)\n cmd = '[result ' + cmd + ']' #dummy function for plop\n return cmd", "def label_generator(self):\r\n if self.label_mode == 'separate':\r\n return self.generate_separate_labels()\r\n elif self.label_mode == 'joint':\r\n return self.generate_joint_labels()", "def label(self):\n return ''", "def _create_label(self, label: str, ent_id: Union[str, None]) -> str:\n if isinstance(ent_id, str):\n label = \"{}{}{}\".format(label, self.ent_id_sep, ent_id)\n return label", "def ex_label(self,label,argl):\n if len(label) > 0 and label[0] != '_':\n return label\n comment = ''\n for i in argl:\n phrase = ''\n if i == 'l':\n phrase = label\n elif i in self._labels.keys():\n phrase = self._labels[i]\n comment += phrase\n return comment", "def createLabel(self, address: ghidra.program.model.address.Address, name: unicode, namespace: ghidra.program.model.symbol.Namespace, makePrimary: bool, sourceType: ghidra.program.model.symbol.SourceType) -> ghidra.program.model.symbol.Symbol:\n ..." ]
[ "0.72095937", "0.70172065", "0.6992559", "0.6919921", "0.6875562", "0.6685837", "0.6595837", "0.656445", "0.6552505", "0.6459706", "0.64372915", "0.63094926", "0.6299775", "0.62694937", "0.62503123", "0.62356275", "0.6233588", "0.6232511", "0.62244177", "0.6221866", "0.6218072", "0.6187783", "0.61424047", "0.61363864", "0.61076933", "0.6086462", "0.6083992", "0.6037634", "0.6034213", "0.60188955" ]
0.7646934
0
creates new, globally unique virtual register and returns it
def getFreeVirtReg(self, function, _type: common.Type):
    while True:
        name = "$R"+str(self._nextvregid)
        self._nextvregid += 1
        if not name in self._used_names:
            break
    reg = VirtualRegister(name, _type)
    self._used_names.add(name)
    function.addVirtReg(reg)
    return reg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def register(self,registerable):\n result = self.registry.register(registerable)\n if result.reg_info.index is None:\n raise RuntimeError(\"failed to register {}\".format(str(registerable)))\n return result", "def __new__(cls, *args, **kwargs):\n obj = super(RegistersBank, cls).__new__(cls, *args, **kwargs)\n obj.__dict__ = cls.data\n\n return obj", "def register(self):\n raise NotImplementedError", "def register(self):\n raise NotImplementedError", "def register(cls, L):\r\n ...", "def register(self):\n raise NotImplementedError()", "def register(self):\n raise NotImplementedError(\"Should have implemented this\")", "def register(regname):\n global simulator\n if simulator is None:\n print \"Program is not started.\"\n return\n try:\n return simulator.get_register(regname)\n except LookupError, e:\n print e, regname\n except:\n simulation_error()", "def register(blk):\n pass", "def register(name):\n def func(cls):\n \"\"\"\n See register\n \"\"\"\n REGISTRY[name] = cls()\n return cls\n return func", "def get_reg():\n return embbeding_reg", "def test_register_function(self):\n registry = ClassRegistry()\n\n @registry.register('fire')\n def pokemon_factory(name=None):\n return Charmeleon(name=name)\n\n poke = registry.get('fire', name='trogdor')\n\n self.assertIsInstance(poke, Charmeleon)\n self.assertEqual(poke.name, 'trogdor')", "def regen(self):\n self.create(overwrite=True)\n self.load()", "def test_create_anonymous_classical_register(self):\n cr = ClassicalRegister(size=3)\n self.assertIsInstance(cr, ClassicalRegister)", "def create_instance(self,name):\n print \"INFO : new %s\" % name\n return self.get_class(name)()", "def OpenRegister(self, name, base, repeat, stride, indexing):\n self.register_name = name\n self.register_repeat = repeat\n self.typedef_name = string_util.SnakeToCamel(\n name.lower(), validate=False) + 'Registers'\n\n if repeat == 1:\n name_index = name\n index_str = '0'\n else:\n name_index = name + '(i)'\n if stride < 0:\n if indexing == 0:\n index_str = '-(i)'\n else:\n index_str = '{} - (i)'.format(indexing)\n else:\n if indexing == 0:\n index_str = '(i)'\n else:\n index_str = '(i) - {}'.format(indexing)\n self.output.write(\n '#define {} (((volatile {} *)0x{:08X})[{}])\\n'.format(\n name_index, self.typedef_name, base, index_str))\n self.output.write('typedef struct {\\n')", "def finish_registration(self):\r\n base_platform = self._args.get(\"base_platform\", None)\r\n lcls = {}\r\n try:\r\n exec(\"from platforms.{}.main import RootClass as rc; cl = rc\".format(base_platform), globals(), lcls)\r\n except ModuleNotFoundError as e:\r\n eprint(\"Package 'platforms.{}' or module 'main' wasn't found for creating platform instance '{}'!\".format(\r\n base_platform, self.name))\r\n raise e\r\n lcls[\"name\"] = self.name\r\n lcls[\"farm\"] = self._farm\r\n lcls[\"args\"] = self._args\r\n try:\r\n exec(\"inst = cl(name=name, farm=farm, **args)\", globals(), lcls)\r\n inst = lcls[\"inst\"]\r\n except Exception as e:\r\n eprint(\"Exception occurred when creating platform {} of {} kind!\\nException: {}\".format(\r\n self.name, base_platform, e))\r\n raise e\r\n # inst = PlatformBase(name=self.name, farm=self._farm, **self._args) # TODO: raise exception\r\n return inst", "def _new_instance(self):\n return self.__class__(self._vmodule)", "def on_register(cls):", "def reg(self, sVmParentPath, bNonInteractiveMode = False):\n\t\treturn Job(SDK.PrlVm_Reg(self.handle, sVmParentPath, bNonInteractiveMode)[0])", "def test_create_anonymous_classical_registers(self):\n cr1 = 
ClassicalRegister(size=3)\n cr2 = ClassicalRegister(size=3)\n self.assertNotEqual(cr1.name, cr2.name)", "def get_custom_register(self, index):\n\t\tif (index not in self.custom_registers):\n\t\t\tsys.stderr.write(\"WARNING : Custom register \" + str(index) + \" is not declared for \" + self._target_id +\"\\n\")\n\t\t\treturn None\n\n\t\treturn self.custom_registers[index]", "def __new__(cls, code: RegisterCode, info: str):\n if not isinstance(code, RegisterCode):\n raise TypeError(\"Code must be one of RegisterCode\")\n return super().__new__(cls, code, str(info))", "def onRegister(self):\n pass", "def onRegister(self):\n pass", "def forceRegister(self, name, value):\n pass", "def register(obj_name, obj):\n if obj_name not in ninja_globals['register']:\n ninja_globals['register'][obj_name] = obj", "def test_create_anonymous_classical_register(self):\n q_program = QuantumProgram()\n cr = q_program.create_classical_register(size=3)\n self.assertIsInstance(cr, ClassicalRegister)", "def _register(cls):\n clsid_path = \"Software\\\\Classes\\\\CLSID\\\\\" + cls._reg_clsid_\n progid_path = \"Software\\\\Classes\\\\\" + cls._reg_progid_\n spec = cls.__module__ + \".\" + cls.__name__\n\n # register the class information\n win32api.RegSetValue(win32con.HKEY_CURRENT_USER, clsid_path, win32con.REG_SZ, cls._reg_desc_)\n win32api.RegSetValue(win32con.HKEY_CURRENT_USER, clsid_path + \"\\\\ProgID\", win32con.REG_SZ, cls._reg_progid_)\n win32api.RegSetValue(win32con.HKEY_CURRENT_USER, clsid_path + \"\\\\PythonCOM\", win32con.REG_SZ, spec)\n hkey = win32api.RegCreateKey(win32con.HKEY_CURRENT_USER, clsid_path + \"\\\\InprocServer32\")\n win32api.RegSetValueEx(hkey, None, None, win32con.REG_SZ, pythoncom.__file__)\n win32api.RegSetValueEx(hkey, \"ThreadingModel\", None, win32con.REG_SZ, \"Both\")\n\n # and add the progid\n win32api.RegSetValue(win32con.HKEY_CURRENT_USER, progid_path, win32con.REG_SZ, cls._reg_desc_)\n win32api.RegSetValue(win32con.HKEY_CURRENT_USER, progid_path + \"\\\\CLSID\", win32con.REG_SZ, cls._reg_clsid_)", "def addVirtReg(self, vr: VirtualRegister):\n self.virtRegs[vr.name] = vr" ]
[ "0.62169015", "0.61992264", "0.6111969", "0.6111969", "0.6012539", "0.59799826", "0.5868822", "0.5843639", "0.57748914", "0.57584983", "0.57393277", "0.5698932", "0.5672466", "0.5543582", "0.5534334", "0.55243844", "0.5516907", "0.5514164", "0.5504319", "0.54888034", "0.5473833", "0.5458339", "0.5441336", "0.5414907", "0.5414907", "0.5412489", "0.5399749", "0.53996086", "0.5398377", "0.53817827" ]
0.6297248
0
inserts otherinstr before this instruction in this instruction's owner
def insertBefore(self, otherinstr):
    # pylint: disable=protected-access
    assert isinstance(otherinstr, ICode)
    if self.__prev is None:
        self.__prev = otherinstr
        otherinstr.__next = self
        otherinstr.owner = self.owner
        self.owner._firstInstr = otherinstr
    else:
        otherinstr.__next = self
        otherinstr.owner = self.owner
        otherinstr.__prev = self.__prev
        self.__prev = otherinstr
        otherinstr.__prev.__next = otherinstr
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def insertAfter(self, otherinstr):\n # pylint: disable=protected-access\n assert isinstance(otherinstr, ICode)\n if self.__next is None:\n self.__next = otherinstr\n otherinstr.__prev = self\n otherinstr.owner = self.owner\n self.owner._lastInstr = otherinstr\n else:\n otherinstr.__prev = self\n otherinstr.owner = self.owner\n otherinstr.__next = self.__next\n self.__next = otherinstr\n otherinstr.__next.__prev = otherinstr", "def insertInstr(self, instr: ICode):\n if self._firstInstr is None:\n self._firstInstr = self._lastInstr = instr\n instr.owner = self\n else:\n self._firstInstr.insertBefore(instr)", "def addInstr(self, instr: ICode):\n if self._firstInstr is None:\n self._firstInstr = self._lastInstr = instr\n instr.owner = self\n else:\n self._lastInstr.insertAfter(instr)", "def insert_before(self, insert_pos_inst):\n basic_block = insert_pos_inst.basic_block\n if basic_block is None:\n raise IRError('Instruction is not in basic block')\n idx = basic_block.insts.index(insert_pos_inst)\n self.basic_block = basic_block\n basic_block.insts.insert(idx, self)", "def prepend_inst(self, inst):\n inst.basic_block = self\n self.insts = [inst] + self.insts", "def insert_after(self, insert_pos_inst):\n basic_block = insert_pos_inst.basic_block\n if basic_block is None:\n raise IRError('Instruction is not in basic block')\n idx = basic_block.insts.index(insert_pos_inst)\n self.basic_block = basic_block\n basic_block.insts.insert(idx + 1, self)", "def DocumentElementInsertBefore(self):\n raise NotImplementedError()", "def getInstructionBefore(self, instruction: ghidra.program.model.listing.Instruction) -> ghidra.program.model.listing.Instruction:\n ...", "def _insert_op(self, op):", "def insertChildBefore(new_elem, elem):\n parent = DOM.getParent(elem)\n id = DOM.getChildIndex(parent, elem)\n DOM.insertChild(parent, new_elem, id)", "def possessed_by(self, other):\r\n self.owner = other", "def insert(self, rule, ident):\n raise NotImplementedError", "def insertBefore( self, node ):\n if isinstance( self, HtmlDomNode ) and isinstance( node, HtmlDomNode ):\n node.parentNode.before( node, self )", "def DocumentInlineBlipInsertAfterElement(self):\n raise NotImplementedError()", "def insert_element_before_similar(self, parent, new_child):\n new_tag = self.tag_base_name(new_child.tag)\n for i, child in enumerate(parent.getchildren()):\n if not self.tag_base_name_is(child, new_tag):\n parent.insert(i, new_child)\n break\n else:\n parent.append(new_child)", "def getInstructionBefore(self, address: ghidra.program.model.address.Address) -> ghidra.program.model.listing.Instruction:\n ...", "def add_to_beginning(self, domino):\n self.chain.insert(0, domino)", "def append(self,instr):\n self.instructions.append(instr)", "def prependChild(self, *args):\n return _libsbml.ASTBasePlugin_prependChild(self, *args)", "def add_to(self, newowner):\n self.prevai = newowner.ai\n newowner.ai = self", "def before_insert(self, obj, st):\n pass", "def __iadd__(self, other):\n if isinstance(other, Token):\n new = Token(self.text + other.text, self.position, self.category)\n else:\n new = Token(self.text + other, self.position, self.category)\n return new", "def add_before(self, p, e):\n original = self._validate(p)\n return self._insert_between(e, original._prev, original)", "def prependChild(self, *args):\n return _libsbml.ASTNode_prependChild(self, *args)", "def before( self, src, target ):\n flag = False\n currPrevSiblingNode = None\n if src == None:\n src = self.firstChild()\n currPrevSiblingNode = 
src.getPreviousSiblingNode()\n self.setAsFirstChild( target )\n flag = True\n if isinstance( target, HtmlDomNode ) and isinstance( src, HtmlDomNode ):\n if not flag:\n currPrevSiblingNode = src.getPreviousSiblingNode()\n try:\n index = self.children.index( src )\n self.children.insert( index, target )\n target.setParentNode( self )\n \n except ValueError:\n raise Exception( \"source node object must be children of the parent object\" )\n src.setPreviousSiblingNode( target )\n target.setSiblingNode( src )\n target.setPreviousSiblingNode( currPrevSiblingNode )\n if currPrevSiblingNode :\n currPrevSiblingNode.setSiblingNode( target )\n else:\n raise Exception( \"Invalid node object. object must be of type HtmlDomNode.\" )", "def DocumentElementInsertAfter(self):\n raise NotImplementedError()", "def prepend(self, x):\n self.insert(0, x)", "def augment(self):\n n1 = { 'edges': [ self.next_insert['pred'], self.next_insert ], 'pred': self.next_insert['pred'] }\n n2 = { 'edges': [ n1, self.next_insert ], 'pred': n1 }\n self.next_insert['pred'] = n2\n self.next_insert = n2\n self.nodect += 2", "def add_before ( self ):\n self.add_item( 0 )", "def __iadd__(self, other):\n\t\t#print(\"iadd\")\t\t\n\t\t# merge other branch\t\t\n\t\tself.graph.update(other.graph)\n\t\tself.bottoms.update(other.bottoms)\n\t\tself.output_shape.update(other.output_shape)\n\t\tlayer_name = \"iadd_{}\".format(len(self.graph))\n\t\tself.graph[layer_name] = layer_name\n\t\tself.bottoms[layer_name] = [self.cur_id, other.cur_id]\n\t\tself.output_shape[layer_name] = self.cur_tensor.size()\n\t\tself.cur_id = layer_name\n\t\t# save memory\n\t\tdel other\t\t\n\t\treturn self" ]
[ "0.74177617", "0.73127705", "0.68408257", "0.6571505", "0.650014", "0.6094729", "0.58833224", "0.58634835", "0.5708529", "0.57056504", "0.5630455", "0.5542791", "0.5493814", "0.54496557", "0.5449439", "0.5430143", "0.54208475", "0.54148895", "0.53924996", "0.53715074", "0.5370465", "0.5267097", "0.52658755", "0.52631325", "0.52630997", "0.52435106", "0.5227798", "0.52274966", "0.5203387", "0.5180251" ]
0.8292218
0
inserts otherinstr after this instruction in this instruction's owner
def insertAfter(self, otherinstr):
    # pylint: disable=protected-access
    assert isinstance(otherinstr, ICode)
    if self.__next is None:
        self.__next = otherinstr
        otherinstr.__prev = self
        otherinstr.owner = self.owner
        self.owner._lastInstr = otherinstr
    else:
        otherinstr.__prev = self
        otherinstr.owner = self.owner
        otherinstr.__next = self.__next
        self.__next = otherinstr
        otherinstr.__next.__prev = otherinstr
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def addInstr(self, instr: ICode):\n if self._firstInstr is None:\n self._firstInstr = self._lastInstr = instr\n instr.owner = self\n else:\n self._lastInstr.insertAfter(instr)", "def insertBefore(self, otherinstr):\n # pylint: disable=protected-access\n assert isinstance(otherinstr, ICode)\n if self.__prev is None:\n self.__prev = otherinstr\n otherinstr.__next = self\n otherinstr.owner = self.owner\n self.owner._firstInstr = otherinstr\n else:\n otherinstr.__next = self\n otherinstr.owner = self.owner\n otherinstr.__prev = self.__prev\n self.__prev = otherinstr\n otherinstr.__prev.__next = otherinstr", "def insertInstr(self, instr: ICode):\n if self._firstInstr is None:\n self._firstInstr = self._lastInstr = instr\n instr.owner = self\n else:\n self._firstInstr.insertBefore(instr)", "def insert_after(self, insert_pos_inst):\n basic_block = insert_pos_inst.basic_block\n if basic_block is None:\n raise IRError('Instruction is not in basic block')\n idx = basic_block.insts.index(insert_pos_inst)\n self.basic_block = basic_block\n basic_block.insts.insert(idx + 1, self)", "def append(self,instr):\n self.instructions.append(instr)", "def DocumentElementInsertAfter(self):\n raise NotImplementedError()", "def DocumentInlineBlipInsertAfterElement(self):\n raise NotImplementedError()", "def __iadd__(self, other):\n\t\t#print(\"iadd\")\t\t\n\t\t# merge other branch\t\t\n\t\tself.graph.update(other.graph)\n\t\tself.bottoms.update(other.bottoms)\n\t\tself.output_shape.update(other.output_shape)\n\t\tlayer_name = \"iadd_{}\".format(len(self.graph))\n\t\tself.graph[layer_name] = layer_name\n\t\tself.bottoms[layer_name] = [self.cur_id, other.cur_id]\n\t\tself.output_shape[layer_name] = self.cur_tensor.size()\n\t\tself.cur_id = layer_name\n\t\t# save memory\n\t\tdel other\t\t\n\t\treturn self", "def insert_before(self, insert_pos_inst):\n basic_block = insert_pos_inst.basic_block\n if basic_block is None:\n raise IRError('Instruction is not in basic block')\n idx = basic_block.insts.index(insert_pos_inst)\n self.basic_block = basic_block\n basic_block.insts.insert(idx, self)", "def prepend_inst(self, inst):\n inst.basic_block = self\n self.insts = [inst] + self.insts", "def add_to(self, newowner):\n self.prevai = newowner.ai\n newowner.ai = self", "def _insert_op(self, op):", "def insertAfter( self, node ): \n if isinstance( self, HtmlDomNode ) and isinstance( node, HtmlDomNode ):\n node.parentNode.after( node, self )", "def __iadd__(self, other):\n if isinstance(other, Token):\n new = Token(self.text + other.text, self.position, self.category)\n else:\n new = Token(self.text + other, self.position, self.category)\n return new", "def replace_with(self, new_inst):\n new_inst.insert_after(self)\n self.replace_uses_with(new_inst)\n self.destroy()", "def possessed_by(self, other):\r\n self.owner = other", "def getInstructionAfter(self, instruction: ghidra.program.model.listing.Instruction) -> ghidra.program.model.listing.Instruction:\n ...", "def _add(self, other):\n return None", "def __iadd__(self, other):\n self.children.append(other)\n return self", "def __radd__(self, other):\n return Token(\n other + self.text, self.position - len(other), self.category)", "def insert_after(self,node,new_node):\n new_node.next = node.next\n node.next = new_node", "def after_insert(self, obj, st):\n pass", "def insert(self, rule, ident):\n raise NotImplementedError", "def after( self, src, target ):\n flag = False\n currNextSiblingNode = None\n if src == None:\n src = self.lastChild()\n currNextSiblingNode = 
src.getNextSiblingNode()\n self.setChild( target )\n flag = True\n if isinstance( target, HtmlDomNode ) and isinstance( src, HtmlDomNode ):\n if not flag:\n currNextSiblingNode = src.getNextSiblingNode()\n index = self.children.index( src )\n self.children.insert( index + 1, target )\n target.setParentNode( self )\n \n src.setSiblingNode( target )\n target.setPreviousSiblingNode( src )\n target.setSiblingNode( currNextSiblingNode )\n if currNextSiblingNode:\n currNextSiblingNode.setPreviousSiblingNode( target )\n else:\n raise Exception( \"Invalid node object. object must be of type Element.\" )", "def __radd__(self, other):\n if other is Ellipsis:\n return SkipTo(self)(\"_skipped\") + self\n\n return whitespaces.CURRENT.normalize(other) + self", "def InsertUnderLast(self, *args, **kwargs):\n pass", "def __iadd__(self, other):\n raise NotImplementedError(\"Implement this if needed\")", "def __add__(self, other):\n if isinstance(other, Token):\n return Token(self.text + other.text, self.position, self.category)\n else:\n return Token(self.text + other, self.position, self.category)", "def __iadd__(self, other):\n self.x += other.x\n self.y += other.y\n return self", "def InsertNextPoint(self, ):\n ..." ]
[ "0.719003", "0.7132219", "0.6927207", "0.65833426", "0.5970327", "0.5787707", "0.5783591", "0.56777227", "0.5648216", "0.5632277", "0.56003106", "0.5597215", "0.55759704", "0.5575936", "0.5554544", "0.544754", "0.54253715", "0.54174846", "0.5384279", "0.5366979", "0.53345114", "0.5304714", "0.53041476", "0.5302036", "0.5289618", "0.5283864", "0.52679026", "0.526455", "0.52644813", "0.52463555" ]
0.8242531
0
removes this instruction from this instruction's owner
def remove(self):
    # pylint: disable=protected-access
    if self.__next is None and self.__prev is None:
        self.owner._firstInstr = self.owner._lastInstr = None
    elif self.__next is None:
        self.owner._lastInstr = self.__prev
        self.__prev.__next = None
    elif self.__prev is None:
        self.owner._firstInstr = self.__next
        self.__next.__prev = None
    else:
        self.__prev.__next = self.__next
        self.__next.__prev = self.__prev
    self.__prev = self.__next = self.owner = None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unpossessed(self):\r\n self.owner = None", "def removeInstruction(self, instruction: ghidra.program.model.listing.Instruction) -> None:\n ...", "def remove_from_hand(self):\n pass", "def remove_self(self):\n self.parent.remove(self.element)", "def remove():", "def remove(self):\n if self.basic_block is None:\n if self not in self.module.global_insts:\n raise IRError('Instruction is not in basic block or module')\n self.module.global_insts.remove(self)\n return\n self.basic_block.insts.remove(self)\n self.basic_block = None", "def remove(self):\n self.inp.inputs.discard(self)\n self.out.outputs.discard(self)", "def remove(self):", "def remove(self):\n pass", "def remove(self):\n pass", "def remove(self):\n pass", "def remove(self):\r\n\t\tself._delete()", "def remove(self):\n self.__source_gate._unregister_outgoing(self)\n self.__target_slot._unregister_incoming(self)", "def removeInstructionAt(self, address: ghidra.program.model.address.Address) -> None:\n ...", "def remove(self):\r\n game_ref.remove(self)", "def remove(ip):\n return __apf_cmd(\"-u {}\".format(ip))", "def remove_from_block(self):\n self.enclosing_block.remove_ops([self])", "def remove(self):\n raise NotImplementedError", "def remove(self):\n raise NotImplementedError", "def _remove(self):\n pass", "def remove():\n pass", "def remove(self, name):\n cont = getattr(self, name)\n self.disconnect(name)\n self._exprmapper.remove(name)\n if has_interface(cont, IComponent):\n self._depgraph.remove(name)\n for obj in self.__dict__.values():\n if obj is not cont and is_instance(obj, Driver):\n obj.workflow.remove(name)\n obj.remove_references(name)\n\n return super(Assembly, self).remove(name)", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()", "def remove(self):\n self._delete()" ]
[ "0.69388956", "0.69099617", "0.6750926", "0.6566112", "0.65368277", "0.65222853", "0.6515427", "0.6501087", "0.6485686", "0.6485686", "0.6485686", "0.6457478", "0.63775134", "0.63522595", "0.62998563", "0.6274236", "0.6253145", "0.6226511", "0.6226511", "0.61956424", "0.6189277", "0.6182241", "0.61406535", "0.61406535", "0.61406535", "0.61406535", "0.61406535", "0.61406535", "0.61406535", "0.61406535" ]
0.6978263
0
returns a list of operands read in this instruction
def getOperandsRead(self):
    # pylint: disable=no-self-use
    return []
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_ops(self):\n return self._read_ops", "def operartors(self) -> List[Operator]:\n return list(self.__ops.keys())", "def operators(self):\n return self._operators", "def repair_operators(self) -> List[Tuple[str, _OperatorType]]:\n return list(self._r_ops.items())", "def get_all(self):\n return self._name_to_operator.values()", "def getOperandsWritten(self):\n # pylint: disable=no-self-use\n return []", "def operators(self):\n return self.domain.operators.keys()", "def list_operators():\n for operator_symbol in operations:\n print(operator_symbol)", "def extract_operators(e, independent=False):\n ops = []\n\n if isinstance(e, Operator):\n ops.append(e)\n\n elif isinstance(e, Add):\n for arg in e.args:\n ops += extract_operators(arg, independent=independent)\n\n elif isinstance(e, Mul):\n for arg in e.args:\n ops += extract_operators(arg, independent=independent)\n else:\n if debug:\n print(\"Unrecongized type: %s: %s\" % (type(e), str(e)))\n\n return list(set(ops))", "def eval_ops(opcodes):\n output = []\n for op in opcodes:\n if op in [\"+\", \"*\"]:\n b = output.pop(-1)\n a = output.pop(-1)\n value = ops[op](a, b)\n output.append(value)\n else:\n output.append(op)\n\n assert len(output) == 1\n return output[0]", "def operation_list(self):\n return self._operation_list", "def operation_list(self):\n return self._operation_list", "def operation_list(self):\n return self._operation_list", "def operation_list(self):\n return self._operation_list", "def commutative_operators(self) -> List[Type[ast.operator]]:\n return [\n op for op in self.binary_operators if op in self.cummitative_operator_set\n ]", "def operands(app):\n return cdr(app)", "def _read_operands(self, operands_addr: int, operand_types: Tuple[ZMachineOperandTypes]) -> Tuple[Optional[Tuple[ZData]], int]:\n if operand_types is None:\n return None, operands_addr\n\n operands: List[ZData] = []\n\n addr = operands_addr\n for op_type in operand_types:\n if op_type in [ZMachineOperandTypes.SMALL_CONSTANT, ZMachineOperandTypes.VARIABLE]:\n operands.append(ZByte(self._memory, addr))\n addr += 1\n elif op_type == ZMachineOperandTypes.LARGE_CONSTANT:\n operands.append(ZWord(self._memory, addr))\n addr += 2\n\n return tuple(operands), addr", "def canonically_ordered(self, insn, as_strings=False):\n if disassembler.is_syntax_att(insn):\n ops = list(insn.operands)\n elif disassembler.is_syntax_intel(insn):\n ops = list(reversed(insn.operands))\n\n if as_strings:\n ops = [_ for _ in ops]\n return ops", "def get_op_types(self):\n return self.cur_config['ops']", "def operation_list(self):\n return OPERATION_LIST", "def get_ops():\n li = [\"EOF\",\"ADD\",\"SUB\",\"MUL\",\"DIV\",\"POW\",\"BITAND\",\"BITOR\",\"CMP\",\"GET\", \\\n \"SET\",\"NUMBER\",\"STRING\",\"GGET\",\"GSET\",\"MOVE\",\"DEF\",\"PASS\", \\\n \"JUMP\",\"CALL\",\"RETURN\",\"IF\",\"DEBUG\",\"EQ\",\"LE\",\"LT\",\"DICT\", \\\n \"LIST\",\"NONE\",\"LEN\",\"LINE\",\"PARAMS\",\"IGET\",\"FILE\",\"NAME\", \\\n \"NE\",\"HAS\",\"RAISE\",\"SETJMP\",\"MOD\",\"LSH\",\"RSH\",\"ITER\",\"DEL\", \\\n \"REGS\",\"BITXOR\", \"IFN\", \"NOT\", \"BITNOT\"]\n dic = {}\n for i in li:\n dic[i] = li.index(i)\n return dic", "def get_ops (self, names):\n return operator.attrgetter(names)(self.core) if isinstance(names,str) else [\n operator.attrgetter(n)(self.core) for n in names ]", "def extract_all_operators(e_orig):\n if debug:\n print(\"extract_all_operators: \", e_orig)\n\n if isinstance(e_orig, Operator):\n return [e_orig]\n\n e = drop_c_number_terms(normal_ordered_form(e_orig.expand(),\n 
independent=True))\n\n if isinstance(e, Pow) and isinstance(e.base, Operator):\n return [e]\n\n ops = []\n\n if isinstance(e, Add):\n for arg in e.args:\n ops += extract_all_operators(arg)\n\n if isinstance(e, Mul):\n op_f = [f for f in e.args if (isinstance(f, Operator) or\n (isinstance(f, Pow) and\n isinstance(f.base, Operator)))]\n ops.append(Mul(*op_f))\n ops += op_f\n\n unique_ops = list(set(ops))\n\n sorted_unique_ops = sorted(unique_ops, key=operator_order)\n\n return sorted_unique_ops", "def get_operations(self):\n op = self.act.get_operations()\n op.extend(Character.decr_attr)\n return op", "def __get_operator_arg_list(operator: PatternStructure):\n if isinstance(operator, CompositeStructure):\n return operator.args\n if isinstance(operator, UnaryStructure):\n return [operator.arg]\n # a PrimitiveEventStructure\n return [operator]", "def target_iops(self):\n return self._target_iops", "def readlink_ops(self):\n return self._readlink_ops", "def get_instructions(self):\n tmp_ins = []\n idx = 0\n for i in self.method.get_instructions():\n if idx >= self.start and idx < self.end:\n tmp_ins.append(i)\n\n idx += i.get_length()\n return tmp_ins", "def get_operator(self):\n if len(self) == 1:\n return self[0].get_operator()\n op = np.array(self._get_array_of_operators())\n return np.sum(op, axis=0)", "def get_operators(self):\n url = self.config['links']['accountAPI'] + 'operators'\n params = {\n 'client': self.client,\n 'country_code': self.locale_suffix\n }\n data = self.make_request(url, 'get', params=params)\n\n return data['data']['operators']" ]
[ "0.7080609", "0.70025116", "0.68571204", "0.68286264", "0.68182766", "0.6815364", "0.66542053", "0.6588963", "0.646638", "0.6445326", "0.63981086", "0.63981086", "0.63981086", "0.63981086", "0.63549936", "0.6339191", "0.6320942", "0.63162535", "0.618147", "0.6178464", "0.6151084", "0.61049557", "0.6091393", "0.6077711", "0.60400474", "0.5951625", "0.59500134", "0.58647084", "0.5853707", "0.5807514" ]
0.8149229
0
returns a list of operands written in this instruction
def getOperandsWritten(self):
    # pylint: disable=no-self-use
    return []
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getOperandsRead(self):\n # pylint: disable=no-self-use\n return []", "def operartors(self) -> List[Operator]:\n return list(self.__ops.keys())", "def operators(self):\n return self._operators", "def repair_operators(self) -> List[Tuple[str, _OperatorType]]:\n return list(self._r_ops.items())", "def get_all(self):\n return self._name_to_operator.values()", "def list_operators():\n for operator_symbol in operations:\n print(operator_symbol)", "def operators(self):\n return self.domain.operators.keys()", "def commutative_operators(self) -> List[Type[ast.operator]]:\n return [\n op for op in self.binary_operators if op in self.cummitative_operator_set\n ]", "def canonically_ordered(self, insn, as_strings=False):\n if disassembler.is_syntax_att(insn):\n ops = list(insn.operands)\n elif disassembler.is_syntax_intel(insn):\n ops = list(reversed(insn.operands))\n\n if as_strings:\n ops = [_ for _ in ops]\n return ops", "def extract_operators(e, independent=False):\n ops = []\n\n if isinstance(e, Operator):\n ops.append(e)\n\n elif isinstance(e, Add):\n for arg in e.args:\n ops += extract_operators(arg, independent=independent)\n\n elif isinstance(e, Mul):\n for arg in e.args:\n ops += extract_operators(arg, independent=independent)\n else:\n if debug:\n print(\"Unrecongized type: %s: %s\" % (type(e), str(e)))\n\n return list(set(ops))", "def eval_ops(opcodes):\n output = []\n for op in opcodes:\n if op in [\"+\", \"*\"]:\n b = output.pop(-1)\n a = output.pop(-1)\n value = ops[op](a, b)\n output.append(value)\n else:\n output.append(op)\n\n assert len(output) == 1\n return output[0]", "def operands(app):\n return cdr(app)", "def operation_list(self):\n return self._operation_list", "def operation_list(self):\n return self._operation_list", "def operation_list(self):\n return self._operation_list", "def operation_list(self):\n return self._operation_list", "def read_ops(self):\n return self._read_ops", "def operation_list(self):\n return OPERATION_LIST", "def get_operations(self):\n op = self.act.get_operations()\n op.extend(Character.decr_attr)\n return op", "def extract_all_operators(e_orig):\n if debug:\n print(\"extract_all_operators: \", e_orig)\n\n if isinstance(e_orig, Operator):\n return [e_orig]\n\n e = drop_c_number_terms(normal_ordered_form(e_orig.expand(),\n independent=True))\n\n if isinstance(e, Pow) and isinstance(e.base, Operator):\n return [e]\n\n ops = []\n\n if isinstance(e, Add):\n for arg in e.args:\n ops += extract_all_operators(arg)\n\n if isinstance(e, Mul):\n op_f = [f for f in e.args if (isinstance(f, Operator) or\n (isinstance(f, Pow) and\n isinstance(f.base, Operator)))]\n ops.append(Mul(*op_f))\n ops += op_f\n\n unique_ops = list(set(ops))\n\n sorted_unique_ops = sorted(unique_ops, key=operator_order)\n\n return sorted_unique_ops", "def get_instructions(self):\n tmp_ins = []\n idx = 0\n for i in self.method.get_instructions():\n if idx >= self.start and idx < self.end:\n tmp_ins.append(i)\n\n idx += i.get_length()\n return tmp_ins", "def get_formula_in_list(self):\n return tree_to_string(self.expression)", "def write_ops(self):\n return self._write_ops", "def get_ops():\n li = [\"EOF\",\"ADD\",\"SUB\",\"MUL\",\"DIV\",\"POW\",\"BITAND\",\"BITOR\",\"CMP\",\"GET\", \\\n \"SET\",\"NUMBER\",\"STRING\",\"GGET\",\"GSET\",\"MOVE\",\"DEF\",\"PASS\", \\\n \"JUMP\",\"CALL\",\"RETURN\",\"IF\",\"DEBUG\",\"EQ\",\"LE\",\"LT\",\"DICT\", \\\n \"LIST\",\"NONE\",\"LEN\",\"LINE\",\"PARAMS\",\"IGET\",\"FILE\",\"NAME\", \\\n 
\"NE\",\"HAS\",\"RAISE\",\"SETJMP\",\"MOD\",\"LSH\",\"RSH\",\"ITER\",\"DEL\", \\\n \"REGS\",\"BITXOR\", \"IFN\", \"NOT\", \"BITNOT\"]\n dic = {}\n for i in li:\n dic[i] = li.index(i)\n return dic", "def get_op_types(self):\n return self.cur_config['ops']", "def target_iops(self):\n return self._target_iops", "def unary_op(self):\n return plist([op(x) for x in self], root=self.__root__)", "def __get_operator_arg_list(operator: PatternStructure):\n if isinstance(operator, CompositeStructure):\n return operator.args\n if isinstance(operator, UnaryStructure):\n return [operator.arg]\n # a PrimitiveEventStructure\n return [operator]", "def get_operator(self):\n if len(self) == 1:\n return self[0].get_operator()\n op = np.array(self._get_array_of_operators())\n return np.sum(op, axis=0)", "def x_operators(self) -> List[PauliTerm]:\n x_matrix = self.x_operator_matrix()\n zeros = np.zeros_like(x_matrix, dtype='int')\n return [\n pauli_term_for_row(x_matrix[i, :], zeros[i, :])\n for i in range(self.k)\n ]" ]
[ "0.74284786", "0.6942272", "0.68793195", "0.67819464", "0.67440844", "0.67391163", "0.6701617", "0.64588183", "0.64523685", "0.64015955", "0.63703436", "0.63552004", "0.6314888", "0.6314888", "0.6314888", "0.6314888", "0.63003516", "0.62023795", "0.61812854", "0.60893553", "0.6086306", "0.6065275", "0.6028369", "0.6008808", "0.59356046", "0.5930297", "0.59203124", "0.58978814", "0.58815134", "0.5868612" ]
0.7680413
0
prettyprinter for dumping this function to _file
def prettyprint(self, _file):
    _file.write("Function %s returns %s\n" % (self.name, self.returnType))
    _file.write(" local vars\n")
    for val in self.vars.values():
        _file.write(" ")
        val.prettyprint(_file)
    _file.write(" params\n")
    for val in self.params.values():
        _file.write(" ")
        val.prettyprint(_file)
    _file.write(" registers\n")
    for val in self.virtRegs.values():
        _file.write(" ")
        val.prettyprint(_file)
    _file.write(" code\n")
    for instr in self.instrs():
        if isinstance(instr, CLABEL):
            indent = " "
        else:
            indent = " "
        _file.write(indent + str(instr) + "\n")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pretty_print(self):\n pt = PrettyTable()\n for i in self.files_summary:\n pt.field_names = [\"File Name\", \"Classes\", \"Functions\", \"Lines\", \"Characters\"]\n pt.add_row(list([i, self.files_summary[i][\"class\"], self.files_summary[i][\"function\"], self.files_summary[i][\"line\"], self.files_summary[i][\"char\"]]))\n print(pt) #Using a Print statement here because i tried to return self.pt and it didnt give me anything but the print works", "def prettyprint(self, _file):\n for var in self.variables:\n var.prettyprint(_file)\n for fun in self.functions:\n fun.prettyprint(_file)", "def prettyprint(self, _file):\n xstr = \"reg \" + self.name + \" \" + self.type.desc()\n _file.write(xstr + \"\\n\")", "def prettyprint(self, _file):\n xstr = \"var \" + self.name + \" \" + self.type.desc()\n _file.write(xstr + \"\\n\")", "def pretty(self, **kwargs):\r\n raise NotImplementedError", "def ugly():\n\n global _pretty\n _pretty = False", "def _get_print_fn(file=sys.stdout):\n def _print_fn(op, xin,):\n for attr in op.attrs:\n temp = getattr(xin, attr)\n if callable(temp):\n pmsg = temp()\n else:\n pmsg = temp\n print(op.message, attr, '=', pmsg, file=file)\n return _print_fn", "def f_Dumpfname(func):\n @wraps(func)\n def echo_func(*func_args, **func_kwargs):\n if DEBUG: print('func \\033[1;31m {}()\\033[0m called by \\033[1;31m{}() \\033[0m'.format(func.__name__,sys._getframe(1).f_code.co_name))\n return func(*func_args, **func_kwargs)\n return echo_func", "def _PrintFunc(self, obj=None, verbose=False, summarize=True, recursive=False,\n use_pager=None, to_file=None):\n if obj is not None:\n self._printed_variables.append(obj)\n lines = describe.GenerateLines(\n obj, verbose=verbose, recursive=recursive, summarize=summarize,\n format_name='text')\n _WriteToStream(lines, use_pager=use_pager, to_file=to_file)", "def pretty_print_drt(self):\n self.drt_manager.pretty_print_drt()", "def print_out():\n pass", "def pretty_str(self) -> str:\n ...", "def _format(self):\n output = f\"\\n{color('>>> DUMP')} from {self.filename}: {color(f'L{self.line}')} in {color(f'{self.method}()')}\"\n\n for name, obj in self.objects.items():\n output += f\"\\n\\n{color(f' - {name}:')}\\n\"\n output += f\" {pformat(obj, width=110, indent=4)}\"\n\n output += color(\"\\n\\n<<< END\")\n return output", "def pretty_log(function_name, args):\n print(\"=\" * 100)\n print(\" \" * 40 + \"In function: \" + function_name + \"()\")\n print(\"-\" * 100)\n for arg in args:\n if isinstance(arg, dict):\n print(json.dumps(arg, indent=4))\n else:\n print(arg)\n print(\"\")\n print(\"=\" * 100)", "def __call__(self, format, filename):\n # turn the filename into something suitable for use in #define's\n prettyname = filename.replace(\".\", \"_\").upper()\n prettyname = prettyname.replace(\"/\", \"__\")\n prettyname = prettyname.replace(\":\", \"__\")\n prettyname = prettyname.replace(\"-\", \"__\")\n\n # try and open the file\n with open(filename, \"w\") as output:\n self.writeFuncsLut[format]( output, prettyname )", "def _prettyfilename(self):\n return f'{self.title} ({self.subtype})'", "def _prettyfilename(self):\n return f'{self.title} ({self.year})'", "def _prettyfilename(self):\n return self.title", "def action_to_pretty_str(action) :\n raise NotImplementedError", "def dump(self) -> None:\n ...", "def _print_custom(self):\n pass", "def printpretty(self):\n print(self.string_rep())", "def write_pretty(reviewer_data, file_obj):\n table = prettytable.PrettyTable(\n ('Reviewer',\n 'Reviews -2 -1 +1 +2 +A +/- %',\n 
'Disagreements*'))\n for (name, r_data, d_data) in reviewer_data:\n r = '%7d %3d %3d %3d %3d %3d %s' % r_data\n d = '%3d (%s)' % d_data\n table.add_row((name, r, d))\n file_obj.write(\"%s\\n\" % table)", "def pprint_helper(self, angle, indent):\n # just here for defining the interface; work is done in subclasses\n pass", "def __str__(self):\n buf = io.StringIO()\n args.output.write(buf, self.root, self.headings)\n return buf.getvalue()", "def pformat(object):\r\n return PrettyPrinter().pformat(object)", "def parseprint(code, filename=\"<string>\", mode=\"exec\", **kwargs):\n node = parse(code, mode=mode) # An ode to the code\n print(dump(node, **kwargs))", "def prettyPrintStringHelper_ (s, stream, indent, pretty_print=True, indent_additive=4):\r\n stream.write(repr(s))", "def func_doc():", "def PrettyPrint(self):\r\n print(self.data)\r\n return" ]
[ "0.71779335", "0.7116922", "0.7084293", "0.70625824", "0.66019326", "0.6540435", "0.6426516", "0.64012384", "0.6324325", "0.6153158", "0.611711", "0.6105079", "0.6035712", "0.5985358", "0.59463036", "0.5938295", "0.59304595", "0.5902174", "0.58622736", "0.58602864", "0.58228546", "0.58136684", "0.5777618", "0.5758615", "0.5749495", "0.57489437", "0.5740538", "0.5707467", "0.5683615", "0.5680177" ]
0.79620916
0
Draws lines on the board; the first and last lines are not drawn because they lie on the edges of the screen
def draw_lines(self):
    for x_cord in range(0, Dimension.SCREEN_WIDTH.value, Dimension.SQUARE_WIDTH.value):
        pg.draw.line(self.window, Colors.BLACK.value, (x_cord, 0), (x_cord, Dimension.SCREEN_HEIGHT.value))
    for y_cord in range(0, Dimension.SCREEN_HEIGHT.value, Dimension.SQUARE_HEIGHT.value):
        pg.draw.line(self.window, Colors.BLACK.value, (0, y_cord), (Dimension.SCREEN_WIDTH.value, y_cord))
    pg.display.update()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_lines(self):\n # draw x lines\n y = self.step_y\n while y <= self.height:\n x = 0\n while x <= self.width:\n self.canvas.create_line(x, y, x+3.5, y)\n self.canvas.update()\n x += 3.5\n y += self.step_y\n \n # draw y lines\n x = self.step_x\n while x <= self.width:\n y = 0\n while y <= self.height:\n self.canvas.create_line(x, y, x, y+3.5)\n self.canvas.update()\n y += 3.5\n x += self.step_x\n \n self.is_operating = False", "def drawLines(self):\n\t\tintersections = [[], []]\n\t\tfor l in self.lines:\n\t\t\tif l.direction == 'v':\n\t\t\t\tif l.rtc:\n\t\t\t\t\tposition = l.coordinate + int((self.width - 1) / 2)\n\t\t\t\telse:\n\t\t\t\t\tposition = int((l.coordinate * self.width / 100) if type(l.coordinate) == float else l.coordinate)\n\t\t\t\tintersections[0].append(position)\n\t\t\t\tfor yPos in range(1, self.height - 2):\n\t\t\t\t\tself.wts(yPos, position, '│', self._borderColor)\n\t\t\t\t# endpoints\n\t\t\t\tself.wts(0, position, '┬',self._borderColor)\n\t\t\t\tself.wts(self.height - 2, position, '┴', self._borderColor)\n\t\t\telif l.direction == 'h':\n\t\t\t\tif l.rtc:\n\t\t\t\t\tposition = l.coordinate + ((self.height - 1) / 2)\n\t\t\t\telse:\n\t\t\t\t\tposition = int((l.coordinate * self.height / 100) - 1 if type(l.coordinate) == float else l.coordinate)\n\t\t\t\tintersections[1].append(position)\n\t\t\t\tself.wts(position, 1, '─' * (self.width - 2), self._borderColor)\n\t\t\t\t# endpoints\n\t\t\t\tself.wts(position, 0, '├', self._borderColor)\n\t\t\t\tself.wts(position, self.width - 1, '┤', self._borderColor)\n\t\t# draw intersections\n\t\tfor x in intersections[1]:\n\t\t\tfor y in intersections[0]:\n\t\t\t\tself.wts(x, y, '┼', self._borderColor)\n\t\tself.verticalBoundaries = intersections[0]\n\t\tif self.screenBorder:\n\t\t\tself.verticalBoundaries.append(self.width)", "def draw_board(self):\n for i in range(0, 800, 80):\n if i == 80:\n pygame.draw.line(self.screen, 'black', (i, 80), (i, 800), width=3)\n pygame.draw.line(self.screen, (0, 0, 128), (0, i), (720, i), width=5)\n continue\n pygame.draw.line(self.screen, 'black', (i, 80), (i, 800), width=3)\n pygame.draw.line(self.screen, 'black', (0, i), (720, i), width=3)\n for j in range(240, 800, 240):\n pygame.draw.line(self.screen, (0, 0, 128), (j, 80), (j, 800), width=5)\n pygame.draw.line(self.screen, (0, 0, 128), (0, j + 80), (720, j + 80), width=5)\n pygame.draw.line(self.screen, (0, 0, 128), (0, 80), (0, 800), width=5)", "def draw_grid(self):\n for square in range(COLS+1):\n #vertical lines\n start_pos = (helpers.get_col_left_p(square),helpers.get_row_top_p(0))\n end_pos = (helpers.get_col_left_p(square),helpers.get_row_top_p(ROWS))\n pygame.draw.line(g.screen,WHITE,start_pos,end_pos)\n for square in range(ROWS+1):\n #horizontal lines\n start_pos = (helpers.get_col_left_p(0),helpers.get_row_top_p(square))\n end_pos = (helpers.get_col_left_p(COLS),helpers.get_row_top_p(square))\n pygame.draw.line(g.screen,WHITE,start_pos,end_pos)", "def draw_grid_lines(grid_display, screen_width, screen_height, box_width, box_height, line_color):\n\n for x in range(0, screen_width, box_width):\n for y in range(0, screen_height, box_height):\n pygame.draw.line(grid_display, line_color, (x, 0), (x, screen_height))\n pygame.draw.line(grid_display, line_color, (0, y), (screen_width, y))", "def draw_grid(self):\n for x in range(0, WIDTH, TILESIZE):\n pg.draw.line(self.screen, LIGHTGREY, (x, 0), (x, HEIGHT))\n \n for y in range(0, HEIGHT, TILESIZE):\n pg.draw.line(self.screen, LIGHTGREY, (0, y), (WIDTH, y))", "def draw_lines(display, 
coord, box_size, color, bg_color):\n left, top = coord\n stroke = 6\n half_stroke = int(stroke / 2)\n left = left + half_stroke\n top = top + half_stroke\n box_size = box_size - stroke\n for i in range(0, box_size, int(stroke + 2)):\n pygame.draw.line(\n display, color,\n (left, top + i),\n (left + i, top),\n stroke,\n )\n pygame.draw.line(\n display, color,\n (left + i, top + box_size - 1),\n (left + box_size - 1, top + i),\n stroke,\n )\n return", "def draw_grid(self):\n\n # Draw horizontal lines\n for row in range(self.num_rows + 1):\n left = row_column_to_pixels(row, 0)\n right = row_column_to_pixels(row, self.num_cols)\n pygame.draw.line(self.screen, COLOR_MAP['gray'], left, right)\n\n # Draw vertical lines\n for col in range(self.num_cols + 1):\n top = row_column_to_pixels(0, col)\n bottom = row_column_to_pixels(self.num_rows, col)\n pygame.draw.line(self.screen, COLOR_MAP['gray'], top, bottom)", "def drawGrid(w, rows, surface):\r\n sizeBtwn = w // rows\r\n\r\n x = 0\r\n y = 0\r\n for l in range(rows):\r\n x = x + sizeBtwn\r\n y = y + sizeBtwn\r\n\r\n #line color-white #start end\r\n # pygame.draw.line(surface, (255,255,255), (x,0), (x,w)) #vertical\r\n #pygame.draw.line(surface, (255,255,255), (0,y), (w,y)) #horizontal\r", "def draw_grid(self):\n for i in range(N * N + 1):\n color = \"blue\" if i % N == 0 else \"gray\"\n x0 = MARGIN + i * SIDE\n y0 = MARGIN\n x1 = MARGIN + i * SIDE\n y1 = HEIGHT - MARGIN\n self.canvas.create_line(x0, y0, x1, y1, fill=color)\n\n x0 = MARGIN\n y0 = MARGIN + i * SIDE\n x1 = WIDTH - MARGIN\n y1 = MARGIN + i * SIDE\n self.canvas.create_line(x0, y0, x1, y1, fill=color)", "def draw_grid(self):\n if self.grid_center == True:\n (n, m) = (self.n, self.m)\n (dx, dy) = (self.dx // 2, self.dy // 2)\n else:\n (n, m) = (self.n + 1, self.m + 1)\n (dx, dy) = (0, 0)\n\n x0 = self.x0 + dx\n y0 = self.y0 + dy\n\n # vertical lines\n for j in range(m):\n p0 = (x0 + j * self.dx, y0)\n p1 = (x0 + j * self.dx, y0 + (n-1) * self.dy)\n pygame.draw.line(self.screen, self.grid_col, p0, p1, self.grid_d) \n # horizontal lines\n for i in range(n):\n p0 = (x0, y0 + i * self.dy)\n p1 = (x0 + (m-1) * self.dx, y0 + i * self.dy)\n pygame.draw.line(self.screen, self.grid_col, p0, p1, self.grid_d)", "def drawGrid(self):\n for div in range(NBCELL):\n sec = SSIZE*div\n self.can.create_line(0, sec, GSIZE, sec, width=3, fill=GFILL)\n self.can.create_line(sec, 0, sec, GSIZE, width=3, fill=GFILL)", "def draw_grid():\r\n screen.fill((0,0,0))\r\n pygame.draw.line(screen, (255,255,255),(WIDTH/3,0),(WIDTH/3,HEIGHT))\r\n pygame.draw.line(screen, (255,255,255),(2*WIDTH/3,0),(2*WIDTH/3,HEIGHT))\r\n pygame.draw.line(screen, (255,255,255),(0,HEIGHT/3),(WIDTH,HEIGHT/3))\r\n pygame.draw.line(screen, (255,255,255),(0,2*HEIGHT/3),(WIDTH,2*HEIGHT/3))", "def draw(self, screen):\n lines = self.text.strip().split('\\n')\n y = self.y\n for line in lines:\n self.ui.show_text(line, (self.x, y), 30)\n y += 32", "def draw_board(self):\n self.window.fill(Colors.WHITE.value)\n self.draw_lines()\n self.draw_obstacles()", "def draw_board(self):\r\n for i in range(self.size):\r\n for k in range(self.size):\r\n left = k * self.CELL_SIZE + (k+1) * self.BORDER_WIDTH\r\n top = i * self.CELL_SIZE + (i+1) * self.BORDER_WIDTH\r\n rect = pygame.Rect(left, top, self.CELL_SIZE, self.CELL_SIZE)\r\n color = self.BG_COLOR\r\n if self.map[i][k] == self.BLOCK_CHAR:\r\n color = self.BLOCK_COLOR\r\n elif self.map[i][k] == self.START_CHAR:\r\n color = self.START_COLOR\r\n elif self.map[i][k] == self.END_CHAR:\r\n color = 
self.END_COLOR\r\n elif (k, i) in self.path:\r\n color = self.PATH_COLOR\r\n pygame.draw.rect(self.screen, color, rect)", "def display(self, screen: pygame.Surface, line_thickness=3):\n\t\tfor p1, p2 in self.__calculate_points():\n\t\t\tpygame.draw.line(screen, Color(255).get(), p1.get_int(), p2.get_int(), line_thickness)", "def draw(self):\n self.drawLine()\n\n for l in range(0, self.height):\n print(\"|\", end='', flush=True)\n for c in range(0, self.width):\n print(\" \" + str(self.grid[l][c]) + \" |\", end='', flush=True)\n print(\"\\n\", end='', flush=True)\n\n self.drawLine()", "def draw(self):\r\n pygame.draw.rect(self.screen, self.background_color, self.bounds)\r\n line_window = self.lines[self.scroll_window_top:self.scroll_window_bottom]\r\n for idx,line in enumerate(line_window):\r\n text = self.font.render(line, True, self.foreground_color)\r\n x,y = self._get_x_y_from_pos(self.position[0], self.position[1]+idx)\r\n self.screen.blit(text,(x,y))\r\n \r\n if self.cursor_visible and self.scroll_window_bottom == len(self.lines):\r\n x,y = self._get_x_y_from_pos(len(line_window[-1]), len(line_window))\r\n cursor_rect = pygame.Rect(x,y,\r\n self.text_width,self.text_height)\r\n pygame.draw.rect(self.screen, self.foreground_color, cursor_rect)", "def redraw_screen(self) -> None:\n # Reset screen to black\n self.screen.fill(Colors.BLACK)\n\n # Map values from array onto pygame window height\n arr = list_remap(ARRAY, (0, Game.HEIGHT))\n\n # Draw individual lines\n for index, value in enumerate(arr):\n # Round the value before working with it\n # This is necessary because pygame doesn't accept floats\n value = round(value)\n\n # Start with 10 units gap, draw lines with given separation between them\n x_pos = 10 + index * Game.SEPARATION\n # Subtract the value from height, pygame is inverted on Y axis\n y_pos = Game.HEIGHT - value\n\n pos1 = (x_pos, Game.HEIGHT)\n pos2 = (x_pos, y_pos)\n\n color = STATES[index]\n\n pygame.draw.line(self.screen, color, pos1, pos2)", "def draw_lines(self, img, lines, color=[255, 0, 0], thickness=5):\n # draw left and right lane lines\n for x1, y1, x2, y2 in self.get_lane_lines(lines, img):\n cv2.line(img, (x1, y1), (x2, y2), color, thickness)", "def draw_lines(self, x, y,\n north=False, south=False, east=False, west=False,\n color=\"black\"):\n upper_left = (y * self.scale, x * self.scale)\n upper_right = (upper_left[0] + self.scale, upper_left[1])\n lower_left = (upper_left[0], upper_left[1] + self.scale)\n lower_right = (upper_left[0] + self.scale, upper_left[1] + self.scale)\n\n if north:\n self.canvas.create_line(*upper_left, *upper_right, fill=color)\n\n if south:\n self.canvas.create_line(*lower_left, *lower_right, fill=color)\n\n if east:\n self.canvas.create_line(*upper_right, *lower_right, fill=color)\n\n if west:\n self.canvas.create_line(*upper_left, *lower_left, fill=color)", "def drawLines(\n self,\n num_columns,\n num_rows,\n pan_x,\n pan_y,\n x_offset,\n y_offset,\n spacing\n ):\n\n # draw vertical lines\n for line in range(-num_columns, num_columns):\n line *= spacing\n grid_number = pan_x - x_offset - line\n\n self.__finalizeGridLineOpacity(grid_number)\n\n GL.glBegin(GL.GL_LINES)\n GL.glVertex3f(((line + x_offset) / self.aspect_ratio), num_rows * spacing, self.GRID_DEPTH)\n GL.glVertex3f(((line + x_offset) / self.aspect_ratio), -num_rows * spacing, self.GRID_DEPTH)\n GL.glEnd()\n # draw horizontal lines\n for line in range(-num_rows, num_rows):\n\n line *= spacing\n grid_number = pan_y + line - y_offset\n 
self.__finalizeGridLineOpacity(grid_number)\n\n GL.glBegin(GL.GL_LINES)\n GL.glVertex3f((num_columns * spacing) / self.aspect_ratio, line - y_offset, self.GRID_DEPTH)\n GL.glVertex3f((-num_columns * spacing) / self.aspect_ratio, line - y_offset, self.GRID_DEPTH)\n GL.glEnd()", "def drawGrid(self):\n\n if self.orientation == \"isometric\":\n for vline in range(0, self.map_array.shape[0]):\n line = self.canvas.create_line(iso(vline*self.cell_width, 0),\n iso(vline*self.cell_width, self.map_array.shape[0]*self.cell_height))\n self.canvas_objects.append(line)\n\n for hline in (range(0, self.map_array.shape[1])):\n line = self.canvas.create_line(iso(0, hline*self.cell_height),\n iso(self.map_array.shape[1]*self.cell_width, hline*self.cell_height))\n self.canvas_objects.append(line)\n self.canvas.bind(\"<Button-1>\", self.paintCells)\n self.canvas.bind(\"<Enter>\", self.drawFrame)\n self.canvas.bind(\"<Leave>\", self.killFrame)\n self.canvas.bind(\"<Motion>\", self.showFrame)", "def draw_line():\n\n # Small Size Line\n glLineWidth(0.1)\n glColor3f(0.5, 1.0, 0.9)\n wid = 0\n while wid <= width:\n length = 0\n while length <= height:\n glBegin(GL_LINES)\n glVertex3f(0.0, length, 0.0)\n glVertex3f(wid, length, 0)\n glEnd()\n glBegin(GL_LINES)\n glVertex3f(length, 0, 0.0)\n glVertex3f(length, wid, 0)\n glEnd()\n length += 10\n wid += 50\n # Medium Size Line\n glLineWidth(2.0)\n wid = 0\n while wid <= width:\n length = 0\n while length <= height:\n glBegin(GL_LINES)\n glVertex3f(0.0, length, 0.0)\n glVertex3f(wid, length, 0)\n glEnd()\n length += 50\n glBegin(GL_LINES)\n glVertex3f(length, 0, 0.0)\n glVertex3f(length, wid, 0)\n glEnd()\n wid += 50\n # Main Line\n # ordinat\n glLineWidth(1.5)\n glColor3f(0.5, 0.4, 0.8)\n glBegin(GL_LINES)\n glVertex3f(height / 2, 0, 0.0)\n glVertex3f(height / 2, width, 0)\n glEnd()\n # absis\n glBegin(GL_LINES)\n glVertex3f(0, width / 2, 0.0)\n glVertex3f(height, width / 2, 0)\n glEnd()", "def draw_fixed_lines(canvas):\n canvas.delete('all') # delete all existing lines from the canvas\n\n # Write your code below this line\n #################################\n # Bottom line\n canvas.create_line(GRAPH_MARGIN_SIZE, CANVAS_HEIGHT-GRAPH_MARGIN_SIZE, CANVAS_WIDTH-GRAPH_MARGIN_SIZE, CANVAS_HEIGHT-GRAPH_MARGIN_SIZE, width = LINE_WIDTH)\n # Top line\n canvas.create_line(GRAPH_MARGIN_SIZE, GRAPH_MARGIN_SIZE, CANVAS_WIDTH-GRAPH_MARGIN_SIZE, GRAPH_MARGIN_SIZE, width = LINE_WIDTH)\n # Vertical lines and year\n for index in range (len(YEARS)):\n canvas.create_line(GRAPH_MARGIN_SIZE + (CANVAS_WIDTH-GRAPH_MARGIN_SIZE*2)*index/len(YEARS),\n 0,\n GRAPH_MARGIN_SIZE + (CANVAS_WIDTH-GRAPH_MARGIN_SIZE*2)*index/len(YEARS),\n CANVAS_HEIGHT, width = LINE_WIDTH)\n canvas.create_text(GRAPH_MARGIN_SIZE + (CANVAS_WIDTH - GRAPH_MARGIN_SIZE * 2) * index / len(YEARS) + TEXT_DX,\n CANVAS_HEIGHT - GRAPH_MARGIN_SIZE + TEXT_DX,\n text = YEARS[index],\n anchor = tkinter.NW)", "def draw_gameBoard(self):\n\n # 15 horizontal lines\n for i in range(9):\n start_pixel_x = (i + 1) * CELL_PIXELS\n start_pixel_y = (0 + 1) * CELL_PIXELS\n end_pixel_x = (i + 1) * CELL_PIXELS\n end_pixel_y = (9 + 1) * CELL_PIXELS\n self.create_line(start_pixel_x, start_pixel_y, end_pixel_x, end_pixel_y)\n\n # 15 vertical lines\n for j in range(9):\n start_pixel_x = (0 + 1) * CELL_PIXELS\n start_pixel_y = (j + 1) * CELL_PIXELS\n end_pixel_x = (9 + 1) * CELL_PIXELS\n end_pixel_y = (j + 1) * CELL_PIXELS\n self.create_line(start_pixel_x, start_pixel_y, end_pixel_x, end_pixel_y)\n\n # place a \"star\" to particular intersections\n 
self.draw_star(3, 3)\n self.draw_star(7, 7)", "def drawBoard(self):\r\n \r\n for i in range(8):\r\n for j in range(8):\r\n if (i %2 == 0 and j % 2 == 0) or (i % 2 !=0 and j % 2 != 0):\r\n COLOR = COLOR1\r\n else: COLOR = COLOR2\r\n pygame.draw.rect(screen, COLOR, Rect(i*50, j*50, 50, 50))\r\n\r\n self.drawLabels()\r\n \r\n if not self.piecesDrawn:\r\n self.drawPieces()\r\n self.piecesDrawn = True", "def draw_line():\n global y1, y2\n canvas.create_line(x1, y1, x2, y2, width=2, fill=color)\n y1 -= 10\n y2 += 10", "def render_lines(self, line_cells):\n for cell in line_cells:\n self.surface.set_at(cell.tuple('2D'), YELLOW)" ]
[ "0.81675446", "0.76167536", "0.7486768", "0.7479164", "0.73604876", "0.7308902", "0.7301014", "0.7262629", "0.7260224", "0.7067093", "0.7031589", "0.7013453", "0.7009544", "0.6995654", "0.69763374", "0.6916263", "0.6910306", "0.6907244", "0.6904076", "0.68767136", "0.68581563", "0.6858096", "0.684643", "0.6845427", "0.67944366", "0.673384", "0.67218864", "0.67171127", "0.670784", "0.6705536" ]
0.78859526
1
Obstacles created by self.create_obstacles are drawn on self.window as black rectangles.
def draw_obstacles(self):
    for obstacle in self.obstacles:
        obstacle.draw(self.window, Colors.BLACK.value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def draw_obstacles():\n for obstacle in obstacles:\n plt.gca().add_patch(obstacle)", "def place_obstacles():\n #Randomly generate different sized rectangles\n #Soem may overlap, which gives more variety in shape of obstacles\n xvals = np.random.randint(0,self.map_dimensions[1],size=self.N_obstacles)\n yvals = np.random.randint(0,self.map_dimensions[0],size=self.N_obstacles)\n lower_left = zip(xvals,yvals)\n rects = []\n for LL in lower_left:\n x = LL[0]\n y = LL[1]\n wmax = self.map_dimensions[1] - x\n w = np.random.randint(0,wmax,size=1)[0]\n hmax = self.map_dimensions[0] - y\n h = np.random.randint(0,hmax,size=1)[0]\n rects += [(x,y,w,h)]\n self.coordinates__obstacles = rects", "def recreate_obstacles(self):\n self.board_matrix = np.full(Dimension.board_size(), 1)\n self.obstacles = self.create_obstacles()", "def draw_board(self):\n self.window.fill(Colors.WHITE.value)\n self.draw_lines()\n self.draw_obstacles()", "def generate_obstacles(self):\r\n obstacles = self.get_obstable_metrics\r\n obstacle_arrays = []\r\n\r\n for nb_obstacle in obstacles:\r\n empty_array = np.zeros(shape=(self.WINDOW_HEIGHT,\r\n self.WINDOW_WIDTH))\r\n start_location = 0 if nb_obstacle[2] == 1 else self.WINDOW_HEIGHT\r\n y, x = start_location - 1, nb_obstacle[3]\r\n empty_array[y, x] = -1\r\n\r\n for w_value in range(nb_obstacle[0]):\r\n x_updated = x + w_value\r\n\r\n for h_value in range(nb_obstacle[1]):\r\n if nb_obstacle[2] == 1:\r\n y_updated = y + h_value\r\n else:\r\n y_updated = y - h_value\r\n # Replace Value\r\n empty_array[y_updated, x_updated] = -1\r\n\r\n new_array = self.trim_whitespace(empty_array,\r\n nb_obstacle[2],\r\n self.MIN_GAP)\r\n obstacle_arrays.append(new_array)\r\n\r\n return obstacle_arrays", "def spawn_obstacles(self):\n self.obstacle_sprites.empty()\n number_of_obstacles = random.randint(MIN_OBSTACLES, MAX_OBSTACLES)\n while len(self.obstacle_sprites) < number_of_obstacles:\n obstacle = Obstacle(random.randrange(0, WIDTH), random.randrange(HEIGHT - 500, HEIGHT))\n obstacle_collision = pygame.sprite.spritecollide(obstacle, self.obstacle_sprites, False)\n if not obstacle_collision:\n self.obstacle_sprites.add(obstacle)", "async def show_obstacles(canvas):\n\n while True:\n boxes = []\n\n for obstacle in obstacle_manager:\n boxes.append(obstacle.get_bounding_box())\n\n for x, y, frame in boxes:\n draw_frame(canvas, x, y, frame)\n\n await Sleep(1)\n\n for x, y, frame in boxes:\n draw_frame(canvas, x, y, frame, negative=True)", "def reset_obstacles(self):\n self.obstacles = np.array([])", "def create_obstacles(self) -> List[Square]:\n obstacles_number = random.randint(1, self.maximum_obstacles_on_board)\n obstacles = list()\n\n while len(obstacles) < obstacles_number:\n\n obstacle_x_pos = random.randint(0, Dimension.board_width() - 1)\n obstacle_y_pos = random.randint(0, Dimension.board_height() - 1)\n obstacle = Square(obstacle_x_pos, obstacle_y_pos)\n if obstacle not in obstacles:\n self.board_matrix[obstacle_y_pos][obstacle_x_pos] = 0\n obstacles.append(obstacle)\n\n return obstacles", "def __init__(self, window: pg.Surface):\n self.window = window\n self.board_matrix = np.full(Dimension.board_size(), 1)\n self.maximum_obstacles_on_board = 10\n self.obstacles = self.create_obstacles()", "def draw_board(self):\n pygame.draw.rect(background, BLACK, self.outline, 3)\n # Outline is inflated here for future use as a collidebox for the mouse\n self.outline.inflate_ip(20, 20)\n for i in range(self.size-1):\n for j in range(self.size-1):\n rect = pygame.Rect(5+GRID_SIZE+(GRID_SIZE*i), 
5+GRID_SIZE+(GRID_SIZE*j), GRID_SIZE, GRID_SIZE)\n pygame.draw.rect(background, COLOR[BLACK], rect, 1)\n if self.size >= 13:\n for i in range(3):\n for j in range(3):\n coords = (5+4*GRID_SIZE+(GRID_SIZE*6*i), 5+4*GRID_SIZE+(GRID_SIZE*6*j))\n pygame.draw.circle(background, COLOR[BLACK], coords, 5, 0)\n screen.blit(background, (0, 0))\n pygame.display.update()", "async def show_obstacles(canvas):\n\n while True:\n boxes = []\n\n for obstacle in OBSTACLES:\n boxes.append(obstacle.dump_bounding_box())\n\n for row, column, frame in boxes:\n draw_frame(canvas, row, column, frame)\n\n await asyncio.sleep(0)\n\n for row, column, frame in boxes:\n draw_frame(canvas, row, column, frame, negative=True)", "def draw(self, win):\n for y in range(len(self.board)):\n for x, color in enumerate(self.board[y]):\n pygame.draw.rect(win, color, (self.x+x*self.cell_size, self.y+y*self.cell_size,\n self.cell_size, self.cell_size), 0)\n\n pygame.draw.rect(win, (0, 0, 0), (self.x, self.y, self.width, self.height), BORDER_THICKNESS)", "def begin_draw(self):\n pygame.init()\n self.display = pygame.display.set_mode(self.disp_size)\n pygame.display.set_caption('Map Editing')\n font = pygame.font.SysFont(\"arial\", 15)\n strings = [\"Press ESC to Start Drawing Obstacles\",\n \"Click Left to Draw & Right to Erase\",\n \"To finish Drawing,press Escape \",\n \"During search, Escape or Close to Quit\",\n \"you can also draw during the search, but it won't ba saved\"]\n texts = [font.render(s, True, (255, 255, 255)) for s in strings]\n for i, text in enumerate(texts):\n self.display.blit(text, (self.disp_size[0]//20, i*20+self.disp_size[1]//20))\n pygame.display.update()\n main_screen = True\n while main_screen:\n print(\"Waiting for start\")\n event = pygame.event.wait()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n main_screen = False\n self.display.fill([255, 255, 255])\n grid.draw(self.display)\n pygame.display.update()\n print(\"Now painting\")\n while True:\n event = pygame.event.wait()\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n break\n pos = list((np.array(pygame.mouse.get_pos())/self.block_size).astype(int))\n if pygame.mouse.get_pressed() == (1, 0, 0):\n print(\"Add wall at\", pos)\n grid[pos].type = \"WALL\"\n grid[pos].draw(self.display, self.block_size)\n elif pygame.mouse.get_pressed() == (0, 0, 1):\n print(\"remove wall from\", pos)\n grid[pos].type = \"ROAD\"\n grid[pos].draw(self.display, self.block_size)\n pygame.display.update()", "def draw_board(self):\r\n for i in range(self.size):\r\n for k in range(self.size):\r\n left = k * self.CELL_SIZE + (k+1) * self.BORDER_WIDTH\r\n top = i * self.CELL_SIZE + (i+1) * self.BORDER_WIDTH\r\n rect = pygame.Rect(left, top, self.CELL_SIZE, self.CELL_SIZE)\r\n color = self.BG_COLOR\r\n if self.map[i][k] == self.BLOCK_CHAR:\r\n color = self.BLOCK_COLOR\r\n elif self.map[i][k] == self.START_CHAR:\r\n color = self.START_COLOR\r\n elif self.map[i][k] == self.END_CHAR:\r\n color = self.END_COLOR\r\n elif (k, i) in self.path:\r\n color = self.PATH_COLOR\r\n pygame.draw.rect(self.screen, color, rect)", "def draw_gameBoard(self):\n\n # 15 horizontal lines\n for i in range(9):\n start_pixel_x = (i + 1) * CELL_PIXELS\n start_pixel_y = (0 + 1) * CELL_PIXELS\n end_pixel_x = (i + 1) * CELL_PIXELS\n end_pixel_y = (9 + 1) * CELL_PIXELS\n self.create_line(start_pixel_x, start_pixel_y, end_pixel_x, end_pixel_y)\n\n # 15 vertical lines\n for j in range(9):\n start_pixel_x = 
(0 + 1) * CELL_PIXELS\n start_pixel_y = (j + 1) * CELL_PIXELS\n end_pixel_x = (9 + 1) * CELL_PIXELS\n end_pixel_y = (j + 1) * CELL_PIXELS\n self.create_line(start_pixel_x, start_pixel_y, end_pixel_x, end_pixel_y)\n\n # place a \"star\" to particular intersections\n self.draw_star(3, 3)\n self.draw_star(7, 7)", "def __generate_rectangle_obstacles(self, world):\n obs_min_dim = self.cfg[\"obstacle\"][\"rectangle\"][\"min_dim\"]\n obs_max_dim = self.cfg[\"obstacle\"][\"rectangle\"][\"max_dim\"]\n obs_max_combined_dim = self.cfg[\"obstacle\"][\"rectangle\"][\"max_combined_dim\"]\n obs_min_count = self.cfg[\"obstacle\"][\"rectangle\"][\"min_count\"]\n obs_max_count = self.cfg[\"obstacle\"][\"rectangle\"][\"max_count\"]\n obs_min_dist = self.cfg[\"obstacle\"][\"rectangle\"][\"min_distance\"]\n obs_max_dist = self.cfg[\"obstacle\"][\"rectangle\"][\"max_distance\"]\n\n # generate the obstacles\n obstacles = []\n obs_dim_range = obs_max_dim - obs_min_dim\n obs_dist_range = obs_max_dist - obs_min_dist\n num_obstacles = randrange(obs_min_count, obs_max_count + 1)\n\n test_geometries = [r.global_geometry for r in world.robots]\n while len(obstacles) < num_obstacles:\n # generate dimensions\n width = obs_min_dim + (random() * obs_dim_range )\n height = obs_min_dim + (random() * obs_dim_range )\n while width + height > obs_max_combined_dim:\n height = obs_min_dim + (random() * obs_dim_range )\n\n # generate position\n dist = obs_min_dist + (random() * obs_dist_range)\n phi = -pi + (random() * 2 * pi)\n x = dist * sin(phi)\n y = dist * cos(phi)\n\n # generate orientation\n theta = -pi + (random() * 2 * pi)\n\n # test if the obstacle overlaps the robots or the goal\n obstacle = RectangleObstacle(width, height, Pose(x, y, theta))\n intersects = False\n for test_geometry in test_geometries:\n intersects |= geometrics.convex_polygon_intersect_test(test_geometry, obstacle.global_geometry)\n if not intersects:\n obstacles.append(obstacle)\n return obstacles", "def draw(self):\n if self.master != None :\n fill = Cell.FILLED_COLOR_BG\n outline = Cell.FILLED_COLOR_BORDER\n\n if not self.fill:\n fill = Cell.EMPTY_COLOR_BG\n outline = Cell.EMPTY_COLOR_BORDER\n walls[self.ord][self.abs] = 0\n else:\n walls[self.ord][self.abs] = 1\n\n\n xmin = self.abs * self.size\n xmax = xmin + self.size\n ymin = self.ord * self.size\n ymax = ymin + self.size\n self.master.create_rectangle(xmin, ymin, xmax, ymax, fill = fill, outline = outline)", "def get_obstacles_map(obstacles, placed_pecies):\n \n #create a mask image to draw the obstacles on\n blocks = np.zeros(ARENA_SIZE[::-1], np.uint8)\n\n #get the grid points where the robot needs to placed\n grid = get_grid(ARENA_SIZE)\n\n #draw the obstacles and their safety region on the map\n for i in obstacles.keys():\n cv2.circle(blocks, i, int(CIRCULAR_SAFETY_FACTOR*BLOCK_SIZE[0]), 129, -1)\n cv2.rectangle(blocks, (i[0]-int(obstacles[i][0]/4), i[1]-int(obstacles[i][1]/4)), (i[0]+int(obstacles[i][0]/4), i[1]+int(obstacles[i][1]/4)), 255, -1)\n\n #draw the obstacles and their safety region on the map\n for i in placed_pecies.keys():\n try:\n if not i == grid[5]:\n cv2.circle(blocks, i, int(CIRCULAR_SAFETY_FACTOR*BLOCK_SIZE[0]), 129, -1)\n else:\n cv2.rectangle(blocks, (int(i[0]-7.4*placed_pecies[i][0]/4), int(i[1]-7.4*placed_pecies[i][1]/4)),\n (int(i[0]+7.4*placed_pecies[i][0]/4), int(i[1]+7.4*placed_pecies[i][1]/4)), 129, -1)\n cv2.rectangle(blocks, (i[0]-int(placed_pecies[i][0]/4), i[1]-int(placed_pecies[i][1]/4)), (i[0]+int(placed_pecies[i][0]/4), i[1]+int(placed_pecies[i][1]/4)), 
255, -1)\n except Exception as e:\n print(e)\n\n return cv2.bitwise_not(blocks)", "def draw_windows():\n martin.begin_fill() # lines 88-118 draw out a row consisting of 3 rectangles for windows\n for i in range(2):\n martin.pendown()\n martin.forward(13)\n martin.right(90)\n martin.forward(20)\n martin.right(90)\n martin.penup()\n martin.end_fill()\n\n martin.forward(30)\n martin.begin_fill()\n for i in range(2):\n martin.pendown()\n martin.forward(13)\n martin.right(90)\n martin.forward(20)\n martin.right(90)\n martin.penup()\n martin.end_fill()\n\n martin.forward(30)\n martin.begin_fill()\n for i in range(2):\n martin.pendown()\n martin.forward(13)\n martin.right(90)\n martin.forward(20)\n martin.right(90)\n martin.penup()\n martin.end_fill()\n martin.hideturtle()", "def updateHardObstacles(self):\r\n global_obs = self.calcGlobalObstaclePosition([[10, 20],[10, 0],[10, -20]])\r\n self.globalHardObstaclesList.extend(global_obs)", "def draw(self): \n pygame.event.clear()\n self.window = ocempgui.widgets.Box(GG.utils.SCREEN_SZ[0], GG.utils.SCREEN_SZ[1])\n self.paintScreen()\n self.paintAvatar()\n self.paintTags()\n self.paintCustomizeZone()\n self.paintButtons()\n self.window.zOrder = 90000\n self.window.depth = 2\n return self.window", "def check_for_obstacles(self):\n obs = False\n obs_p = []\n for point in self.obstacles:\n if -0.15 <= point[1] <= 0.15: # robot is 178mm wide\n # Obstacles should be less than or equal to 0.2 m away before being detected\n if 0 <= point[0] <= .2:\n obs_p.append(point)\n obs = True\n if obs:\n pos = self.determine_pos_of_obstacle(obs_p)\n data = Obstacle()\n data.x = pos[0]\n data.y = pos[1]\n data.obstacle = True\n self.obs_pub.publish(data)", "def draw(self, win):\n pygame.draw.rect(win, self.color, self.rect)", "def _draw_blocks(self):\n\t\tsurface = pygame.display.get_surface()\n\t\tcolors = {\"J\": (15, 105, 245), \"I\": (85, 235, 255), \n\t\t\t\t \"L\":(255, 170, 0), \"S\": (45, 255, 55), \"Z\": (255, 4, 0),\n\t\t\t\t \"O\": (238, 255, 0), \"T\": (245, 0, 255)}\n\t\ty = math.floor((self.window_height - (self.window_height*0.9))/2)\n\t\tx = math.floor((self.window_width - ((self.window_height*0.9)/20)*10)/2)\n\t\tincrement = math.floor((self.window_height*0.9)/20)\n\t\t# loops through board and draws to the correct spot\n\t\tfor i in range(4, len(self.gameboard.get_board())):\n\t\t\tfor j in range(len(self.gameboard.get_board()[i])):\n\t\t\t\tx_incremented = math.floor(x + (increment * j))\n\t\t\t\ty_incremented = math.floor(y + (increment * (i-4)))\n\t\t\t\tif self.gameboard.get_board()[i][j][0] in colors:\n\t\t\t\t\tpygame.draw.rect(surface, colors[self.gameboard.get_board()[i][j][0]],\n\t\t\t\t\t\t\t\t\t(x_incremented, y_incremented, increment, increment))\n\t\t\t\t\t\t\t\t\t# x, y, x_wid, y_len\n\t\t\t\telse:\n\t\t\t\t\tpygame.draw.rect(surface, (0,0,0),\n\t\t\t\t\t\t\t\t\t(x_incremented, y_incremented, increment, increment))", "def drawBoard(self):\r\n \r\n for i in range(8):\r\n for j in range(8):\r\n if (i %2 == 0 and j % 2 == 0) or (i % 2 !=0 and j % 2 != 0):\r\n COLOR = COLOR1\r\n else: COLOR = COLOR2\r\n pygame.draw.rect(screen, COLOR, Rect(i*50, j*50, 50, 50))\r\n\r\n self.drawLabels()\r\n \r\n if not self.piecesDrawn:\r\n self.drawPieces()\r\n self.piecesDrawn = True", "def create(self, pygame):\n\n white = (255,255,255)\n self.obstacle_img = pygame.image.load(\"./Images/Obstacle.png\").convert()\n self.obstacle_img.set_colorkey(white)\n\n for i in range(8):\n 
self.random_objects.append(pygame.image.load(\"./Images/Object{}.png\".format(i+1)).convert())\n # self.random_objects[i].set_colorkey(white)", "def getInitialObstacles():\n # hardcode number of blocks\n # will account for movemnet\n from random import choice\n from globals import TILEWIDTH, TILEHEIGHT, WINHEIGHT, TILEFLOORHEIGHT, LEVEL, HALFWINWIDTH\n\n no_of_blocks = 50\n for b in range(no_of_blocks // 2):\n # get image\n # image = globals.IMAGESDICT['rock']\n for y in range(1,5):\n image = globals.IMAGESDICT[choice(['ugly tree', 'rock', 'tall tree'])]\n # make rect\n spaceRect = pygame.Rect((b * TILEWIDTH, y * TILEFLOORHEIGHT, TILEWIDTH, TILEFLOORHEIGHT))\n landscape = Landscape(image, spaceRect)\n allLandscapeList.add(landscape)\n allSpriteList.add(landscape)\n\n image = globals.IMAGESDICT['corner']\n negativeRect = pygame.Rect([-150, WINHEIGHT - TILEHEIGHT, TILEWIDTH, TILEHEIGHT])\n landscape = Landscape(image, negativeRect)\n allLandscapeList.add(landscape)\n allSpriteList.add(landscape)\n\n image = globals.IMAGESDICT['corner']\n positiveRect = pygame.Rect([LEVEL[0] - TILEWIDTH, WINHEIGHT - TILEHEIGHT, TILEWIDTH, TILEFLOORHEIGHT])\n landscape = Landscape(image, positiveRect)\n allLandscapeList.add(landscape)\n allSpriteList.add(landscape)\n\n bottomRect = pygame.Rect([HALFWINWIDTH, LEVEL[1] - TILEHEIGHT, TILEWIDTH, TILEFLOORHEIGHT])\n landscape = Landscape(image, bottomRect)\n allLandscapeList.add(landscape)\n allSpriteList.add(landscape)\n\n for x in range(0, LEVEL[0], 50):\n for y in range(10):\n image = globals.IMAGESDICT[choice(['ugly tree', 'rock', 'tall tree'])]\n spaceRect = pygame.Rect((x, LEVEL[1] - (y * TILEHEIGHT), TILEWIDTH, TILEFLOORHEIGHT))\n landscape = Landscape(image, spaceRect)\n if choice([0,1,0]):\n allLandscapeList.add(landscape)\n allSpriteList.add(landscape)\n\n\n return", "def clear_board(self):\n pygame.draw.rect(self.display, self.white, pygame.Rect(0, 0, self.window_x, self.window_y))\n self.draw_grid()", "def create_world(self):\n for row in range(self.cell_row):\n for col in range(self.cell_col):\n x1 = col * self.cell_size\n y1 = row * self.cell_size\n x2 = x1 + self.cell_size\n y2 = y1 + self.cell_size\n\n if (self.world_status.now[row, col]):\n self.world[row, col] = self.canvas.create_rectangle(\n x1, y1, x2, y2,\n fill = self.color_alive,\n outline = \"gray\",\n tags = \"rect\")\n else:\n self.world[row, col] = self.canvas.create_rectangle(\n x1, y1, x2, y2,\n fill = self.color_dead,\n outline = \"gray\",\n tags = \"rect\")" ]
[ "0.70438933", "0.69378084", "0.68815863", "0.685203", "0.6783483", "0.666752", "0.6614215", "0.6580512", "0.65752584", "0.6495871", "0.64657646", "0.6459727", "0.6416091", "0.63066995", "0.6299761", "0.6271255", "0.61703116", "0.61362374", "0.60891026", "0.6071949", "0.6070478", "0.6067121", "0.6062985", "0.605748", "0.60257816", "0.6013591", "0.59956706", "0.5992753", "0.5979203", "0.5972849" ]
0.85527563
0
The function creates from 1 to 10 obstacles with random coordinates. The self.board_matrix attribute is modified to reflect the changes on the board.
def create_obstacles(self) -> List[Square]:
    obstacles_number = random.randint(1, self.maximum_obstacles_on_board)
    obstacles = list()

    while len(obstacles) < obstacles_number:

        obstacle_x_pos = random.randint(0, Dimension.board_width() - 1)
        obstacle_y_pos = random.randint(0, Dimension.board_height() - 1)
        obstacle = Square(obstacle_x_pos, obstacle_y_pos)
        if obstacle not in obstacles:
            self.board_matrix[obstacle_y_pos][obstacle_x_pos] = 0
            obstacles.append(obstacle)

    return obstacles
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def recreate_obstacles(self):\n self.board_matrix = np.full(Dimension.board_size(), 1)\n self.obstacles = self.create_obstacles()", "def place_obstacles():\n #Randomly generate different sized rectangles\n #Soem may overlap, which gives more variety in shape of obstacles\n xvals = np.random.randint(0,self.map_dimensions[1],size=self.N_obstacles)\n yvals = np.random.randint(0,self.map_dimensions[0],size=self.N_obstacles)\n lower_left = zip(xvals,yvals)\n rects = []\n for LL in lower_left:\n x = LL[0]\n y = LL[1]\n wmax = self.map_dimensions[1] - x\n w = np.random.randint(0,wmax,size=1)[0]\n hmax = self.map_dimensions[0] - y\n h = np.random.randint(0,hmax,size=1)[0]\n rects += [(x,y,w,h)]\n self.coordinates__obstacles = rects", "def init_map(self, obstacle_rate=0.9):\n n = self.size()\n\n map_obstacles = [] # np.zeros((n, n)) # 1: obstacle, 0: non-obstacle\n \n for i in range(n):\n # We only need 2 bit to encode 1/0 for each element of NumberArray\n row = NumberArray(2, n)\n for j in range(n):\n if i == j:\n # map_obstacles[i][j] = 0\n row[j] = 0\n elif i > j:\n # map_obstacles[i][j] = map_obstacles[j][i]\n row[j] = map_obstacles[j][i]\n else:\n # map_obstacles[i][j] = 1 if random.random() > 0.9 else 0\n row[j] = 1 if random.random() > obstacle_rate else 0\n map_obstacles.append(row)\n\n self.map_obstacle = map_obstacles", "def _create_board(self):\n board = []\n for i in range(self.rows):\n row = []\n for j in range(self.columns):\n row.append(\n {\n \"c\": j + 1, # c column number base 1\n \"r\": i + 1, # r row number base 1\n \"v\": False, # v visible\n \"f\": 0, # f flag\n \"n\": 0, # n neighbors value\n \"b\": False, # has a bomb , The bombs are created on start\n }\n )\n board.append(row)\n self.board = board", "def spawn_obstacles(self):\n self.obstacle_sprites.empty()\n number_of_obstacles = random.randint(MIN_OBSTACLES, MAX_OBSTACLES)\n while len(self.obstacle_sprites) < number_of_obstacles:\n obstacle = Obstacle(random.randrange(0, WIDTH), random.randrange(HEIGHT - 500, HEIGHT))\n obstacle_collision = pygame.sprite.spritecollide(obstacle, self.obstacle_sprites, False)\n if not obstacle_collision:\n self.obstacle_sprites.add(obstacle)", "def new_tile(self):\n zero_list = []\n zero_cell = ()\n # self._cells = [[0 for col in range(self._grid_width)] for row in range(self._grid_height)]\n for row in range(self._grid_height):\n for col in range(self._grid_width):\n if self._cells[row][col] == 0:\n zero_cell = (row, col)\n zero_list.append(zero_cell)\n if len(zero_list) > 0:\n chance = random.randrange(0,10)\n cell_idx = random.randrange(len(zero_list))\n if chance == 9:\n self._cells[zero_list[cell_idx][0]][zero_list[cell_idx][1]] = 4\n else:\n self._cells[zero_list[cell_idx][0]][zero_list[cell_idx][1]] = 2\n else:\n print(\"You lost! 
Better luck next time!\")", "def generate_obstacles(self):\r\n obstacles = self.get_obstable_metrics\r\n obstacle_arrays = []\r\n\r\n for nb_obstacle in obstacles:\r\n empty_array = np.zeros(shape=(self.WINDOW_HEIGHT,\r\n self.WINDOW_WIDTH))\r\n start_location = 0 if nb_obstacle[2] == 1 else self.WINDOW_HEIGHT\r\n y, x = start_location - 1, nb_obstacle[3]\r\n empty_array[y, x] = -1\r\n\r\n for w_value in range(nb_obstacle[0]):\r\n x_updated = x + w_value\r\n\r\n for h_value in range(nb_obstacle[1]):\r\n if nb_obstacle[2] == 1:\r\n y_updated = y + h_value\r\n else:\r\n y_updated = y - h_value\r\n # Replace Value\r\n empty_array[y_updated, x_updated] = -1\r\n\r\n new_array = self.trim_whitespace(empty_array,\r\n nb_obstacle[2],\r\n self.MIN_GAP)\r\n obstacle_arrays.append(new_array)\r\n\r\n return obstacle_arrays", "def new_tile(self):\r\n random_row = random.randrange(0, self._grid_height)\r\n random_col = random.randrange(0, self._grid_width)\r\n random_choice = random.choice([2]*90 + [4] * 10)\r\n \r\n if 0 in [num for elem in self._cells for num in elem]: \r\n if self._cells[random_row][random_col] == 0:\r\n self._cells[random_row][random_col] = random_choice \r\n else:\r\n self.new_tile()\r\n else:\r\n pass", "def _generate_maze(self):\n grid = [[GridCell(x, y, self._treasure_prob) for x in range(self._map_size)] for y in range(self._map_size)]\n\n center_x = self._map_size // 2\n center_y = self._map_size // 2\n\n for _ in range(self._sparsity):\n current = grid[center_x][center_y]\n stack = list()\n start = True\n while len(stack) or start:\n start = False\n current.visited = True\n children = current.has_children(grid)\n\n if children:\n choice = np.random.choice(children)\n choice.visited = True\n\n stack.append(current)\n\n self._remove_walls(current, choice)\n\n current = choice\n\n elif stack:\n current = stack.pop()\n for row in grid:\n for cell in row:\n cell.visited = False\n\n # edit center area\n grid[center_x][center_y].set_treasury()\n for x in range(center_x - 1, center_x + 2):\n for y in range(center_y - 1, center_y + 2):\n grid[x][y].erase_walls()\n return grid", "def __generate_octagon_obstacles(self, world):\n obs_radius = self.cfg[\"obstacle\"][\"octagon\"][\"radius\"]\n obs_min_count = self.cfg[\"obstacle\"][\"octagon\"][\"min_count\"]\n obs_max_count = self.cfg[\"obstacle\"][\"octagon\"][\"max_count\"]\n obs_min_dist = self.cfg[\"obstacle\"][\"octagon\"][\"min_distance\"]\n obs_max_dist = self.cfg[\"obstacle\"][\"octagon\"][\"max_distance\"]\n\n # generate the obstacles\n obstacles = []\n obs_dist_range = obs_max_dist - obs_min_dist\n num_obstacles = randrange(obs_min_count, obs_max_count + 1)\n\n test_geometries = [r.global_geometry for r in world.robots]\n while len(obstacles) < num_obstacles:\n\n # generate position\n dist = obs_min_dist + (random() * obs_dist_range)\n phi = -pi + (random() * 2 * pi)\n x = dist * sin(phi)\n y = dist * cos(phi)\n\n # generate orientation\n theta = -pi + (random() * 2 * pi)\n\n # test if the obstacle overlaps the robots or the goal\n obstacle = OctagonObstacle(obs_radius, Pose(x, y, theta))\n intersects = False\n for test_geometry in test_geometries:\n intersects |= geometrics.convex_polygon_intersect_test(test_geometry, obstacle.global_geometry)\n if not intersects:\n obstacles.append(obstacle)\n return obstacles", "def obstacles(self):\r\n\r\n #Radious arround the head\r\n limit_sight = self.snake_sight\r\n head = self.body[0].position\r\n binary_map_complete = self.complete_mapping()\r\n map_matrix = 
np.matrix(binary_map_complete)\r\n obstacles = []\r\n\r\n #limits in all directions\r\n left_x = head[0] - limit_sight\r\n right_x = head[0] + limit_sight\r\n up_y = head[1] - limit_sight\r\n down_y = head[1] + limit_sight\r\n\r\n #submatrix with limits size\r\n snake_sight = map_matrix[up_y:down_y+1, left_x:right_x+1]\r\n\r\n #Special cases where the snake approximates to the borders\r\n ##Corners\r\n if left_x < 0 and up_y < 0:\r\n snake_sight = map_matrix[0:down_y+1, 0:right_x+1]\r\n interval_x = [self.limits[0] + left_x, self.limits[0]]\r\n interval_y = [self.limits[1] + up_y, self.limits[1]]\r\n interval_x_matrix = map_matrix[0:down_y+1, interval_x[0]:interval_x[1]]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], 0:right_x+1]\r\n interval_x_y_matrix = map_matrix[interval_y[0]:interval_y[1], interval_x[0]:interval_x[1]]\r\n temporal = np.c_[interval_x_y_matrix, interval_y_matrix]\r\n snake_sight = np.c_[interval_x_matrix, snake_sight]\r\n snake_sight = np.r_[temporal, snake_sight] \r\n return snake_sight\r\n \r\n if left_x < 0 and down_y > self.limits[1] - 1:\r\n snake_sight = map_matrix[up_y:self.limits[1], 0:right_x+1]\r\n interval_x = [self.limits[0] + left_x, self.limits[0]]\r\n interval_y = [0, down_y - self.limits[1] + 1]\r\n interval_x_matrix = map_matrix[up_y:self.limits[1], interval_x[0]:interval_x[1]]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], 0:right_x+1]\r\n interval_x_y_matrix = map_matrix[interval_y[0]:interval_y[1], interval_x[0]:interval_x[1]]\r\n temporal = np.c_[interval_x_y_matrix, interval_y_matrix]\r\n snake_sight = np.c_[interval_x_matrix, snake_sight]\r\n snake_sight = np.r_[snake_sight, temporal]\r\n return snake_sight\r\n \r\n if right_x > self.limits[0]-1 and up_y < 0:\r\n snake_sight = map_matrix[0:down_y+1, left_x:self.limits[0]]\r\n interval_x = [0, right_x - self.limits[0] + 1]\r\n interval_y = [self.limits[1] + up_y, self.limits[1]]\r\n interval_x_matrix = map_matrix[0:down_y+1, interval_x[0]:interval_x[1]]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], left_x:self.limits[0]]\r\n interval_x_y_matrix = map_matrix[interval_y[0]:interval_y[1], interval_x[0]:interval_x[1]]\r\n temporal = np.c_[interval_y_matrix, interval_x_y_matrix]\r\n snake_sight = np.c_[snake_sight, interval_x_matrix]\r\n snake_sight = np.r_[temporal, snake_sight]\r\n return snake_sight\r\n \r\n if right_x > self.limits[0]-1 and down_y > self.limits[1]-1:\r\n snake_sight = map_matrix[up_y:self.limits[1], left_x:self.limits[0]]\r\n interval_x = [0, right_x - self.limits[0] + 1]\r\n interval_y = [0, down_y - self.limits[1] + 1]\r\n interval_x_matrix = map_matrix[up_y:self.limits[1], interval_x[0]:interval_x[1]]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], left_x:self.limits[0]]\r\n interval_x_y_matrix = map_matrix[interval_y[0]:interval_y[1], interval_x[0]:interval_x[1]]\r\n temporal = np.c_[interval_y_matrix, interval_x_y_matrix]\r\n snake_sight = np.c_[snake_sight, interval_x_matrix]\r\n snake_sight = np.r_[snake_sight, temporal]\r\n return snake_sight\r\n\r\n ##Middle\r\n if left_x < 0:\r\n snake_sight = map_matrix[up_y:down_y+1, 0:right_x+1]\r\n interval_x = [self.limits[0] + left_x, self.limits[0]]\r\n interval_x_matrix = map_matrix[up_y:down_y+1, interval_x[0]:interval_x[1]]\r\n snake_sight = np.c_[interval_x_matrix, snake_sight]\r\n return snake_sight\r\n\r\n if right_x > self.limits[0]-1:\r\n snake_sight = map_matrix[up_y:down_y+1, left_x:self.limits[0]]\r\n interval_x = [0, right_x - self.limits[0] + 1]\r\n 
interval_x_matrix = map_matrix[up_y:down_y+1, interval_x[0]:interval_x[1]]\r\n snake_sight = np.c_[snake_sight, interval_x_matrix]\r\n return snake_sight\r\n\r\n if up_y < 0:\r\n snake_sight = map_matrix[0:down_y+1, left_x:right_x+1]\r\n interval_y = [self.limits[1] + up_y, self.limits[1]]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], left_x:right_x+1]\r\n snake_sight = np.r_[interval_y_matrix, snake_sight]\r\n return snake_sight\r\n \r\n if down_y > self.limits[1]-1:\r\n snake_sight = map_matrix[up_y:self.limits[1], left_x:right_x+1]\r\n interval_y = [0, down_y - self.limits[1] + 1]\r\n interval_y_matrix = map_matrix[interval_y[0]:interval_y[1], left_x:right_x+1]\r\n snake_sight = np.r_[snake_sight, interval_y_matrix]\r\n return snake_sight\r\n\r\n return snake_sight", "def new_tile(self):\n \n empty_items = []\n for row in range(self.get_grid_height()):\n for col in range(self.get_grid_width()):\n if self.get_tile(row, col) == 0:\n empty_items.append((row, col))\n \n random_row = 0\n random_col = 0\n if len(empty_items) != 0:\n random_empty_tile = random.randrange(0, len(empty_items))\n (random_row, random_col) = empty_items[random_empty_tile]\n else:\n return\n # the % of getting \"4\" from 0~9 is 10%\n random_time = random.randrange(0, 10)\n \n if random_time == 4:\n self._cells[random_row][random_col] = 4\n else:\n self._cells[random_row][random_col] = 2", "def _generate_cells(self) -> None:\n for i in range(15):\n for j in range(15):\n c = Cell(x=i, y=j)\n c.answer = self.puzzle.solution[j*self.width+i]\n self.cells[(j, i)] = c # row, col", "def make_board(self, ):\n for r in range(self.boardSize):\n for c in range(self.boardSize): # avoid redundant calculation by adding neighbors \"behind\" current cell\n new_cell = Cell(r, c)\n self.board[r][c] = new_cell\n if c > 0: # add left neighbor-cell\n new_cell.add_neighbor(self.board[r][c-1])\n if r > 0: # add above neighbor-cell\n new_cell.add_neighbor(self.board[r-1][c])\n if r > 0 and c < self.boardSize-1: # add right diagonal neighbor-cell\n new_cell.add_neighbor(self.board[r-1][c+1])", "def generate_board(self):\n random.seed(self.seed)\n for row in self.grid:\n for column in row:\n probability = random.random()\n if self.live_probability > probability:\n column.set_alive()", "def new_tile(self):\n # replace with your code\n empty_list = []\n counter_1 = 0\n for _ in self._grid:\n counter_2 = 0\n line = _\n for blank in line:\n if blank == 0:\n blank_tile = (counter_1, counter_2)\n empty_list.append(blank_tile)\n counter_2 += 1\n else:\n counter_2 += 1\n counter_1 += 1\n #print empty_list\n \n self._tile = empty_list[random.randrange(len(empty_list))]\n \n value = [2,2,2,2,2,2,2,2,2,4]\n tile_value = value[random.randint(0,9)]\n \n self.set_tile(self._tile[0], self._tile[1], tile_value)", "def new_tile(self):\n while True:\n random_row = random.randrange(self._grid_height)\n random_column = random.randrange(self._grid_width)\n if self._grid[random_row][random_column] == 0:\n self._grid[random_row][random_column] = random.choice([2] * 9 + [4])\n break", "def random_map(self, world):\n obstacles = []\n if self.cfg[\"obstacle\"][\"octagon\"][\"enabled\"]:\n obstacles += self.__generate_octagon_obstacles(world)\n if self.cfg[\"obstacle\"][\"rectangle\"][\"enabled\"]:\n obstacles += self.__generate_rectangle_obstacles(world)\n\n # update the current obstacles and goal\n self.current_obstacles = obstacles\n self.add_new_goal()\n\n # apply the new obstacles and goal to the world\n self.apply_to_world(world)", "def 
__generate_goal_board(self):\n element = 1\n array = []\n\n for row in range(self._n):\n row_to_append = []\n for col in range(self._n):\n row_to_append.append(element)\n element += 1\n array.append(row_to_append)\n\n array[self._n - 1][self._n - 1] = 0\n self._solved_board = Board(array=array, space=[self._n - 1, self._n - 1])", "def new_tile(self):\n col = random.choice(range(self.grid_width))\n row = random.choice(range(self.grid_height))\n if self.grid[row][col] == 0:\n if random.random() >= 0.9:\n self.grid[row][col] = 4\n else:\n self.grid[row][col] = 2\n else:\n self.new_tile()", "def init_place(self):\n for i in range(self.numCells):\n x = randint(0,self.nx)\n y = randint(0,self.ny)\n while not self.is_empty(x,y):\n x = randint(0, self.nx)\n y = randint(0, self.ny)\n assert self.put_cell(x, y, i) is True\n self.cells.append(Cell(x,y))\n\n assert self.calc_cost() is True", "def new_tile(self):\r\n # replace with your code\r\n empty_square_lists = []\r\n for row in range(self._grid_height):\r\n for col in range(self._grid_width):\r\n if(self.get_tile(row, col) == 0):\r\n empty_square_lists.append((row, col))\r\n \r\n if len(empty_square_lists) == 0:\r\n return \"game over!\"\r\n \r\n random_cell = random.choice(empty_square_lists)\r\n random_cell_row = random_cell[0]\r\n random_cell_col = random_cell[1]\r\n \r\n values = [2] * 90 + [4] * 10\r\n value = random.choice(values)\r\n \r\n self.set_tile(random_cell_row, random_cell_col, value)", "def __generate_rectangle_obstacles(self, world):\n obs_min_dim = self.cfg[\"obstacle\"][\"rectangle\"][\"min_dim\"]\n obs_max_dim = self.cfg[\"obstacle\"][\"rectangle\"][\"max_dim\"]\n obs_max_combined_dim = self.cfg[\"obstacle\"][\"rectangle\"][\"max_combined_dim\"]\n obs_min_count = self.cfg[\"obstacle\"][\"rectangle\"][\"min_count\"]\n obs_max_count = self.cfg[\"obstacle\"][\"rectangle\"][\"max_count\"]\n obs_min_dist = self.cfg[\"obstacle\"][\"rectangle\"][\"min_distance\"]\n obs_max_dist = self.cfg[\"obstacle\"][\"rectangle\"][\"max_distance\"]\n\n # generate the obstacles\n obstacles = []\n obs_dim_range = obs_max_dim - obs_min_dim\n obs_dist_range = obs_max_dist - obs_min_dist\n num_obstacles = randrange(obs_min_count, obs_max_count + 1)\n\n test_geometries = [r.global_geometry for r in world.robots]\n while len(obstacles) < num_obstacles:\n # generate dimensions\n width = obs_min_dim + (random() * obs_dim_range )\n height = obs_min_dim + (random() * obs_dim_range )\n while width + height > obs_max_combined_dim:\n height = obs_min_dim + (random() * obs_dim_range )\n\n # generate position\n dist = obs_min_dist + (random() * obs_dist_range)\n phi = -pi + (random() * 2 * pi)\n x = dist * sin(phi)\n y = dist * cos(phi)\n\n # generate orientation\n theta = -pi + (random() * 2 * pi)\n\n # test if the obstacle overlaps the robots or the goal\n obstacle = RectangleObstacle(width, height, Pose(x, y, theta))\n intersects = False\n for test_geometry in test_geometries:\n intersects |= geometrics.convex_polygon_intersect_test(test_geometry, obstacle.global_geometry)\n if not intersects:\n obstacles.append(obstacle)\n return obstacles", "def __init__(self, window: pg.Surface):\n self.window = window\n self.board_matrix = np.full(Dimension.board_size(), 1)\n self.maximum_obstacles_on_board = 10\n self.obstacles = self.create_obstacles()", "def generate_board(rows, cols):\n aux = np.zeros((rows, cols))\n for i in range(rows):\n for j in range(cols):\n if np.random.random() < 0.5:\n aux[i][j] = 1\n return aux", "def __init__(self, rows, columns, 
live_probability=0.3, seed=0):\n self.live_probability = live_probability\n self.seed = seed\n self.rows = rows\n self.columns = columns\n self.grid = [\n [Cell() for column_cells in range(self.columns)]\n for row_cells in range(self.rows)\n ]\n\n self.generate_board()", "def make_board(self):\n generate = lambda: random.randint(1, 100) in range(1, self.p_pit+1)\n some_number = self.some_number\n agent = Agent(some_number)\n agent.program = Oozeplorer_Percept(agent)\n self.add_agent(agent)\n gold = Gold()\n self.add_thing(gold, None)\n for row in range(1, some_number + 1):\n for col in range(1, some_number + 1):\n valid_spot = (row, col) != gold.location and (row, col) != (1, 1)\n if valid_spot and generate():\n t_pt = Pit()\n t_pt.location = (row, col)\n self.things.append(t_pt)", "def generate():\n global BOARD\n next = [[0] * ROWS for _ in range(COLS)]\n # Loop through every spot in our 2D array and check spots neighbors\n for x in range(COLS):\n for y in range(ROWS):\n # Add up all the states in a 3x3 surrounding grid\n neighbors = 0\n for i in range(-1, 2):\n for j in range(-1, 2):\n nx = (x + i + COLS) % COLS\n ny = (y + j + ROWS) % ROWS\n neighbors += BOARD[nx][ny]\n # A little trick to subtract the current cell's state since\n # we added it in the above loop\n neighbors -= BOARD[x][y]\n # Rules of Life\n if BOARD[x][y] == 1 and neighbors < 2 : next[x][y] = 0 # Loneliness\n elif BOARD[x][y] == 1 and neighbors > 3 : next[x][y] = 0 # Overpopulation\n elif BOARD[x][y] == 0 and neighbors == 3: next[x][y] = 1 # Reproduction\n else: next[x][y] = BOARD[x][y] # Stasis\n # Next is now our board\n BOARD = next", "def randomCells(width, height):\r\n\tA = createBoard(height, width)\r\n\r\n\tfor row in range(height):\r\n\t\tfor col in range(width):\r\n\t\t\tif row > 0 and row < height-1:\r\n\t\t\t\tif col > 0 and col < width-1:\r\n\t\t\t\t\tA[row][col] = random.choice([0,1]) \r\n\r\n\treturn A", "def randomGrid(N):\n grid = np.zeros((N,N), dtype=int)\n for i in range(N): \n for j in range(N): \n if np.random.uniform() < 0.2:\n # cell alive\n grid[i,j] = int(np.random.uniform(low=1, high=(256*256*256)-1))\n return grid" ]
[ "0.78306466", "0.7273958", "0.70843565", "0.70556134", "0.69390327", "0.6925378", "0.6871779", "0.67835164", "0.66807145", "0.6673614", "0.6662289", "0.6659398", "0.6652911", "0.66385484", "0.6629301", "0.65946746", "0.6584933", "0.6569968", "0.6563175", "0.65319467", "0.65285003", "0.6526813", "0.651448", "0.650033", "0.6498514", "0.64860123", "0.64729375", "0.6471756", "0.6447816", "0.6444872" ]
0.76295495
1
Checks if the clicked square is not an obstacle and it is possible to start/end a path here.
def is_square_empty(self, clicked_square: Square) -> bool:
    return clicked_square not in self.obstacles
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_obstacle_in_path(self):\n for obstacle in self.obstacles.tolist():\n print(\"obstacle.get_point():\", obstacle.get_point())\n dist_to_obstacle = VectorMath.get_vector_magnitude(np.subtract(obstacle.get_point(), self.drone.get_point()))\n if dist_to_obstacle < obstacle.get_radius() + Constants.DETECTION_THRESHOLD:\n if isinstance(obstacle, StationaryObstacle):\n paths = self.generate_possible_paths(obstacle)\n\n if len(paths) != 0:\n return True, np.array(paths)\n elif isinstance(obstacle, MovingObstacle):\n pass\n\n return False, None", "def clicked(self, x_pos, y_pos):\n img = self.tower_imgs\n if self.x - img.get_width() // 2 + self.width >= x_pos >= self.x - img.get_width() // 2:\n if self.y + self.height - img.get_height() // 2 >= y_pos >= self.y - img.get_height() // 2:\n return True\n return False", "def _detect_obstacles(self):\n def _distance(point, line_point1, line_point2):\n \"\"\"calcuate the distance between a point and a line\"\"\"\n vec1 = line_point1 - point\n vec2 = line_point2 - point\n distance = np.abs(np.cross(vec1,vec2)) / np.linalg.norm(line_point1-line_point2)\n return distance\n\n def _acute_angle(point, line_point1, line_point2):\n \"\"\"detetrmine if the point is whithin the boundary of the line through law of cosines\"\"\"\n base_line = np.linalg.norm(line_point1-line_point2)\n assert base_line > 0, \"check the library useage\"\n line1 = np.linalg.norm(point - line_point1)\n line2 = np.linalg.norm(point - line_point2)\n cos_angle_1 = (base_line**2 + line1**2 - line2**2)/(2*base_line*line1)\n cos_angle_2 = (base_line**2 + line2**2 - line1**2)/(2*base_line*line2)\n if cos_angle_1 * cos_angle_2 > 0:\n return True\n else:\n return False\n\n if self.obstacles != \"None\": # if user assigned some obstacles\n for line in self.env_config: \n line_point1, line_point2 = np.array(line[0]), np.array(line[1])\n point = np.array(self.state[:2])\n distance = _distance(point, line_point1, line_point2)\n acute_angle = _acute_angle(point, line_point1, line_point2)\n if distance <= 0.02 and acute_angle:\n self.adsorption = True\n break\n else:\n self.adsorption = False", "def check_shot_on_target(self, shot):\n # Defining a few variables to ease the reading\n # Here we define the x and y interval of the goal's segment\n x_min = min(self.s_pos.x, self.e_pos.x)\n x_max = max(self.s_pos.x, self.e_pos.x)\n\n y_min = min(self.s_pos.y, self.e_pos.y)\n y_max = max(self.s_pos.y, self.e_pos.y)\n\n # Shortening variables names\n o_x = shot.opponent.pos.x\n o_y = shot.opponent.pos.y\n\n # If the angle = pi / 2 or - pi / 2, then tan(angle) is undefined\n # In these cases, the shot is vertical, therefore it is valid\n # iff the x coordinate of the opponent is in the goal's x interval\n if abs(shot.angle) == math.pi / 2:\n return self.is_in_interval(x_min, x_max, o_x)\n\n # If the angle = 0, pi or -pi, then tan(angle) is 0 which can lead to \n # undefined intersection points (if the goal is vertical for example)\n # although there is an intersection point\n # \n # In these cases, the shot is horizontal, therefore it is valid\n # iff the y coordinate of the opponent is in the goal's y interval\n if abs(shot.angle) == math.pi or shot.angle == 0:\n return self.is_in_interval(y_min, y_max, o_y)\n\n # Using tan the least amount of time possible, for this is a slow function\n tan_theta = math.tan(shot.angle)\n\n # Define the LE of the shot\n le1 = LinearEquation(tan_theta, o_y - tan_theta * o_x)\n le2 = None\n\n # If the goal is vertical, finding the intersection point\n # is not possible 
using the normal way\n #\n # That being said, unless the LE of the shot is vertical too (which it \n # isn't as it is checked before hand) there has to be an intersection point\n # This intersection must happen when at the x coodinate of the goal's segment\n # therefore, it is possible to compute the y coordinate of the intersection by\n # computing the application of the shot's LE on this ex coordinate\n #\n # Then, the resulting y is valid iff it is in the goal's segment interval\n if self.e_pos.x - self.s_pos.x == 0:\n y = le1.apply(self.e_pos.x)\n return self.is_in_interval(y_min, y_max, y)\n\n # The normal way of solving the intersection of these two LEs\n else:\n\n # Shortening variables by computing the coefficient of the goal's LE\n ratio = (self.e_pos.y - self.s_pos.y) / (self.e_pos.x - self.s_pos.x)\n\n # If the lines are parallels (have the same coefficient) return False\n if math.tan(shot.angle) == ratio:\n return False\n\n # Defining the goal's LE\n le2 = LinearEquation(ratio, self.e_pos.y - self.e_pos.x * ratio)\n\n # Finding the intersection point of the two LEs\n # If there isn't one, return False (but there should be one\n # given all the asserts we do before hand, this is just for completion sake)\n p_intersect = le1.intersection(le2)\n if p_intersect == None:\n return False\n\n # If the intersection point's abscissa is in the goal's x interval, then it is\n # a valid abstracted shot going \n return self.is_in_interval(x_min, x_max, p_intersect.x)", "def isPieceClicked(self):\r\n if self.clickedPiece is None:\r\n return False\r\n return True", "def obstacle_prone_area(self,image):\r\n\r\n start_x=int(self.start[0])\r\n start_y=int(self.start[1])\r\n goal_x=int(self.goal[0])\r\n goal_y=int(self.goal[1])\r\n print(goal_x,goal_y)\r\n if (image[int(self.maximum_size-goal_x),int(goal_y),0]==0) or ((image[int(self.maximum_size-start_x),int(start_y),0]==0)):\r\n #print(1)\r\n return False\r\n else:\r\n #print(2)\r\n return True", "def generate_possible_paths(self, obstacle):\n if self.does_uav_intersect_obstacle_vertically(obstacle, self.drone.get_point(), self.drone.get_waypoint_holder().get_current_waypoint()):\n if self.does_path_intersect_obstacle_2d(obstacle, self.drone.get_point(), self.drone.get_waypoint_holder().get_current_waypoint()):\n new_attempt_pos_points = [\n [obstacle.get_point()[0] + obstacle.get_radius(), obstacle.get_point()[1] + obstacle.get_radius(), self.drone.get_point()[2]],\n [obstacle.get_point()[0] - obstacle.get_radius(), obstacle.get_point()[1] - obstacle.get_radius(), self.drone.get_point()[2]],\n [obstacle.get_point()[0] + obstacle.get_radius(), obstacle.get_point()[1] - obstacle.get_radius(), self.drone.get_point()[2]],\n [obstacle.get_point()[0] - obstacle.get_radius(), obstacle.get_point()[1] + obstacle.get_radius(), self.drone.get_point()[2]],\n [obstacle.get_point()[0], obstacle.get_point()[1] + obstacle.get_radius(), obstacle.get_height() + (Constants.STATIONARY_OBSTACLE_SAFETY_RADIUS * 2)],\n [obstacle.get_point()[0], obstacle.get_point()[1] - obstacle.get_radius(), obstacle.get_height() + (Constants.STATIONARY_OBSTACLE_SAFETY_RADIUS * 2)],\n [obstacle.get_point()[0] + obstacle.get_radius(), obstacle.get_point()[1], obstacle.get_height() + (Constants.STATIONARY_OBSTACLE_SAFETY_RADIUS * 2)],\n [obstacle.get_point()[0] - obstacle.get_radius(), obstacle.get_point()[1], obstacle.get_height() + (Constants.STATIONARY_OBSTACLE_SAFETY_RADIUS * 2)]\n ]\n\n new_paths = []\n for new_pos_point in new_attempt_pos_points:\n if not 
self.does_path_intersect_obstacle_3d(obstacle, self.drone.get_point(), new_pos_point) and self.flight_boundary.is_point_in_bounds(new_pos_point):\n for recursive_new_pos_point in new_attempt_pos_points:\n if self.flight_boundary.is_point_in_bounds(recursive_new_pos_point) and abs(recursive_new_pos_point[2] - new_pos_point[2]) < 5:\n if recursive_new_pos_point[0] != new_pos_point[0] or recursive_new_pos_point[1] != new_pos_point[1]:\n if not self.does_path_intersect_obstacle_3d(obstacle, new_pos_point, recursive_new_pos_point) and not self.does_path_intersect_obstacle_3d(obstacle, recursive_new_pos_point, self.drone.get_waypoint_holder().get_current_waypoint()):\n new_paths.append([new_pos_point, recursive_new_pos_point])\n\n # Uncomment for DEBUGGING ONLY\n for path in new_paths:\n print(\"Point:\", str(path))\n\n return new_paths\n\n return []", "def is_at_goal(self):\n return self._current_loc.get_row() == BoardPath._goal_loc.get_row() and \\\n self._current_loc.get_column() == BoardPath._goal_loc.get_column()", "def is_clickable(self, x, y, board):\n if board[x][y] == self.EMPTY:\n return False\n for offset_x, offset_y in self.SPREAD:\n try:\n if board[x + offset_x][y + offset_y] == board[x][y]:\n return True\n except IndexError:\n pass\n return False", "def passable(self, point):\n return point not in self.obstacles", "def click(self, X, Y):\n img = self.tower_imgs\n if self.x - img.get_width() // 2 + self.width >= X >= self.x - img.get_width() // 2:\n if self.y + self.height - img.get_height() // 2 >= Y >= self.y - img.get_height() // 2:\n return True\n return False", "def __check_obstacle_intersections(self, goal):\n # generate a proximity test geometry for the goal\n min_clearance = self.cfg[\"goal\"][\"min_clearance\"]\n n = 6 # goal is n sided polygon\n goal_test_geometry = []\n for i in range(n):\n goal_test_geometry.append(\n [goal[0] + min_clearance * cos(i * 2 * pi / n),\n goal[1] + min_clearance * sin(i * 2 * pi / n)])\n goal_test_geometry = Polygon(goal_test_geometry)\n intersects = False\n for obstacle in self.current_obstacles:\n intersects |= geometrics.convex_polygon_intersect_test(goal_test_geometry, obstacle.global_geometry)\n return intersects", "def isGoalState(self, state):\n coordinates = state[0]\n edges = state[1]\n corners = self.corners\n TotalCorners = 4\n\n if(len(edges) == TotalCorners):\n return True\n else:\n if coordinates in corners:\n if not coordinates in edges:\n edges.append(coordinates)\n return False", "def _is_wall(self, pos):\r\n return self.course[pos[0], pos[1]] == -1", "def _checkPath(self):\r\n if(not self._isStraightLine()):\r\n raise IllegalMoveException(\"Move is not a straight line\")\r\n path = self._getPath()\r\n if(any(cell.isOccupied() for cell in path)):\r\n raise IllegalMoveException(\"There are pawns on the path\")\r\n return True", "def wasClicked(self, point):\n p1 = self.rect.getP1()\n p2 = self.rect.getP2()\n if (p1.getX() <= point.getX() <= p2.getX() and\n p1.getY() <= point.getY() <= p2.getY()):\n return True\n return False", "def is_map_obstacle_in_screen_range(self):\n raise NotImplementedError", "def is_in_obstacle(self, x: float, y: float) -> bool:\n for obstacle in self.obstacles:\n if obstacle.contains_point((x, y)):\n return True\n return False", "def check_for_obstacles(self):\n obs = False\n obs_p = []\n for point in self.obstacles:\n if -0.15 <= point[1] <= 0.15: # robot is 178mm wide\n # Obstacles should be less than or equal to 0.2 m away before being detected\n if 0 <= point[0] <= .2:\n obs_p.append(point)\n obs 
= True\n if obs:\n pos = self.determine_pos_of_obstacle(obs_p)\n data = Obstacle()\n data.x = pos[0]\n data.y = pos[1]\n data.obstacle = True\n self.obs_pub.publish(data)", "def goal_occupied(self, view):\n for line in view.obstacles:\n if linesegdist2(line.p1, line.p2, self.goal) < self.radius ** 2:\n return True\n\n for p in view.pedestrians:\n if p.velocity.length2() == 0.0:\n if p.position.distance_to2(self.goal) < p.radius:\n return True\n\n return False", "def _is_closing_shape(self, x, y, new_obstacle):\n first_obstacle_edge = new_obstacle[0]\n\n close_x = abs(first_obstacle_edge[0] - x) < self.CLOSE_ENOUGH_POINTS_TOLERANCE\n close_y = abs(first_obstacle_edge[1] - y) < self.CLOSE_ENOUGH_POINTS_TOLERANCE\n\n return close_x and close_y", "def isInGoal(self):\n coordx= self.playerPos.x\n coordy= self.playerPos.y\n target = 0 if self.id_team == 1 else 1\n\n if((((target == 0)and (coordx<=5))|\n ((target == 1) and(coordx>145))) \n and (coordy<=50 and coordy>=40)):\n return True\n else:\n return False", "def click(self, event):\n if self.segs == []:\n startCircle = self.findInter(event.x, event.y)\n if startCircle:\n xa, ya, xb, yb = self.can.coords(startCircle)\n self.firstCoords = ((xa + xb)/2, (ya + yb)/2)\n if not self.helpShown:\n self.showHelp()", "def __hit_bricks(self, g_object):\n return type(g_object) == GRect and g_object != self.__paddle", "def __isTileGoalState(self, point):\n return point == self.goalPoint", "def check_obstruction(self, start_x, start_y, end_x, end_y, piece):\n\n # Displacement for any single point in the area\n disp_x = end_x - start_x\n disp_y = end_y - start_y\n\n # Piece's area to shift for obstructions\n space = piece.get_area()\n\n # Game board area, initialize check spaces for while loop\n board_space = self._game_board.get_board_area()\n check_x = 0\n check_y = 0\n\n # Assign correct shift value for displacement\n if disp_x > 0:\n shift_x = 1\n elif disp_x == 0:\n shift_x = 0\n else:\n shift_x = -1\n\n if disp_y > 0:\n shift_y = 1\n elif disp_y == 0:\n shift_y = 0\n else:\n shift_y = -1\n\n # For each point in space\n for point in space:\n scale = 1\n # Gradually shift values in piece area up to displacement and check if the space is occupied\n while (check_x, check_y) != (point[0] + disp_x, point[1] + disp_y):\n check_x = point[0] + shift_x * scale\n check_y = point[1] + shift_y * scale\n\n # If an obstruction is found, and it is not a piece meant to be captured\n # ie, a piece in the end-position, return True\n if ((check_x, check_y) not in space) and board_space[check_x][check_y] != \" \":\n if (check_x, check_y) != (point[0] + disp_x, point[1] + disp_y):\n return True\n scale += 1\n # Return False if not obstructed\n return False", "def goal_test(self, state):\n for x, y in state.alvos:\n if state.tabuleiro[x][y] is not BOX_ON_TARGET:\n return False\n return True", "def check_movement(self):\n is_clear = True # default return value if no obstacles\n # !!! 
IR_SENSORS DISABLED\n if self.move_state == MOV_FORWARD:\n if self.l.look_for_obstacle(OBST_FRONT) == True:\n is_clear = False\n return is_clear", "def check_path(self, cur_pos, new_pos, board, state):\n\n new_row = self.ind(new_pos)[0]\n new_col = self.ind(new_pos)[1]\n cur_row = self.ind(cur_pos)[0]\n cur_col = self.ind(cur_pos)[1]\n cannon_pieces = [Cannon('BLUE'), Cannon('RED')]\n \n # Ensures the range is always in the right order\n if new_row > cur_row: \n ran_r = range(cur_row + 1, new_row, 1)\n elif cur_row > new_row:\n ran_r = range(cur_row - 1, new_row, -1)\n \n elif new_col > cur_col:\n ran_c = range(cur_col + 1, new_col, 1)\n elif cur_col > new_col:\n ran_c = range(cur_col - 1, new_col, -1)\n else:\n return False\n \n # Checking if the movement is left or right is legal\n if new_row == cur_row:\n print(\"it's in the new_row == cur_row\")\n # Check if there is a legal piece (a non-Cannon) is contained in the path\n counter = 0\n print(counter)\n for col_spot in ran_c:\n if board[cur_row][col_spot] is not None:\n counter += 1\n\n if counter == 0: \n print(\"jump!\")\n return True\n \n # Checking if the movement vertical is legal\n if new_col == cur_col:\n print(\"it's in the new_col == cur_col\")\n # Check if there is a legal piece (a non-Cannon) is contained in the path\n counter = 0\n for row_spot in ran_r:\n if board[row_spot][cur_col] is not None:\n counter += 1\n print(board[row_spot][cur_col])\n print(counter)\n if counter == 0:\n print(\"jump!\")\n return True", "def check_clicked(self, events):\n x = self.x\n y = self.y\n xsize = self.xsize\n ysize = self.ysize\n (a, b) = pygame.mouse.get_pos()\n if a>x and b>y and a<x+xsize and b<y+ysize:\n for event in events:\n if event.type == pygame.MOUSEBUTTONDOWN:\n self.clickedAction(events)\n self.clicked = True\n return self.clicked" ]
[ "0.67216086", "0.6399878", "0.63358396", "0.6280128", "0.626441", "0.6188265", "0.61713976", "0.61628646", "0.60517824", "0.6038364", "0.6031158", "0.6017531", "0.6014424", "0.60048735", "0.60025954", "0.6000857", "0.59907734", "0.59674644", "0.5930595", "0.5913718", "0.59069806", "0.5895609", "0.5891925", "0.5876296", "0.58635116", "0.5860127", "0.58569705", "0.58458287", "0.5844424", "0.58435255" ]
0.6928356
0
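A note on the negatives in the row above: they come from 2-D board-game and robot-navigation code and keep repeating two geometric checks, point-in-rectangle hit testing (the click handlers) and testing whether a straight path passes within some clearance of an obstacle (the does_path_intersect / goal_occupied style checks). The sketch below is a generic illustration of those two checks, written for this note rather than taken from any of the quoted repositories; the function names and the tuple-based Point type are hypothetical.

import math
from typing import Tuple

Point = Tuple[float, float]


def point_in_rect(p: Point, top_left: Point, width: float, height: float) -> bool:
    """Axis-aligned hit test: is p inside the rectangle anchored at top_left?"""
    x, y = p
    rx, ry = top_left
    return rx <= x <= rx + width and ry <= y <= ry + height


def segment_circle_intersects(a: Point, b: Point, center: Point, radius: float) -> bool:
    """Does the segment from a to b pass within `radius` of `center`?"""
    ax, ay = a
    bx, by = b
    cx, cy = center
    abx, aby = bx - ax, by - ay
    acx, acy = cx - ax, cy - ay
    ab_len_sq = abx * abx + aby * aby
    # Degenerate (zero-length) segment: fall back to a point-distance check.
    t = 0.0 if ab_len_sq == 0 else max(0.0, min(1.0, (acx * abx + acy * aby) / ab_len_sq))
    closest = (ax + t * abx, ay + t * aby)
    return math.dist(closest, center) <= radius


# Example: a path from (0, 0) to (10, 0) grazes an obstacle of radius 3 at (5, 2).
# segment_circle_intersects((0, 0), (10, 0), (5, 2), 3.0) -> True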
Creates a new board state with a different number and placement of obstacles.
def recreate_obstacles(self):
    self.board_matrix = np.full(Dimension.board_size(), 1)
    self.obstacles = self.create_obstacles()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_board(self):\n board = []\n for i in range(self.rows):\n row = []\n for j in range(self.columns):\n row.append(\n {\n \"c\": j + 1, # c column number base 1\n \"r\": i + 1, # r row number base 1\n \"v\": False, # v visible\n \"f\": 0, # f flag\n \"n\": 0, # n neighbors value\n \"b\": False, # has a bomb , The bombs are created on start\n }\n )\n board.append(row)\n self.board = board", "def __init__(self, num_rows = 4, num_cols = 4,\n first_mover = \"W\", top_left = \"B\",\n how_to_win = \">\", initial_config=[]):\n # initial_config was made for AI Othello to\n # get around pass-by-reference behavior of lists.\n if (4 > num_rows > 16) or num_rows % 2 != 0:\n raise Exception\n else:\n self._num_rows = num_rows\n if (4 > num_cols > 16) or num_cols % 2 != 0:\n raise Exception\n else:\n self._num_cols = num_cols\n if first_mover != \"B\" and first_mover != \"W\":\n raise Exception\n else:\n self._turn = first_mover\n if top_left != \"B\" and top_left != \"W\":\n raise Exception\n else:\n self._top_left = top_left\n if how_to_win != \">\" and how_to_win != \"<\":\n raise Exception\n else:\n self._how_to_win = how_to_win\n\n if initial_config == []:\n self._board = self._make_board(num_rows, num_cols, top_left)\n else:\n self._board = deepcopy(initial_config)\n \n self._game_over = False\n self._winner = \" \"\n self._tl_cell = (0, 0)\n self._tr_cell = (0, num_cols-1)\n self._bl_cell = (num_rows-1, 0)\n self._br_cell = (num_rows-1, num_cols-1)\n self._ls_cells = [(c, 0) for c in range(1, num_rows-1)]\n self._rs_cells = [(c, num_cols-1) for c in range(1, num_rows-1)]\n self._ts_cells = [(0, c) for c in range(1, num_cols-1)]\n self._bs_cells = [(num_rows-1, c) for c in range(1, num_cols-1)]\n #^Note how ranges start from 1 and go to num_rows-1 to avoid corners,\n #which are processed differently", "def generate():\n global BOARD\n next = [[0] * ROWS for _ in range(COLS)]\n # Loop through every spot in our 2D array and check spots neighbors\n for x in range(COLS):\n for y in range(ROWS):\n # Add up all the states in a 3x3 surrounding grid\n neighbors = 0\n for i in range(-1, 2):\n for j in range(-1, 2):\n nx = (x + i + COLS) % COLS\n ny = (y + j + ROWS) % ROWS\n neighbors += BOARD[nx][ny]\n # A little trick to subtract the current cell's state since\n # we added it in the above loop\n neighbors -= BOARD[x][y]\n # Rules of Life\n if BOARD[x][y] == 1 and neighbors < 2 : next[x][y] = 0 # Loneliness\n elif BOARD[x][y] == 1 and neighbors > 3 : next[x][y] = 0 # Overpopulation\n elif BOARD[x][y] == 0 and neighbors == 3: next[x][y] = 1 # Reproduction\n else: next[x][y] = BOARD[x][y] # Stasis\n # Next is now our board\n BOARD = next", "def _build_board(y_size, x_size, game_board):\n\n for y_coordinate in range(1, y_size + 1):\n for x_coordinate in range(1, x_size + 1):\n game_board[(x_coordinate, y_coordinate)] = {0: {}, 1: {}, 2: {}}", "def create_obstacles(self) -> List[Square]:\n obstacles_number = random.randint(1, self.maximum_obstacles_on_board)\n obstacles = list()\n\n while len(obstacles) < obstacles_number:\n\n obstacle_x_pos = random.randint(0, Dimension.board_width() - 1)\n obstacle_y_pos = random.randint(0, Dimension.board_height() - 1)\n obstacle = Square(obstacle_x_pos, obstacle_y_pos)\n if obstacle not in obstacles:\n self.board_matrix[obstacle_y_pos][obstacle_x_pos] = 0\n obstacles.append(obstacle)\n\n return obstacles", "def advance_board(self):\n # We can advance the board using a pretty simple convolution,\n # so we don't have to execute a lot of loops in python.\n # Of 
course, this probably won't be sufficient for extremely\n # large boards.\n self.num_steps += 1\n board = self.board\n cfilter = np.array([[1,1,1],[1,0,1],[1,1,1]], dtype=np.uint16)\n\n alive = board & CellTypes.alive > 0\n spawning = board & CellTypes.spawning > 0\n frozen = board & CellTypes.frozen > 0\n\n can_die = ~frozen & (\n convolve2d(board & CellTypes.preserving, cfilter) == 0)\n can_grow = ~frozen & (\n convolve2d(board & CellTypes.inhibiting, cfilter) == 0)\n\n num_neighbors = convolve2d(alive, cfilter)\n num_spawn = convolve2d(spawning, cfilter)\n spawn_prob = 1 - (1 - self.spawn_prob)**num_spawn\n has_spawned = coinflip(spawn_prob, board.shape)\n\n born_rule = np.zeros(9, dtype=bool)\n born_rule[list(self.born_rule)] = True\n dead_rule = np.ones(9, dtype=bool)\n dead_rule[list(self.survive_rule)] = False\n\n new_alive = (born_rule[num_neighbors] | has_spawned) & ~alive & can_grow\n new_dead = dead_rule[num_neighbors] & alive & can_die\n\n new_flags = np.zeros_like(board)\n color_weights = 1 * alive + 2 * spawning\n for color in CellTypes.colors:\n # For each of the colors, see if there are two or more neighbors\n # that have it. If so, any new cells (whether born or spawned)\n # will also get that color.\n has_color = board & color > 0\n new_color = convolve2d(has_color * color_weights, cfilter) >= 2\n new_flags += color * new_color\n indestructible = alive & (board & CellTypes.destructible == 0)\n new_flags += CellTypes.destructible * (convolve2d(indestructible, cfilter) < 2)\n\n board *= ~(new_alive | new_dead)\n board += new_alive * (CellTypes.alive + new_flags)", "def make_board(self, ):\n for r in range(self.boardSize):\n for c in range(self.boardSize): # avoid redundant calculation by adding neighbors \"behind\" current cell\n new_cell = Cell(r, c)\n self.board[r][c] = new_cell\n if c > 0: # add left neighbor-cell\n new_cell.add_neighbor(self.board[r][c-1])\n if r > 0: # add above neighbor-cell\n new_cell.add_neighbor(self.board[r-1][c])\n if r > 0 and c < self.boardSize-1: # add right diagonal neighbor-cell\n new_cell.add_neighbor(self.board[r-1][c+1])", "def new_board(self):\n\n # delete all objects\n self.canvas.delete('all')\n\n # reset\n self.board = [\n [self.EMPTY, self.EMPTY, self.EMPTY],\n [self.EMPTY, self.EMPTY, self.EMPTY],\n [self.EMPTY, self.EMPTY, self.EMPTY]]\n\n # draw grid\n for n in range(1, 3):\n # vertical\n self.canvas.create_line(\n self.CELL_SIZE*n, 0,\n self.CELL_SIZE*n, self.WINDOW_SIZE,\n width=self.GRID_LINE_WIDTH, fill=self.GRID_COLOR)\n # horizontal\n self.canvas.create_line(\n 0, self.CELL_SIZE*n,\n self.WINDOW_SIZE, self.CELL_SIZE*n,\n width=self.GRID_LINE_WIDTH, fill=self.GRID_COLOR)", "def new_board(n: int) -> Board:\n\n return [[0 for _ in range(n)] for _ in range(n)]", "def __init__(self, board_size=BOARD_SIZE, num_mines=NUM_MINES):\n\n self.board_size = board_size\n self.num_mines = num_mines\n self.board = place_mines(board_size, num_mines)\n self.my_board = np.ones((board_size, board_size), dtype=int) * CLOSED\n self.valid_actions = np.ones((self.board_size, self.board_size), dtype=np.bool)\n\n self.observation_space = spaces.Box(low=-2, high=9,\n shape=(self.board_size, self.board_size), dtype=np.int)\n self.action_space = spaces.MultiDiscrete([self.board_size, self.board_size])", "def create_board(self, size):\n self.board = [\n [FieldState.EMPTY for _ in range(size)]\n for _ in range(size)\n ]", "def __init__(self, board_size=BOARD_SIZE, num_mines=NUM_MINES):\n\n self.board_size = board_size\n self.num_mines = num_mines\n 
self.board = place_mines(board_size, num_mines)\n self.my_board = np.ones((board_size, board_size), dtype=int) * CLOSED\n self.num_actions = 0\n\n self.observation_space = spaces.Box(low=-2, high=9,\n shape=(self.board_size, self.board_size), dtype=np.int)\n self.action_space = spaces.Discrete(self.board_size*self.board_size)\n self.valid_actions = np.ones((self.board_size * self.board_size), dtype=np.bool)", "def init_cells(self):\n state = list()\n width = WIDTH / CELL_SIZE\n height = HEIGHT / CELL_SIZE\n\n for index in range(0, width * height):\n if randint(1, 100) >= 100 - CELL_DENSITY:\n # Live cell.\n status = NORMAL\n state.append(1)\n else:\n # Dead cell.\n status = HIDDEN\n state.append(0)\n\n cell = self.canvas.create_rectangle((index % width) * CELL_SIZE, (index / width) * CELL_SIZE,\n ((index % width) + 1) * CELL_SIZE, ((index / width) + 1) * CELL_SIZE,\n fill=\"black\", state=status, outline=\"white\")\n self.cells.append(cell)\n\n return state", "def __init__(self):\n self._board_area = [[\" \" for i in range(20)] for j in range(20)]\n\n # Starting setup for board includes these coordinates black, and their mirror white\n black_start = [(1, 2), (2, 2), (2, 1), (2, 3), (3, 2), (4, 1), (4, 3), (5, 2), (6, 1), (6, 3), (7, 1),\n (7, 2), (7, 3), (8, 1), (8, 2), (8, 3), (9, 1), (9, 2), (9, 3), (10, 1), (10, 2), (10, 3),\n (11, 1), (11, 3), (12, 1), (12, 2), (12, 3), (13, 1), (13, 3), (14, 2), (15, 1), (15, 3),\n (16, 2), (17, 1), (17, 2), (17, 3), (18, 2), (2, 6), (5, 6), (8, 6), (11, 6),\n (14, 6), (17, 6)]\n\n # Border points set for clearing out stones that move beyond the border\n self._border = set((0, i) for i in range(20)) | set((19, i) for i in range(20))\n self._border = self._border | set((i, 0) for i in range(20)) | set((i, 19) for i in range(20))\n\n # Fill black and white stones\n for coord in black_start:\n self._board_area[coord[0]][coord[1]] = \"B\"\n self._board_area[coord[0]][-coord[1] - 1] = \"W\"\n\n # Alphabetic indexing of board for alpha-numeric movement inputs\n self._locmap = dict(zip(\"abcdefghijklmnopqrst\", range(20)))", "def __init__(self):\n self.game_board = [' '] * 9\n self.size = len(self.game_board)\n self.move = 'X'\n self.player1 = None\n self.player2 = None\n self.current_player = None\n self.board_coords = {\n (1, 3): 0, (2, 3): 1, (3, 3): 2,\n (1, 2): 3, (2, 2): 4, (3, 2): 5,\n (1, 1): 6, (2, 1): 7, (3, 1): 8\n }\n\n self.winning_cases = [\n (0, 1, 2), (3, 4, 5), (6, 7, 8),\n (0, 3, 6), (1, 4, 7), (2, 5, 8),\n (0, 4, 8), (2, 4, 6)\n ]", "def updated_board(board_w, board_h, piece_list, board, position):\n board_state = board.state\n new_board = Board(board_w, board_h, 1, piece_list, position)\n new_board.state = board_state\n return new_board", "def __init__(self, size, board):\n self.BoardSize = size #the size of the board\n self.CurrentGameBoard= board #the current state of the game board", "def __init__(self, board_size=MAX_BOARD_SIZE, cell_size=MAX_CELL_SIZE, dead_color=DEAD, alive_color=ALIVE):\n self._board_size = board_size\n self._cell_size = cell_size\n self.dead_color = dead_color\n self.alive_color = alive_color\n\n self.board = []\n self.mode = 0", "def __init__(self, size):\n\t\tself.size = size\n\t\tself.board = []\n\t\tnew = []\n\t\tfor i in range(0, size, 1):\n\t\t\tfor j in range(0, size, 1):\n\t\t\t\tnew.append(0)\n\t\t\tself.board.append(new)\n\t\t\tnew = []", "def create_board(self):\n board = dict()\n cell_names = self.create_cell_names()\n\n for cell_name in cell_names:\n if cell_name in self.given_cells:\n is_given = 
True\n value = self.given_cells[cell_name]\n else:\n is_given = False\n value = 0\n new_cell = c.Cell(cell_name, is_given, value, self.size)\n board[cell_name] = new_cell\n return board", "def make_move(self, move: Any) -> \"StonehengeState\":\n new_board = deepcopy(self.current_board)\n for index in range(len(self.current_board)):\n if self.current_board[index] == move:\n if self.p1_turn:\n new_board = new_board.replace(\n self.current_board[index], '1')\n else:\n new_board = new_board.replace(\n self.current_board[index], '2')\n new_ley_lines = deepcopy(self.current_ley_lines)\n for item in new_ley_lines:\n for key in item:\n for index in range(len(key[1])):\n if key[1][index] == move:\n if self.p1_turn:\n key[1][index] = '1'\n else:\n key[1][index] = '2'\n change_dict = {}\n for item in new_ley_lines:\n for key in item:\n p1_count = 0\n p2_count = 0\n for string in key[1]:\n if string == '1':\n p1_count += 1\n if string == '2':\n p2_count += 1\n\n\n if p1_count >= len(key[1])/2 and p1_count > p2_count:\n\n change_dict[key[0]] = '1'\n if p2_count >= len(key[1])/2 and p2_count > p1_count:\n\n change_dict[key[0]] = '2'\n for key in change_dict:\n if not (key == '1' or key == '2'):\n if str(key) in new_board:\n new_board = new_board.replace(str(key), change_dict[key])\n for item in new_ley_lines:\n for key1 in item:\n if key == key1[0]:\n key1[0] = change_dict[key]\n\n new_state = StonehengeState(not self.p1_turn, self.side_length,\n new_board, new_ley_lines)\n return new_state", "def makeBoard(n):\n valid_positions = []\n for i in range(0, n):\n for j in range(0,n):\n valid_positions.append(Position(i,j))\n return valid_positions", "def place_obj(self):\r\n for pos in BOARD_POSITIONS:\r\n self.board[pos[0]][pos[1]] = Stone(color=self.state[pos[0]][pos[1]], pos=(pos[0], pos[1]))\r\n self.board[pos[0]][pos[1]].liberty = self.board[pos[0]][pos[1]].compute_liberty(self.state)", "def makeState(*args,**kwargs):\n \n cells = []\n\n for item in args:\n #print item\n cells.append(item)\n \n newState = State(cells)\n #newState.printBoard()\n return newState", "def initialize_board(self):\n self.board = np.zeros(shape=(BOARD_SIZE, BOARD_SIZE), dtype=np.int) # another way of defining board: [[for x in range(cm.BOARD_SIZE)] for x in range(cm.BOARD_SIZE)]\n center = int(BOARD_SIZE / 2)\n self.board[center-1][center-1] = self.board[center][center] = WHITE # place the board according to position\n self.board[center][center-1] = self.board[center-1][center] = BLACK\n self.black_piece = 2\n self.white_piece = 2", "def _new_board(board_size):\n return tuple(tuple(0 for _ in range(board_size)) for _ in range(board_size))", "def init_board():\n\t# Generates a table 10*10 of 0s with -1 around and the initial state\n\t# of the board with 2 whites and 2 blacks in the middle\n\ttable = [[0 if i != 0 and i != 9 else -1 for i in range(10)] if j != 0 and j != 9 else [-1 for i in range(10)] for j in range(10)] #leaves a -1 line around the whole table of 0s\n\t#initial state is drawn and recorded\n\ttable[4][4] = 2\n\ttable[5][5] = 2\n\ttable[4][5] = 1\n\ttable[5][4] = 1\n\tdrawPiece((4,4),2)\n\tdrawPiece((5,5),2)\n\tdrawPiece((4,5),1)\n\tdrawPiece((5,4),1)\n\treturn table", "def make_board():\n board = bingo_numbers()\n board[2][2] = ''\n return board", "def make(self,state_board):\n\t\tstate_board[self.column][self.line] = self.couleur #place the piece\n\t\tdrawPiece((self.column,self.line),self.couleur) #draws it on the board\n\t\tfor pos in self.flips: #flips all the pieces in 
flips\n\t\t\tstate_board[pos[0]][pos[1]] = self.couleur\n\t\t\tdrawPiece(pos,self.couleur) #draws it on the board", "def create_board(N):\n board = [[0 for x in range(N)] for y in range(N)] \n return board" ]
[ "0.7055454", "0.67278403", "0.67269707", "0.6680739", "0.6606683", "0.6588023", "0.6581234", "0.65147763", "0.65075326", "0.6501786", "0.64927673", "0.64923614", "0.64883053", "0.64836043", "0.645332", "0.64511937", "0.64277464", "0.6404062", "0.63852406", "0.63647217", "0.6361234", "0.6357557", "0.6349266", "0.6336141", "0.63070947", "0.6305831", "0.63048196", "0.63003284", "0.6295785", "0.6285814" ]
0.73487616
0
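For readers skimming this row: the positive document above resets a board matrix and regenerates obstacles, but the class around it is not included. The sketch below is a minimal, self-contained reconstruction of that pattern, a hypothetical Board class whose recreate_obstacles mirrors the positive document and whose create_obstacles is adapted from the first negative listed in this row. The Dimension helper, the Square type, the 10x10 board size, and the cell encoding (1 for a free cell, 0 for an obstacle, as the snippets appear to use it) are assumptions made for illustration, not details taken from the source repository.

import random
from typing import List, NamedTuple

import numpy as np


class Square(NamedTuple):
    x: int
    y: int


class Dimension:
    """Assumed board geometry helper; the real one is not shown in this row."""
    WIDTH, HEIGHT = 10, 10

    @classmethod
    def board_width(cls) -> int:
        return cls.WIDTH

    @classmethod
    def board_height(cls) -> int:
        return cls.HEIGHT

    @classmethod
    def board_size(cls):
        # np.full expects (rows, cols), i.e. (height, width).
        return cls.HEIGHT, cls.WIDTH


class Board:
    def __init__(self, maximum_obstacles_on_board: int = 5) -> None:
        self.maximum_obstacles_on_board = maximum_obstacles_on_board
        self.board_matrix = np.full(Dimension.board_size(), 1)  # 1 = free cell
        self.obstacles = self.create_obstacles()

    def create_obstacles(self) -> List[Square]:
        """Sample a random number of distinct obstacle squares and mark them 0."""
        obstacles_number = random.randint(1, self.maximum_obstacles_on_board)
        obstacles: List[Square] = []
        while len(obstacles) < obstacles_number:
            obstacle = Square(
                random.randint(0, Dimension.board_width() - 1),
                random.randint(0, Dimension.board_height() - 1),
            )
            if obstacle not in obstacles:
                self.board_matrix[obstacle.y][obstacle.x] = 0
                obstacles.append(obstacle)
        return obstacles

    def recreate_obstacles(self) -> None:
        """Creates a new board state with a different number and placement of obstacles."""
        self.board_matrix = np.full(Dimension.board_size(), 1)
        self.obstacles = self.create_obstacles()


if __name__ == "__main__":
    board = Board()
    print("obstacles:", board.obstacles)
    board.recreate_obstacles()
    print("new obstacles:", board.obstacles)

Running the module prints one random obstacle layout and then a fresh one after recreate_obstacles, which is the behaviour the query in this row describes.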